Move iOS audio device to sdk/objc.

This change forks the existing iOS audio device module and audio device
implementation from modules/audio_device/ into sdk/objc/Framework. It
also updates RTCPeerConnectionFactory to use the forked implementation.

The unit tests are re-implemented as XCTests.

(was: https://webrtc-review.googlesource.com/c/src/+/67300)
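
For illustration only (not part of this patch): after this change, iOS
clients can obtain the forked ADM directly through the new native API.
A minimal sketch, assuming the app links the sdk/objc target:

  #include "sdk/objc/Framework/Native/api/audio_device_module.h"

  // Returns an ADM backed by AudioDeviceModuleIOS on iOS, nullptr elsewhere.
  rtc::scoped_refptr<webrtc::AudioDeviceModule> adm =
      webrtc::CreateAudioDeviceModule();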

Bug: webrtc:9120
Change-Id: I46c09900246f75ca5285aeb38f7b8b295784ffac
Reviewed-on: https://webrtc-review.googlesource.com/76741
Reviewed-by: Kári Helgason <kthelgason@webrtc.org>
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Reviewed-by: Anders Carlsson <andersc@webrtc.org>
Commit-Queue: Peter Hanspers <peterhanspers@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23238}
diff --git a/sdk/objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.h b/sdk/objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.h
new file mode 100644
index 0000000..487bab1
--- /dev/null
+++ b/sdk/objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.h
@@ -0,0 +1,34 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+namespace webrtc {
+class AudioSessionObserver;
+}
+
+/** Adapter that forwards RTCAudioSessionDelegate calls to the appropriate
+ *  methods on the AudioSessionObserver.
+ */
+@interface RTCNativeAudioSessionDelegateAdapter : NSObject <RTCAudioSessionDelegate>
+
+- (instancetype)init NS_UNAVAILABLE;
+
+/** |observer| is a raw pointer and should be kept alive
+ *  for this object's lifetime.
+ */
+- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer
+    NS_DESIGNATED_INITIALIZER;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/sdk/objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.mm b/sdk/objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.mm
new file mode 100644
index 0000000..a443e51
--- /dev/null
+++ b/sdk/objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.mm
@@ -0,0 +1,89 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "RTCNativeAudioSessionDelegateAdapter.h"
+
+#include "sdk/objc/Framework/Native/src/audio/audio_session_observer.h"
+
+#import "WebRTC/RTCLogging.h"
+
+@implementation RTCNativeAudioSessionDelegateAdapter {
+  webrtc::AudioSessionObserver *_observer;
+}
+
+- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer {
+  RTC_DCHECK(observer);
+  if (self = [super init]) {
+    _observer = observer;
+  }
+  return self;
+}
+
+#pragma mark - RTCAudioSessionDelegate
+
+- (void)audioSessionDidBeginInterruption:(RTCAudioSession *)session {
+  _observer->OnInterruptionBegin();
+}
+
+- (void)audioSessionDidEndInterruption:(RTCAudioSession *)session
+                   shouldResumeSession:(BOOL)shouldResumeSession {
+  _observer->OnInterruptionEnd();
+}
+
+- (void)audioSessionDidChangeRoute:(RTCAudioSession *)session
+           reason:(AVAudioSessionRouteChangeReason)reason
+    previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
+  switch (reason) {
+    case AVAudioSessionRouteChangeReasonUnknown:
+    case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
+    case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
+    case AVAudioSessionRouteChangeReasonCategoryChange:
+      // It turns out that we see a category change (at least in iOS 9.2)
+      // when switching from a BT device to e.g. the speaker via the
+      // iOS Control Center, and we therefore must check whether the sample
+      // rate has changed. If so, the audio unit must be restarted.
+    case AVAudioSessionRouteChangeReasonOverride:
+    case AVAudioSessionRouteChangeReasonWakeFromSleep:
+    case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
+      _observer->OnValidRouteChange();
+      break;
+    case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
+      // The set of input and output ports has not changed, but their
+      // configuration has, e.g., a port’s selected data source has
+      // changed. Ignore this type of route change since we are focusing
+      // on detecting headset changes.
+      RTCLog(@"Ignoring RouteConfigurationChange");
+      break;
+  }
+}
+
+- (void)audioSessionMediaServerTerminated:(RTCAudioSession *)session {
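+  // Not forwarded: AudioSessionObserver exposes no media-server callback.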
+}
+
+- (void)audioSessionMediaServerReset:(RTCAudioSession *)session {
+}
+
+- (void)audioSession:(RTCAudioSession *)session
+    didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
+  _observer->OnCanPlayOrRecordChange(canPlayOrRecord);
+}
+
+- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session {
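+  // Not forwarded: AudioSessionObserver has no corresponding notification.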
+}
+
+- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session {
+}
+
+- (void)audioSession:(RTCAudioSession *)audioSession
+    didChangeOutputVolume:(float)outputVolume {
+  _observer->OnChangedOutputVolume();
+}
+
+@end
diff --git a/sdk/objc/Framework/Classes/PeerConnection/RTCPeerConnectionFactory.mm b/sdk/objc/Framework/Classes/PeerConnection/RTCPeerConnectionFactory.mm
index 786d2b0..5ebd4e9 100644
--- a/sdk/objc/Framework/Classes/PeerConnection/RTCPeerConnectionFactory.mm
+++ b/sdk/objc/Framework/Classes/PeerConnection/RTCPeerConnectionFactory.mm
@@ -38,6 +38,10 @@
 #include "sdk/objc/Framework/Native/src/objc_video_encoder_factory.h"
 #endif
 
+#if defined(WEBRTC_IOS)
+#import "sdk/objc/Framework/Native/api/audio_device_module.h"
+#endif
+
 // Adding the nogncheck to disable the including header check.
 // The no-media version PeerConnectionFactory doesn't depend on media related
 // C++ target.
@@ -55,6 +59,14 @@
 
 @synthesize nativeFactory = _nativeFactory;
 
+- (rtc::scoped_refptr<webrtc::AudioDeviceModule>)audioDeviceModule {
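+  // On iOS, return the forked audio device module from sdk/objc. On other
+  // platforms, return nullptr so that the peer connection factory falls back
+  // to the platform-default ADM.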
+#if defined(WEBRTC_IOS)
+  return webrtc::CreateAudioDeviceModule();
+#else
+  return nullptr;
+#endif
+}
+
 - (instancetype)init {
 #ifdef HAVE_NO_MEDIA
   return [self initWithNoMedia];
@@ -65,7 +77,7 @@
                                                      [[RTCVideoEncoderFactoryH264 alloc] init])
                        nativeVideoDecoderFactory:webrtc::ObjCToNativeVideoDecoderFactory(
                                                      [[RTCVideoDecoderFactoryH264 alloc] init])
-                               audioDeviceModule:nullptr
+                               audioDeviceModule:[self audioDeviceModule]
                            audioProcessingModule:nullptr];
 #endif
 }
@@ -87,7 +99,7 @@
                        nativeAudioDecoderFactory:webrtc::CreateBuiltinAudioDecoderFactory()
                        nativeVideoEncoderFactory:std::move(native_encoder_factory)
                        nativeVideoDecoderFactory:std::move(native_decoder_factory)
-                               audioDeviceModule:nullptr
+                               audioDeviceModule:[self audioDeviceModule]
                            audioProcessingModule:nullptr];
 #endif
 }
diff --git a/sdk/objc/Framework/Native/api/audio_device_module.h b/sdk/objc/Framework/Native/api/audio_device_module.h
new file mode 100644
index 0000000..2c519ba
--- /dev/null
+++ b/sdk/objc/Framework/Native/api/audio_device_module.h
@@ -0,0 +1,24 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_FRAMEWORK_NATIVE_API_AUDIO_DEVICE_MODULE_H_
+#define SDK_OBJC_FRAMEWORK_NATIVE_API_AUDIO_DEVICE_MODULE_H_
+
+#include <memory>
+
+#include "modules/audio_device/include/audio_device.h"
+
+namespace webrtc {
+
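+// Creates an AudioDeviceModule backed by the forked iOS implementation in
+// sdk/objc. Returns nullptr on all other platforms.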
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule();
+
+}  // namespace webrtc
+
+#endif  // SDK_OBJC_FRAMEWORK_NATIVE_API_AUDIO_DEVICE_MODULE_H_
diff --git a/sdk/objc/Framework/Native/api/audio_device_module.mm b/sdk/objc/Framework/Native/api/audio_device_module.mm
new file mode 100644
index 0000000..2afa6df
--- /dev/null
+++ b/sdk/objc/Framework/Native/api/audio_device_module.mm
@@ -0,0 +1,30 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_device_module.h"
+
+#include "rtc_base/logging.h"
+#include "rtc_base/refcountedobject.h"
+
+#include "sdk/objc/Framework/Native/src/audio/audio_device_module_ios.h"
+
+namespace webrtc {
+
+rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule() {
+  RTC_LOG(INFO) << __FUNCTION__;
+#if defined(WEBRTC_IOS)
+  return new rtc::RefCountedObject<ios_adm::AudioDeviceModuleIOS>();
+#else
+  RTC_LOG(LERROR)
+      << "current platform is not supported => this module will self destruct!";
+  return nullptr;
+#endif
+}
+}  // namespace webrtc
diff --git a/sdk/objc/Framework/Native/src/audio/audio_device_ios.h b/sdk/objc/Framework/Native/src/audio/audio_device_ios.h
new file mode 100644
index 0000000..250826d
--- /dev/null
+++ b/sdk/objc/Framework/Native/src/audio/audio_device_ios.h
@@ -0,0 +1,293 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
+#define SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
+
+#include <memory>
+
+#include "sdk/objc/Framework/Headers/WebRTC/RTCMacros.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "audio_session_observer.h"
+#include "voice_processing_audio_unit.h"
+#include "rtc_base/buffer.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/thread_checker.h"
+
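+// Forward-declares the Objective-C adapter class so that this header can be
+// included from C++ translation units.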
+RTC_FWD_DECL_OBJC_CLASS(RTCNativeAudioSessionDelegateAdapter);
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+namespace ios_adm {
+
+// Implements full duplex 16-bit mono PCM audio support for iOS using a
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
+// supports audio echo cancellation. It also adds automatic gain control,
+// adjustment of voice-processing quality and muting.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All supported public methods must also be called on the same thread.
+// A thread checker will RTC_DCHECK if any supported method is called on an
+// invalid thread.
+//
+// Recorded audio will be delivered on a real-time internal I/O thread in the
+// audio unit. The audio unit will also ask for audio data to play out on this
+// same thread.
+class AudioDeviceIOS : public AudioDeviceGeneric,
+                       public AudioSessionObserver,
+                       public VoiceProcessingAudioUnitObserver,
+                       public rtc::MessageHandler {
+ public:
+  AudioDeviceIOS();
+  ~AudioDeviceIOS();
+
+  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
+
+  InitStatus Init() override;
+  int32_t Terminate() override;
+  bool Initialized() const override;
+
+  int32_t InitPlayout() override;
+  bool PlayoutIsInitialized() const override;
+
+  int32_t InitRecording() override;
+  bool RecordingIsInitialized() const override;
+
+  int32_t StartPlayout() override;
+  int32_t StopPlayout() override;
+  bool Playing() const override { return playing_; }
+
+  int32_t StartRecording() override;
+  int32_t StopRecording() override;
+  bool Recording() const override { return recording_; }
+
+  // This method returns a hard-coded delay value and not a dynamic delay
+  // estimate. The reason is that iOS supports a built-in AEC and the WebRTC
+  // AEC will always be disabled in the Libjingle layer to avoid running two
+  // AEC implementations at the same time. It also saves resources to avoid
+  // updating these delay values continuously.
+  // TODO(henrika): it would be possible to mark this method as not
+  // implemented since it is only called for A/V-sync purposes today and
+  // A/V-sync is not supported on iOS. However, we avoid adding error messages
+  // to the log by using this dummy implementation instead.
+  int32_t PlayoutDelay(uint16_t& delayMS) const override;
+
+  // Native audio parameters stored during construction.
+  // These methods are unique for the iOS implementation.
+  int GetPlayoutAudioParameters(AudioParameters* params) const override;
+  int GetRecordAudioParameters(AudioParameters* params) const override;
+
+  // These methods are currently not fully implemented on iOS:
+
+  // See audio_device_not_implemented.cc for trivial implementations.
+  int32_t ActiveAudioLayer(
+      AudioDeviceModule::AudioLayer& audioLayer) const override;
+  int32_t PlayoutIsAvailable(bool& available) override;
+  int32_t RecordingIsAvailable(bool& available) override;
+  int16_t PlayoutDevices() override;
+  int16_t RecordingDevices() override;
+  int32_t PlayoutDeviceName(uint16_t index,
+                            char name[kAdmMaxDeviceNameSize],
+                            char guid[kAdmMaxGuidSize]) override;
+  int32_t RecordingDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+  int32_t SetPlayoutDevice(uint16_t index) override;
+  int32_t SetPlayoutDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t SetRecordingDevice(uint16_t index) override;
+  int32_t SetRecordingDevice(
+      AudioDeviceModule::WindowsDeviceType device) override;
+  int32_t InitSpeaker() override;
+  bool SpeakerIsInitialized() const override;
+  int32_t InitMicrophone() override;
+  bool MicrophoneIsInitialized() const override;
+  int32_t SpeakerVolumeIsAvailable(bool& available) override;
+  int32_t SetSpeakerVolume(uint32_t volume) override;
+  int32_t SpeakerVolume(uint32_t& volume) const override;
+  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const override;
+  int32_t MinSpeakerVolume(uint32_t& minVolume) const override;
+  int32_t MicrophoneVolumeIsAvailable(bool& available) override;
+  int32_t SetMicrophoneVolume(uint32_t volume) override;
+  int32_t MicrophoneVolume(uint32_t& volume) const override;
+  int32_t MaxMicrophoneVolume(uint32_t& maxVolume) const override;
+  int32_t MinMicrophoneVolume(uint32_t& minVolume) const override;
+  int32_t MicrophoneMuteIsAvailable(bool& available) override;
+  int32_t SetMicrophoneMute(bool enable) override;
+  int32_t MicrophoneMute(bool& enabled) const override;
+  int32_t SpeakerMuteIsAvailable(bool& available) override;
+  int32_t SetSpeakerMute(bool enable) override;
+  int32_t SpeakerMute(bool& enabled) const override;
+  int32_t StereoPlayoutIsAvailable(bool& available) override;
+  int32_t SetStereoPlayout(bool enable) override;
+  int32_t StereoPlayout(bool& enabled) const override;
+  int32_t StereoRecordingIsAvailable(bool& available) override;
+  int32_t SetStereoRecording(bool enable) override;
+  int32_t StereoRecording(bool& enabled) const override;
+
+  // AudioSessionObserver methods. May be called from any thread.
+  void OnInterruptionBegin() override;
+  void OnInterruptionEnd() override;
+  void OnValidRouteChange() override;
+  void OnCanPlayOrRecordChange(bool can_play_or_record) override;
+  void OnChangedOutputVolume() override;
+
+  // VoiceProcessingAudioUnitObserver methods.
+  OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                 const AudioTimeStamp* time_stamp,
+                                 UInt32 bus_number,
+                                 UInt32 num_frames,
+                                 AudioBufferList* io_data) override;
+  OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                            const AudioTimeStamp* time_stamp,
+                            UInt32 bus_number,
+                            UInt32 num_frames,
+                            AudioBufferList* io_data) override;
+
+  // Handles messages posted to |thread_|.
+  void OnMessage(rtc::Message *msg) override;
+
+  bool IsInterrupted();
+
+ private:
+  // Called by the relevant AudioSessionObserver methods on |thread_|.
+  void HandleInterruptionBegin();
+  void HandleInterruptionEnd();
+  void HandleValidRouteChange();
+  void HandleCanPlayOrRecordChange(bool can_play_or_record);
+  void HandleSampleRateChange(float sample_rate);
+  void HandlePlayoutGlitchDetected();
+  void HandleOutputVolumeChange();
+
+  // Uses current |playout_parameters_| and |record_parameters_| to inform the
+  // audio device buffer (ADB) about our internal audio parameters.
+  void UpdateAudioDeviceBuffer();
+
+  // Since the preferred audio parameters are only hints to the OS, the actual
+  // values may differ once the AVAudioSession has been activated.
+  // This method asks for the current hardware parameters and takes action
+  // if they differ from what we initially asked for. It also
+  // defines |playout_parameters_| and |record_parameters_|.
+  void SetupAudioBuffersForActiveAudioSession();
+
+  // Creates the audio unit.
+  bool CreateAudioUnit();
+
+  // Updates the audio unit state based on current state.
+  void UpdateAudioUnit(bool can_play_or_record);
+
+  // Configures the audio session for WebRTC.
+  bool ConfigureAudioSession();
+  // Unconfigures the audio session.
+  void UnconfigureAudioSession();
+
+  // Activates our audio session, creates and initializes the voice-processing
+  // audio unit and verifies that we got the preferred native audio parameters.
+  bool InitPlayOrRecord();
+
+  // Closes and deletes the voice-processing I/O unit.
+  void ShutdownPlayOrRecord();
+
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  rtc::ThreadChecker thread_checker_;
+
+  // Native I/O audio thread checker.
+  rtc::ThreadChecker io_thread_checker_;
+
+  // Thread that this object is created on.
+  rtc::Thread* thread_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleIOS instance that created this object.
+  // The AudioDeviceBuffer is a member of that instance and therefore
+  // outlives this object.
+  AudioDeviceBuffer* audio_device_buffer_;
+
+  // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+  // the playout and recording sides. These structures are set in two steps:
+  // first, the native sample rate and #channels are defined in Init(). Next,
+  // the audio session is activated and we verify that the preferred
+  // parameters were granted by the OS. At this stage it is also possible to
+  // add a third component to the parameters: the native I/O buffer duration.
+  // An RTC_CHECK will be hit if we for some reason fail to open an audio
+  // session using the specified parameters.
+  AudioParameters playout_parameters_;
+  AudioParameters record_parameters_;
+
+  // The AudioUnit used to play and record audio.
+  std::unique_ptr<VoiceProcessingAudioUnit> audio_unit_;
+
+  // FineAudioBuffer takes an AudioDeviceBuffer, which delivers audio data
+  // in chunks of 10ms. It then allows this data to be pulled at a
+  // finer or coarser granularity, i.e., by interacting with this class
+  // instead of directly with the AudioDeviceBuffer one can ask for any
+  // number of audio data samples. It also supports a similar scheme for the
+  // recording side.
+  // Example: native buffer size can be 128 audio frames at 16kHz sample rate.
+  // WebRTC will provide 480 audio frames per 10ms but iOS asks for 128
+  // in each callback (one every 8ms). This class can then ask for 128 and the
+  // FineAudioBuffer will ask WebRTC for new data only when needed and also
+  // cache non-utilized audio between callbacks. On the recording side, iOS
+  // can provide audio data frames of size 128 and these are accumulated until
+  // enough data to supply one 10ms call exists. This 10ms chunk is then sent
+  // to WebRTC and the remaining part is stored.
+  std::unique_ptr<FineAudioBuffer> fine_audio_buffer_;
+
+  // Temporary storage for recorded data. AudioUnitRender() renders into this
+  // array as soon as a frame of the desired buffer size has been recorded.
+  // On real iOS devices, the size will be fixed and set once. For iOS
+  // simulators, the size can vary from callback to callback and the size
+  // will be changed dynamically to account for this behavior.
+  rtc::BufferT<int16_t> record_audio_buffer_;
+
+  // Set to 1 when recording is active and 0 otherwise.
+  volatile int recording_;
+
+  // Set to 1 when playout is active and 0 otherwise.
+  volatile int playing_;
+
+  // Set to true after successful call to Init(), false otherwise.
+  bool initialized_ RTC_GUARDED_BY(thread_checker_);
+
+  // Set to true after successful call to InitRecording() or InitPlayout(),
+  // false otherwise.
+  bool audio_is_initialized_;
+
+  // Set to true if audio session is interrupted, false otherwise.
+  bool is_interrupted_;
+
+  // Delegate adapter that forwards RTCAudioSession notifications to this
+  // object (see the AudioSessionObserver methods above).
+  RTCNativeAudioSessionDelegateAdapter* audio_session_observer_
+      RTC_GUARDED_BY(thread_checker_);
+
+  // Set to true if we've configured the audio session for WebRTC.
+  bool has_configured_session_ RTC_GUARDED_BY(thread_checker_);
+
+  // Counts number of detected audio glitches on the playout side.
+  int64_t num_detected_playout_glitches_ RTC_GUARDED_BY(thread_checker_);
+  int64_t last_playout_time_ RTC_GUARDED_BY(io_thread_checker_);
+
+  // Counts number of playout callbacks per call.
+  // The value is updated on the native I/O thread and later read on the
+  // creating thread (see thread_checker_), but at that stage no audio is
+  // active. Hence, it is a "thread safe" design and no lock is needed.
+  int64_t num_playout_callbacks_;
+
+  // Contains the time for when the last output volume change was detected.
+  int64_t last_output_volume_change_time_ RTC_GUARDED_BY(thread_checker_);
+};
+}  // namespace ios_adm
+}  // namespace webrtc
+
+#endif  // SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_DEVICE_IOS_H_
diff --git a/sdk/objc/Framework/Native/src/audio/audio_device_ios.mm b/sdk/objc/Framework/Native/src/audio/audio_device_ios.mm
new file mode 100644
index 0000000..cf25486
--- /dev/null
+++ b/sdk/objc/Framework/Native/src/audio/audio_device_ios.mm
@@ -0,0 +1,1104 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#include "audio_device_ios.h"
+
+#include <cmath>
+
+#include "api/array_view.h"
+#include "modules/audio_device/fine_audio_buffer.h"
+#include "rtc_base/atomicops.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/thread.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/timeutils.h"
+#include "sdk/objc/Framework/Classes/Common/helpers.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "WebRTC/RTCLogging.h"
+#import "sdk/objc/Framework/Classes/Audio/RTCAudioSession+Private.h"
+#import "sdk/objc/Framework/Classes/Audio/RTCNativeAudioSessionDelegateAdapter.h"
+#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h"
+#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h"
+
+namespace webrtc {
+namespace ios_adm {
+
+#define LOGI() RTC_LOG(LS_INFO) << "AudioDeviceIOS::"
+
+#define LOG_AND_RETURN_IF_ERROR(error, message)    \
+  do {                                             \
+    OSStatus err = error;                          \
+    if (err) {                                     \
+      RTC_LOG(LS_ERROR) << message << ": " << err; \
+      return false;                                \
+    }                                              \
+  } while (0)
+
+#define LOG_IF_ERROR(error, message)               \
+  do {                                             \
+    OSStatus err = error;                          \
+    if (err) {                                     \
+      RTC_LOG(LS_ERROR) << message << ": " << err; \
+    }                                              \
+  } while (0)
+
+// Hardcoded delay estimates based on real measurements.
+// TODO(henrika): these values are not used in combination with the built-in
+// AEC and can most likely be removed.
+const UInt16 kFixedPlayoutDelayEstimate = 30;
+const UInt16 kFixedRecordDelayEstimate = 30;
+
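+// Message IDs posted to |thread_| from the AudioSessionObserver callbacks
+// and dispatched in AudioDeviceIOS::OnMessage().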
+enum AudioDeviceMessageType : uint32_t {
+  kMessageTypeInterruptionBegin,
+  kMessageTypeInterruptionEnd,
+  kMessageTypeValidRouteChange,
+  kMessageTypeCanPlayOrRecordChange,
+  kMessageTypePlayoutGlitchDetected,
+  kMessageOutputVolumeChange,
+};
+
+using ios::CheckAndLogError;
+
+#if !defined(NDEBUG)
+// Returns true when the code runs on a device simulator.
+static bool DeviceIsSimulator() {
+  return ios::GetDeviceName() == "x86_64";
+}
+
+// Helper method that logs essential device information strings.
+static void LogDeviceInfo() {
+  RTC_LOG(LS_INFO) << "LogDeviceInfo";
+  @autoreleasepool {
+    RTC_LOG(LS_INFO) << " system name: " << ios::GetSystemName();
+    RTC_LOG(LS_INFO) << " system version: " << ios::GetSystemVersionAsString();
+    RTC_LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
+    RTC_LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
+    RTC_LOG(LS_INFO) << " process name: " << ios::GetProcessName();
+    RTC_LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
+    RTC_LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
+    RTC_LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
+    RTC_LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
+#if TARGET_IPHONE_SIMULATOR
+    RTC_LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined";
+#endif
+    RTC_LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator();
+  }
+}
+#endif  // !defined(NDEBUG)
+
+AudioDeviceIOS::AudioDeviceIOS()
+    : audio_device_buffer_(nullptr),
+      audio_unit_(nullptr),
+      recording_(0),
+      playing_(0),
+      initialized_(false),
+      audio_is_initialized_(false),
+      is_interrupted_(false),
+      has_configured_session_(false),
+      num_detected_playout_glitches_(0),
+      last_playout_time_(0),
+      num_playout_callbacks_(0),
+      last_output_volume_change_time_(0) {
+  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
+  io_thread_checker_.DetachFromThread();
+  thread_checker_.DetachFromThread();
+  thread_ = rtc::Thread::Current();
+
+  audio_session_observer_ = [[RTCNativeAudioSessionDelegateAdapter alloc] initWithObserver:this];
+}
+
+AudioDeviceIOS::~AudioDeviceIOS() {
+  LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
+  audio_session_observer_ = nil;
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  Terminate();
+}
+
+void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+  LOGI() << "AttachAudioBuffer";
+  RTC_DCHECK(audioBuffer);
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  audio_device_buffer_ = audioBuffer;
+}
+
+AudioDeviceGeneric::InitStatus AudioDeviceIOS::Init() {
+  LOGI() << "Init";
+  io_thread_checker_.DetachFromThread();
+  thread_checker_.DetachFromThread();
+
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (initialized_) {
+    return InitStatus::OK;
+  }
+#if !defined(NDEBUG)
+  LogDeviceInfo();
+#endif
+  // Store the preferred sample rate and preferred number of channels already
+  // here. They have not been set and confirmed yet since configureForWebRTC
+  // is not called until audio is about to start. However, it makes sense to
+  // store the parameters now and then verify at a later stage.
+  RTCAudioSessionConfiguration* config = [RTCAudioSessionConfiguration webRTCConfiguration];
+  playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
+  record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
+  // Ensure that the audio device buffer (ADB) knows about the internal audio
+  // parameters. Note that, even if we are unable to get a mono audio session,
+  // we will always tell the I/O audio unit to do a channel format conversion
+  // to guarantee mono on the "input side" of the audio unit.
+  UpdateAudioDeviceBuffer();
+  initialized_ = true;
+  return InitStatus::OK;
+}
+
+int32_t AudioDeviceIOS::Terminate() {
+  LOGI() << "Terminate";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!initialized_) {
+    return 0;
+  }
+  StopPlayout();
+  StopRecording();
+  initialized_ = false;
+  return 0;
+}
+
+bool AudioDeviceIOS::Initialized() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return initialized_;
+}
+
+int32_t AudioDeviceIOS::InitPlayout() {
+  LOGI() << "InitPlayout";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!audio_is_initialized_);
+  RTC_DCHECK(!playing_);
+  if (!audio_is_initialized_) {
+    if (!InitPlayOrRecord()) {
+      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
+      return -1;
+    }
+  }
+  audio_is_initialized_ = true;
+  return 0;
+}
+
+bool AudioDeviceIOS::PlayoutIsInitialized() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return audio_is_initialized_;
+}
+
+bool AudioDeviceIOS::RecordingIsInitialized() const {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  return audio_is_initialized_;
+}
+
+int32_t AudioDeviceIOS::InitRecording() {
+  LOGI() << "InitRecording";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(initialized_);
+  RTC_DCHECK(!audio_is_initialized_);
+  RTC_DCHECK(!recording_);
+  if (!audio_is_initialized_) {
+    if (!InitPlayOrRecord()) {
+      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
+      return -1;
+    }
+  }
+  audio_is_initialized_ = true;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StartPlayout() {
+  LOGI() << "StartPlayout";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(audio_is_initialized_);
+  RTC_DCHECK(!playing_);
+  RTC_DCHECK(audio_unit_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetPlayout();
+  }
+  if (!recording_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+    if (!audio_unit_->Start()) {
+      RTCLogError(@"StartPlayout failed to start audio unit.");
+      return -1;
+    }
+    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
+  }
+  rtc::AtomicOps::ReleaseStore(&playing_, 1);
+  num_playout_callbacks_ = 0;
+  num_detected_playout_glitches_ = 0;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StopPlayout() {
+  LOGI() << "StopPlayout";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!audio_is_initialized_ || !playing_) {
+    return 0;
+  }
+  if (!recording_) {
+    ShutdownPlayOrRecord();
+    audio_is_initialized_ = false;
+  }
+  rtc::AtomicOps::ReleaseStore(&playing_, 0);
+
+  // Derive average number of calls to OnGetPlayoutData() between detected
+  // audio glitches and add the result to a histogram.
+  int average_number_of_playout_callbacks_between_glitches = 100000;
+  RTC_DCHECK_GE(num_playout_callbacks_, num_detected_playout_glitches_);
+  if (num_detected_playout_glitches_ > 0) {
+    average_number_of_playout_callbacks_between_glitches =
+        num_playout_callbacks_ / num_detected_playout_glitches_;
+  }
+  RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
+                              average_number_of_playout_callbacks_between_glitches);
+  RTCLog(@"Average number of playout callbacks between glitches: %d",
+         average_number_of_playout_callbacks_between_glitches);
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StartRecording() {
+  LOGI() << "StartRecording";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTC_DCHECK(audio_is_initialized_);
+  RTC_DCHECK(!recording_);
+  RTC_DCHECK(audio_unit_);
+  if (fine_audio_buffer_) {
+    fine_audio_buffer_->ResetRecord();
+  }
+  if (!playing_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+    if (!audio_unit_->Start()) {
+      RTCLogError(@"StartRecording failed to start audio unit.");
+      return -1;
+    }
+    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
+  }
+  rtc::AtomicOps::ReleaseStore(&recording_, 1);
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StopRecording() {
+  LOGI() << "StopRecording";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  if (!audio_is_initialized_ || !recording_) {
+    return 0;
+  }
+  if (!playing_) {
+    ShutdownPlayOrRecord();
+    audio_is_initialized_ = false;
+  }
+  rtc::AtomicOps::ReleaseStore(&recording_, 0);
+  return 0;
+}
+
+int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
+  delayMS = kFixedPlayoutDelayEstimate;
+  return 0;
+}
+
+int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
+  LOGI() << "GetPlayoutAudioParameters";
+  RTC_DCHECK(playout_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  *params = playout_parameters_;
+  return 0;
+}
+
+int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
+  LOGI() << "GetRecordAudioParameters";
+  RTC_DCHECK(record_parameters_.is_valid());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  *params = record_parameters_;
+  return 0;
+}
+
+void AudioDeviceIOS::OnInterruptionBegin() {
+  RTC_DCHECK(thread_);
+  LOGI() << "OnInterruptionBegin";
+  thread_->Post(RTC_FROM_HERE, this, kMessageTypeInterruptionBegin);
+}
+
+void AudioDeviceIOS::OnInterruptionEnd() {
+  RTC_DCHECK(thread_);
+  LOGI() << "OnInterruptionEnd";
+  thread_->Post(RTC_FROM_HERE, this, kMessageTypeInterruptionEnd);
+}
+
+void AudioDeviceIOS::OnValidRouteChange() {
+  RTC_DCHECK(thread_);
+  thread_->Post(RTC_FROM_HERE, this, kMessageTypeValidRouteChange);
+}
+
+void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
+  RTC_DCHECK(thread_);
+  thread_->Post(RTC_FROM_HERE,
+                this,
+                kMessageTypeCanPlayOrRecordChange,
+                new rtc::TypedMessageData<bool>(can_play_or_record));
+}
+
+void AudioDeviceIOS::OnChangedOutputVolume() {
+  RTC_DCHECK(thread_);
+  thread_->Post(RTC_FROM_HERE, this, kMessageOutputVolumeChange);
+}
+
+OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                               const AudioTimeStamp* time_stamp,
+                                               UInt32 bus_number,
+                                               UInt32 num_frames,
+                                               AudioBufferList* /* io_data */) {
+  RTC_DCHECK_RUN_ON(&io_thread_checker_);
+  OSStatus result = noErr;
+  // Simply return if recording is not enabled.
+  if (!rtc::AtomicOps::AcquireLoad(&recording_)) return result;
+
+  // Set the size of our own audio buffer and clear it first to avoid copying
+  // in combination with potential reallocations.
+  // On real iOS devices, the size will only be set once (at first callback).
+  record_audio_buffer_.Clear();
+  record_audio_buffer_.SetSize(num_frames);
+
+  // Allocate AudioBuffers to be used as storage for the received audio.
+  // The AudioBufferList structure works as a placeholder for the
+  // AudioBuffer structure, which holds a pointer to the actual data buffer
+  // in |record_audio_buffer_|. Recorded audio will be rendered into this memory
+  // at each input callback when calling AudioUnitRender().
+  AudioBufferList audio_buffer_list;
+  audio_buffer_list.mNumberBuffers = 1;
+  AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
+  audio_buffer->mNumberChannels = record_parameters_.channels();
+  audio_buffer->mDataByteSize =
+      record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
+  audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());
+
+  // Obtain the recorded audio samples by initiating a rendering cycle.
+  // Since it happens on the input bus, the |io_data| parameter is a reference
+  // to the preallocated audio buffer list that the audio unit renders into.
+  // We could make the audio unit provide a buffer via |io_data| instead, but
+  // we currently just use our own.
+  // TODO(henrika): should error handling be improved?
+  result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
+  if (result != noErr) {
+    RTCLogError(@"Failed to render audio.");
+    return result;
+  }
+
+  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
+  // Use the FineAudioBuffer instance to convert between native buffer size
+  // and the 10ms buffer size used by WebRTC.
+  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
+  return noErr;
+}
+
+OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                                          const AudioTimeStamp* time_stamp,
+                                          UInt32 bus_number,
+                                          UInt32 num_frames,
+                                          AudioBufferList* io_data) {
+  RTC_DCHECK_RUN_ON(&io_thread_checker_);
+  // Verify 16-bit, noninterleaved mono PCM signal format.
+  RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
+  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
+  RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);
+
+  // Produce silence and give audio unit a hint about it if playout is not
+  // activated.
+  if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
+    const size_t size_in_bytes = audio_buffer->mDataByteSize;
+    RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
+    *flags |= kAudioUnitRenderAction_OutputIsSilence;
+    memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes);
+    return noErr;
+  }
+
+  // Measure the time since the last call to OnGetPlayoutData() and see if it
+  // is larger than a well-defined threshold that depends on the current IO
+  // buffer size. If so, we have an indication of a glitch in the output audio
+  // since the Core Audio layer will most likely run dry in this state.
+  ++num_playout_callbacks_;
+  const int64_t now_time = rtc::TimeMillis();
+  if (time_stamp->mSampleTime != num_frames) {
+    const int64_t delta_time = now_time - last_playout_time_;
+    const int glitch_threshold = 1.6 * playout_parameters_.GetBufferSizeInMilliseconds();
+    if (delta_time > glitch_threshold) {
+      RTCLogWarning(@"Possible playout audio glitch detected.\n"
+                     "  Time since last OnGetPlayoutData was %lld ms.\n",
+                    delta_time);
+      // Exclude extreme delta values since they most likely do not correspond
+      // to a real glitch. Instead, the most probable cause is that a headset
+      // has been plugged in or unplugged. There are more direct ways to
+      // detect audio device changes (see HandleValidRouteChange()), but
+      // experiments show that using them leads to more complex
+      // implementations.
+      // TODO(henrika): more tests might be needed to come up with an even
+      // better upper limit.
+      if (glitch_threshold < 120 && delta_time > 120) {
+        RTCLog(@"Glitch warning is ignored. Probably caused by device switch.");
+      } else {
+        thread_->Post(RTC_FROM_HERE, this, kMessageTypePlayoutGlitchDetected);
+      }
+    }
+  }
+  last_playout_time_ = now_time;
+
+  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
+  // the native I/O audio unit) and copy the result to the audio buffer in the
+  // |io_data| destination.
+  fine_audio_buffer_->GetPlayoutData(
+      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
+      kFixedPlayoutDelayEstimate);
+  return noErr;
+}
+
+void AudioDeviceIOS::OnMessage(rtc::Message* msg) {
+  switch (msg->message_id) {
+    case kMessageTypeInterruptionBegin:
+      HandleInterruptionBegin();
+      break;
+    case kMessageTypeInterruptionEnd:
+      HandleInterruptionEnd();
+      break;
+    case kMessageTypeValidRouteChange:
+      HandleValidRouteChange();
+      break;
+    case kMessageTypeCanPlayOrRecordChange: {
+      rtc::TypedMessageData<bool>* data = static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
+      HandleCanPlayOrRecordChange(data->data());
+      delete data;
+      break;
+    }
+    case kMessageTypePlayoutGlitchDetected:
+      HandlePlayoutGlitchDetected();
+      break;
+    case kMessageOutputVolumeChange:
+      HandleOutputVolumeChange();
+      break;
+  }
+}
+
+void AudioDeviceIOS::HandleInterruptionBegin() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
+  if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+    RTCLog(@"Stopping the audio unit due to interruption begin.");
+    if (!audio_unit_->Stop()) {
+      RTCLogError(@"Failed to stop the audio unit for interruption begin.");
+    } else {
+      // The audio unit has been stopped but will be restarted when the
+      // interruption ends in HandleInterruptionEnd(). It will result in audio
+      // callbacks from a new native I/O thread which means that we must detach
+      // thread checkers here to be prepared for an upcoming new audio stream.
+      io_thread_checker_.DetachFromThread();
+      // The audio device buffer must also be informed about the interrupted
+      // state so it can detach its thread checkers as well.
+      audio_device_buffer_->NativeAudioPlayoutInterrupted();
+      audio_device_buffer_->NativeAudioRecordingInterrupted();
+    }
+  }
+  is_interrupted_ = true;
+}
+
+void AudioDeviceIOS::HandleInterruptionEnd() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
+          "Updating audio unit state.",
+         is_interrupted_);
+  is_interrupted_ = false;
+  UpdateAudioUnit([RTCAudioSession sharedInstance].canPlayOrRecord);
+}
+
+void AudioDeviceIOS::HandleValidRouteChange() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  RTCLog(@"%@", session);
+  HandleSampleRateChange(session.sampleRate);
+}
+
+void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
+  RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
+  UpdateAudioUnit(can_play_or_record);
+}
+
+void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Handling sample rate change to %f.", sample_rate);
+
+  // Don't do anything if we're interrupted.
+  if (is_interrupted_) {
+    RTCLog(@"Ignoring sample rate change to %f due to interruption.", sample_rate);
+    return;
+  }
+
+  // If we don't have an audio unit yet, or the audio unit is uninitialized,
+  // there is no work to do.
+  if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
+    return;
+  }
+
+  // The audio unit is already initialized or started.
+  // Check to see if the sample rate or buffer size has changed.
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  const double session_sample_rate = session.sampleRate;
+  const NSTimeInterval session_buffer_duration = session.IOBufferDuration;
+  const size_t session_frames_per_buffer =
+      static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
+  const double current_sample_rate = playout_parameters_.sample_rate();
+  const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
+  RTCLog(@"Handling playout sample rate change to: %f\n"
+          "  Session sample rate: %f frames_per_buffer: %lu\n"
+          "  ADM sample rate: %f frames_per_buffer: %lu",
+         sample_rate,
+         session_sample_rate,
+         (unsigned long)session_frames_per_buffer,
+         current_sample_rate,
+         (unsigned long)current_frames_per_buffer);
+
+  // Sample rate and buffer size are the same, no work to do.
+  if (std::abs(current_sample_rate - session_sample_rate) <= DBL_EPSILON &&
+      current_frames_per_buffer == session_frames_per_buffer) {
+    RTCLog(@"Ignoring sample rate change since audio parameters are intact.");
+    return;
+  }
+
+  // Extra sanity check to ensure that the new sample rate is valid.
+  if (session_sample_rate <= 0.0) {
+    RTCLogError(@"Sample rate is invalid: %f", session_sample_rate);
+    return;
+  }
+
+  // We need to adjust our format and buffer sizes.
+  // The stream format is about to be changed and it requires that we first
+  // stop and uninitialize the audio unit to deallocate its resources.
+  RTCLog(@"Stopping and uninitializing audio unit to adjust buffers.");
+  bool restart_audio_unit = false;
+  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
+    audio_unit_->Stop();
+    restart_audio_unit = true;
+  }
+  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
+    audio_unit_->Uninitialize();
+  }
+
+  // Allocate new buffers given the new stream format.
+  SetupAudioBuffersForActiveAudioSession();
+
+  // Initialize the audio unit again with the new sample rate.
+  RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
+  if (!audio_unit_->Initialize(session_sample_rate)) {
+    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f", session_sample_rate);
+    return;
+  }
+
+  // Restart the audio unit if it was already running.
+  if (restart_audio_unit && !audio_unit_->Start()) {
+    RTCLogError(@"Failed to start audio unit with sample rate: %f", session_sample_rate);
+    return;
+  }
+  RTCLog(@"Successfully handled sample rate change.");
+}
+
+void AudioDeviceIOS::HandlePlayoutGlitchDetected() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  // Don't update metrics if we're interrupted since a "glitch" is expected
+  // in this state.
+  if (is_interrupted_) {
+    RTCLog(@"Ignoring audio glitch due to interruption.");
+    return;
+  }
+  // Avoid doing glitch detection for two seconds after a volume change
+  // has been detected to reduce the risk of false alarm.
+  if (last_output_volume_change_time_ > 0 &&
+      rtc::TimeSince(last_output_volume_change_time_) < 2000) {
+    RTCLog(@"Ignoring audio glitch due to recent output volume change.");
+    return;
+  }
+  num_detected_playout_glitches_++;
+  RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);
+
+  int64_t glitch_count = num_detected_playout_glitches_;
+  dispatch_async(dispatch_get_main_queue(), ^{
+    RTCAudioSession* session = [RTCAudioSession sharedInstance];
+    [session notifyDidDetectPlayoutGlitch:glitch_count];
+  });
+}
+
+void AudioDeviceIOS::HandleOutputVolumeChange() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Output volume change detected.");
+  // Store time of this detection so it can be used to defer detection of
+  // glitches too close in time to this event.
+  last_output_volume_change_time_ = rtc::TimeMillis();
+}
+
+void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
+  LOGI() << "UpdateAudioDevicebuffer";
+  // AttachAudioBuffer() is called at construction by the main class but check
+  // just in case.
+  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+  RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
+  RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
+  RTC_DCHECK_EQ(playout_parameters_.channels(), 1);
+  RTC_DCHECK_EQ(record_parameters_.channels(), 1);
+  // Inform the audio device buffer (ADB) about the new audio format.
+  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
+  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+}
+
+void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
+  LOGI() << "SetupAudioBuffersForActiveAudioSession";
+  // Verify the current values once the audio session has been activated.
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  double sample_rate = session.sampleRate;
+  NSTimeInterval io_buffer_duration = session.IOBufferDuration;
+  RTCLog(@"%@", session);
+
+  // Log a warning message when we are unable to set the preferred hardware
+  // sample rate, but continue and use the non-ideal sample rate after
+  // reinitializing the audio parameters. Most BT headsets only support 8kHz
+  // or 16kHz.
+  RTCAudioSessionConfiguration* webRTCConfig = [RTCAudioSessionConfiguration webRTCConfiguration];
+  if (sample_rate != webRTCConfig.sampleRate) {
+    RTC_LOG(LS_WARNING) << "Unable to set the preferred sample rate";
+  }
+
+  // Crash reports indicate that, in rare cases, the reported sample rate
+  // can be less than or equal to zero. If that happens and a valid
+  // sample rate has already been set during initialization, the best we
+  // can do is to reuse the current sample rate.
+  if (sample_rate <= DBL_EPSILON && playout_parameters_.sample_rate() > 0) {
+    RTCLogError(@"Reported rate is invalid: %f. "
+                 "Using %d as sample rate instead.",
+                sample_rate, playout_parameters_.sample_rate());
+    sample_rate = playout_parameters_.sample_rate();
+  }
+
+  // At this stage, we also know the exact IO buffer duration and can add
+  // that info to the existing audio parameters where it is converted into
+  // number of audio frames.
+  // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
+  // Hence, 128 is the size we expect to see in upcoming render callbacks.
+  playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
+  RTC_DCHECK(playout_parameters_.is_complete());
+  record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
+  RTC_DCHECK(record_parameters_.is_complete());
+  RTC_LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
+  RTC_LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
+  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());
+
+  // Update the ADB parameters since the sample rate might have changed.
+  UpdateAudioDeviceBuffer();
+
+  // Create a modified audio buffer class which allows us to ask for,
+  // or deliver, any number of samples (and not only multiple of 10ms) to match
+  // the native audio unit buffer size.
+  RTC_DCHECK(audio_device_buffer_);
+  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_));
+}
+
+bool AudioDeviceIOS::CreateAudioUnit() {
+  RTC_DCHECK(!audio_unit_);
+
+  audio_unit_.reset(new VoiceProcessingAudioUnit(this));
+  if (!audio_unit_->Init()) {
+    audio_unit_.reset();
+    return false;
+  }
+
+  return true;
+}
+
+void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
+         can_play_or_record,
+         is_interrupted_);
+
+  if (is_interrupted_) {
+    RTCLog(@"Ignoring audio unit update due to interruption.");
+    return;
+  }
+
+  // If we're not initialized we don't need to do anything; the audio unit
+  // will be created and initialized in InitPlayOrRecord().
+  if (!audio_is_initialized_) return;
+
+  // If we're initialized, we must have an audio unit.
+  RTC_DCHECK(audio_unit_);
+
+  bool should_initialize_audio_unit = false;
+  bool should_uninitialize_audio_unit = false;
+  bool should_start_audio_unit = false;
+  bool should_stop_audio_unit = false;
+
+  switch (audio_unit_->GetState()) {
+    case VoiceProcessingAudioUnit::kInitRequired:
+      RTCLog(@"VPAU state: InitRequired");
+      RTC_NOTREACHED();
+      break;
+    case VoiceProcessingAudioUnit::kUninitialized:
+      RTCLog(@"VPAU state: Uninitialized");
+      should_initialize_audio_unit = can_play_or_record;
+      should_start_audio_unit = should_initialize_audio_unit && (playing_ || recording_);
+      break;
+    case VoiceProcessingAudioUnit::kInitialized:
+      RTCLog(@"VPAU state: Initialized");
+      should_start_audio_unit = can_play_or_record && (playing_ || recording_);
+      should_uninitialize_audio_unit = !can_play_or_record;
+      break;
+    case VoiceProcessingAudioUnit::kStarted:
+      RTCLog(@"VPAU state: Started");
+      RTC_DCHECK(playing_ || recording_);
+      should_stop_audio_unit = !can_play_or_record;
+      should_uninitialize_audio_unit = should_stop_audio_unit;
+      break;
+  }
+
+  if (should_initialize_audio_unit) {
+    RTCLog(@"Initializing audio unit for UpdateAudioUnit");
+    ConfigureAudioSession();
+    SetupAudioBuffersForActiveAudioSession();
+    if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
+      RTCLogError(@"Failed to initialize audio unit.");
+      return;
+    }
+  }
+
+  if (should_start_audio_unit) {
+    RTCLog(@"Starting audio unit for UpdateAudioUnit");
+    // Log session settings before trying to start audio streaming.
+    RTCAudioSession* session = [RTCAudioSession sharedInstance];
+    RTCLog(@"%@", session);
+    if (!audio_unit_->Start()) {
+      RTCLogError(@"Failed to start audio unit.");
+      return;
+    }
+  }
+
+  if (should_stop_audio_unit) {
+    RTCLog(@"Stopping audio unit for UpdateAudioUnit");
+    if (!audio_unit_->Stop()) {
+      RTCLogError(@"Failed to stop audio unit.");
+      return;
+    }
+  }
+
+  if (should_uninitialize_audio_unit) {
+    RTCLog(@"Uninitializing audio unit for UpdateAudioUnit");
+    audio_unit_->Uninitialize();
+    UnconfigureAudioSession();
+  }
+}
+
+bool AudioDeviceIOS::ConfigureAudioSession() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Configuring audio session.");
+  if (has_configured_session_) {
+    RTCLogWarning(@"Audio session already configured.");
+    return false;
+  }
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  [session lockForConfiguration];
+  bool success = [session configureWebRTCSession:nil];
+  [session unlockForConfiguration];
+  if (success) {
+    has_configured_session_ = true;
+    RTCLog(@"Configured audio session.");
+  } else {
+    RTCLog(@"Failed to configure audio session.");
+  }
+  return success;
+}
+
+void AudioDeviceIOS::UnconfigureAudioSession() {
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+  RTCLog(@"Unconfiguring audio session.");
+  if (!has_configured_session_) {
+    RTCLogWarning(@"Audio session already unconfigured.");
+    return;
+  }
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  [session lockForConfiguration];
+  [session unconfigureWebRTCSession:nil];
+  [session unlockForConfiguration];
+  has_configured_session_ = false;
+  RTCLog(@"Unconfigured audio session.");
+}
+
+bool AudioDeviceIOS::InitPlayOrRecord() {
+  LOGI() << "InitPlayOrRecord";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  // There should be no audio unit at this point.
+  if (!CreateAudioUnit()) {
+    return false;
+  }
+
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  // Subscribe to audio session events.
+  [session pushDelegate:audio_session_observer_];
+  is_interrupted_ = session.isInterrupted;
+
+  // Lock the session to make configuration changes.
+  [session lockForConfiguration];
+  NSError* error = nil;
+  if (![session beginWebRTCSession:&error]) {
+    [session unlockForConfiguration];
+    RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription);
+    return false;
+  }
+
+  // If we are ready to play or record, and if the audio session can be
+  // configured, then initialize the audio unit.
+  if (session.canPlayOrRecord) {
+    if (!ConfigureAudioSession()) {
+      // One possible reason for failure is if an attempt was made to use the
+      // audio session during or after a Media Services failure.
+      // See AVAudioSessionErrorCodeMediaServicesFailed for details.
+      [session unlockForConfiguration];
+      return false;
+    }
+    SetupAudioBuffersForActiveAudioSession();
+    audio_unit_->Initialize(playout_parameters_.sample_rate());
+  }
+
+  // Release the lock.
+  [session unlockForConfiguration];
+  return true;
+}
+
+void AudioDeviceIOS::ShutdownPlayOrRecord() {
+  LOGI() << "ShutdownPlayOrRecord";
+  RTC_DCHECK_RUN_ON(&thread_checker_);
+
+  // Stop the audio unit to prevent any additional audio callbacks.
+  audio_unit_->Stop();
+
+  // Close and delete the voice-processing I/O unit.
+  audio_unit_.reset();
+
+  // Detach thread checker for the AURemoteIO::IOThread to ensure that the
+  // next session uses a fresh thread id.
+  io_thread_checker_.DetachFromThread();
+
+  // Remove audio session notification observers.
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  [session removeDelegate:audio_session_observer_];
+
+  // All I/O should be stopped or paused prior to deactivating the audio
+  // session, hence we deactivate as the last action.
+  [session lockForConfiguration];
+  UnconfigureAudioSession();
+  [session endWebRTCSession:nil];
+  [session unlockForConfiguration];
+}
+
+bool AudioDeviceIOS::IsInterrupted() {
+  return is_interrupted_;
+}
+
+#pragma mark - Not Implemented
+
+int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
+  audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+  return 0;
+}
+
+int16_t AudioDeviceIOS::PlayoutDevices() {
+  // TODO(henrika): improve.
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return (int16_t)1;
+}
+
+int16_t AudioDeviceIOS::RecordingDevices() {
+  // TODO(henrika): improve.
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return (int16_t)1;
+}
+
+int32_t AudioDeviceIOS::InitSpeaker() {
+  return 0;
+}
+
+bool AudioDeviceIOS::SpeakerIsInitialized() const {
+  return true;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::InitMicrophone() {
+  return 0;
+}
+
+bool AudioDeviceIOS::MicrophoneIsInitialized() const {
+  return true;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
+  enabled = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
+  enabled = false;
+  return 0;
+}
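+
+// Note: only mono is supported on iOS (the voice-processing audio unit is
+// configured for a single channel; see GetFormat() in
+// voice_processing_audio_unit.mm), so the stereo queries above report
+// unavailable/disabled.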
+
+int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
+  available = false;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
+                                          char name[kAdmMaxDeviceNameSize],
+                                          char guid[kAdmMaxGuidSize]) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
+                                            char name[kAdmMaxDeviceNameSize],
+                                            char guid[kAdmMaxGuidSize]) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
+  RTC_LOG_F(LS_WARNING) << "Not implemented";
+  return 0;
+}
+
+int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
+  RTC_NOTREACHED() << "Not implemented";
+  return -1;
+}
+
+int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
+  available = true;
+  return 0;
+}
+
+}  // namespace ios_adm
+}  // namespace webrtc
diff --git a/sdk/objc/Framework/Native/src/audio/audio_device_module_ios.h b/sdk/objc/Framework/Native/src/audio/audio_device_module_ios.h
new file mode 100644
index 0000000..5f9bb6a
--- /dev/null
+++ b/sdk/objc/Framework/Native/src/audio/audio_device_module_ios.h
@@ -0,0 +1,143 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
+#define SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
+
+#include <memory>
+
+#include "audio_device_ios.h"
+
+#include "modules/audio_device/audio_device_buffer.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+
+namespace webrtc {
+
+class AudioDeviceGeneric;
+
+namespace ios_adm {
+
+  class AudioDeviceModuleIOS : public AudioDeviceModule {
+   public:
+    int32_t AttachAudioBuffer();
+
+    AudioDeviceModuleIOS();
+    ~AudioDeviceModuleIOS() override;
+
+    // Retrieve the currently utilized audio layer
+    int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override;
+
+    // Full-duplex transportation of PCM audio
+    int32_t RegisterAudioCallback(AudioTransport* audioCallback) override;
+
+    // Main initialization and termination
+    int32_t Init() override;
+    int32_t Terminate() override;
+    bool Initialized() const override;
+
+    // Device enumeration
+    int16_t PlayoutDevices() override;
+    int16_t RecordingDevices() override;
+    int32_t PlayoutDeviceName(uint16_t index,
+                              char name[kAdmMaxDeviceNameSize],
+                              char guid[kAdmMaxGuidSize]) override;
+    int32_t RecordingDeviceName(uint16_t index,
+                                char name[kAdmMaxDeviceNameSize],
+                                char guid[kAdmMaxGuidSize]) override;
+
+    // Device selection
+    int32_t SetPlayoutDevice(uint16_t index) override;
+    int32_t SetPlayoutDevice(WindowsDeviceType device) override;
+    int32_t SetRecordingDevice(uint16_t index) override;
+    int32_t SetRecordingDevice(WindowsDeviceType device) override;
+
+    // Audio transport initialization
+    int32_t PlayoutIsAvailable(bool* available) override;
+    int32_t InitPlayout() override;
+    bool PlayoutIsInitialized() const override;
+    int32_t RecordingIsAvailable(bool* available) override;
+    int32_t InitRecording() override;
+    bool RecordingIsInitialized() const override;
+
+    // Audio transport control
+    int32_t StartPlayout() override;
+    int32_t StopPlayout() override;
+    bool Playing() const override;
+    int32_t StartRecording() override;
+    int32_t StopRecording() override;
+    bool Recording() const override;
+
+    // Audio mixer initialization
+    int32_t InitSpeaker() override;
+    bool SpeakerIsInitialized() const override;
+    int32_t InitMicrophone() override;
+    bool MicrophoneIsInitialized() const override;
+
+    // Speaker volume controls
+    int32_t SpeakerVolumeIsAvailable(bool* available) override;
+    int32_t SetSpeakerVolume(uint32_t volume) override;
+    int32_t SpeakerVolume(uint32_t* volume) const override;
+    int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override;
+    int32_t MinSpeakerVolume(uint32_t* minVolume) const override;
+
+    // Microphone volume controls
+    int32_t MicrophoneVolumeIsAvailable(bool* available) override;
+    int32_t SetMicrophoneVolume(uint32_t volume) override;
+    int32_t MicrophoneVolume(uint32_t* volume) const override;
+    int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override;
+    int32_t MinMicrophoneVolume(uint32_t* minVolume) const override;
+
+    // Speaker mute control
+    int32_t SpeakerMuteIsAvailable(bool* available) override;
+    int32_t SetSpeakerMute(bool enable) override;
+    int32_t SpeakerMute(bool* enabled) const override;
+
+    // Microphone mute control
+    int32_t MicrophoneMuteIsAvailable(bool* available) override;
+    int32_t SetMicrophoneMute(bool enable) override;
+    int32_t MicrophoneMute(bool* enabled) const override;
+
+    // Stereo support
+    int32_t StereoPlayoutIsAvailable(bool* available) const override;
+    int32_t SetStereoPlayout(bool enable) override;
+    int32_t StereoPlayout(bool* enabled) const override;
+    int32_t StereoRecordingIsAvailable(bool* available) const override;
+    int32_t SetStereoRecording(bool enable) override;
+    int32_t StereoRecording(bool* enabled) const override;
+
+    // Delay information and control
+    int32_t PlayoutDelay(uint16_t* delayMS) const override;
+
+    bool BuiltInAECIsAvailable() const override;
+    int32_t EnableBuiltInAEC(bool enable) override;
+    bool BuiltInAGCIsAvailable() const override;
+    int32_t EnableBuiltInAGC(bool enable) override;
+    bool BuiltInNSIsAvailable() const override;
+    int32_t EnableBuiltInNS(bool enable) override;
+
+#if defined(WEBRTC_IOS)
+    int GetPlayoutAudioParameters(AudioParameters* params) const override;
+    int GetRecordAudioParameters(AudioParameters* params) const override;
+#endif  // WEBRTC_IOS
+   private:
+    bool initialized_ = false;
+    std::unique_ptr<AudioDeviceIOS> audio_device_;
+    std::unique_ptr<AudioDeviceBuffer> audio_device_buffer_;
+  };
+} // namespace ios_adm
+} // namespace webrtc
+
+#endif  // SDK_OBJC_FRAMEWORK_NATIVE_SRC_AUDIO_AUDIO_DEVICE_MODULE_IOS_H_
diff --git a/sdk/objc/Framework/Native/src/audio/audio_device_module_ios.mm b/sdk/objc/Framework/Native/src/audio/audio_device_module_ios.mm
new file mode 100644
index 0000000..3e918a2
--- /dev/null
+++ b/sdk/objc/Framework/Native/src/audio/audio_device_module_ios.mm
@@ -0,0 +1,673 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "audio_device_module_ios.h"
+
+#include "modules/audio_device/audio_device_config.h"
+#include "modules/audio_device/audio_device_generic.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "system_wrappers/include/metrics.h"
+
+#if defined(WEBRTC_IOS)
+#include "audio_device_ios.h"
+#endif
+
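+// Convenience macros used by the ADM methods below: bail out with -1 (or
+// false for the _BOOL variant) when a method is called before Init() has
+// completed successfully.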
+#define CHECKinitialized_() \
+  {                         \
+    if (!initialized_) {    \
+      return -1;            \
+    };                      \
+  }
+
+#define CHECKinitialized__BOOL() \
+  {                              \
+    if (!initialized_) {         \
+      return false;              \
+    };                           \
+  }
+
+namespace webrtc {
+namespace ios_adm {
+
+  AudioDeviceModuleIOS::AudioDeviceModuleIOS() {
+    RTC_LOG(INFO) << "current platform is IOS";
+    RTC_LOG(INFO) << "iPhone Audio APIs will be utilized.";
+  }
+
+  int32_t AudioDeviceModuleIOS::AttachAudioBuffer() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    audio_device_->AttachAudioBuffer(audio_device_buffer_.get());
+    return 0;
+  }
+
+  AudioDeviceModuleIOS::~AudioDeviceModuleIOS() {
+    RTC_LOG(INFO) << __FUNCTION__;
+  }
+
+  int32_t AudioDeviceModuleIOS::ActiveAudioLayer(AudioLayer* audioLayer) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    AudioLayer activeAudio;
+    if (audio_device_->ActiveAudioLayer(activeAudio) == -1) {
+      return -1;
+    }
+    *audioLayer = activeAudio;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::Init() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    if (initialized_)
+      return 0;
+
+    audio_device_buffer_.reset(new webrtc::AudioDeviceBuffer());
+    audio_device_.reset(new ios_adm::AudioDeviceIOS());
+    RTC_CHECK(audio_device_);
+
+    this->AttachAudioBuffer();
+
+    AudioDeviceGeneric::InitStatus status = audio_device_->Init();
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.InitializationResult", static_cast<int>(status),
+        static_cast<int>(AudioDeviceGeneric::InitStatus::NUM_STATUSES));
+    if (status != AudioDeviceGeneric::InitStatus::OK) {
+      RTC_LOG(LS_ERROR) << "Audio device initialization failed.";
+      return -1;
+    }
+    initialized_ = true;
+    return 0;
+  }
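+
+  // Illustrative call sequence for a client of this module (a sketch; actual
+  // construction is expected to go through a factory such as the one declared
+  // in sdk/objc/Framework/Native/api/audio_device_module.h):
+  //   adm->Init();
+  //   adm->RegisterAudioCallback(transport);  // |transport| moves PCM data.
+  //   adm->InitPlayout();
+  //   adm->StartPlayout();
+  //   ...
+  //   adm->StopPlayout();
+  //   adm->Terminate();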
+
+  int32_t AudioDeviceModuleIOS::Terminate() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    if (!initialized_)
+      return 0;
+    if (audio_device_->Terminate() == -1) {
+      return -1;
+    }
+    initialized_ = false;
+    return 0;
+  }
+
+  bool AudioDeviceModuleIOS::Initialized() const {
+    RTC_LOG(INFO) << __FUNCTION__ << ": " << initialized_;
+    return initialized_;
+  }
+
+  int32_t AudioDeviceModuleIOS::InitSpeaker() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->InitSpeaker();
+  }
+
+  int32_t AudioDeviceModuleIOS::InitMicrophone() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->InitMicrophone();
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerVolumeIsAvailable(bool* available) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->SpeakerVolumeIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetSpeakerVolume(uint32_t volume) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")";
+    CHECKinitialized_();
+    return audio_device_->SetSpeakerVolume(volume);
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerVolume(uint32_t* volume) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint32_t level = 0;
+    if (audio_device_->SpeakerVolume(level) == -1) {
+      return -1;
+    }
+    *volume = level;
+    RTC_LOG(INFO) << "output: " << *volume;
+    return 0;
+  }
+
+  bool AudioDeviceModuleIOS::SpeakerIsInitialized() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isInitialized = audio_device_->SpeakerIsInitialized();
+    RTC_LOG(INFO) << "output: " << isInitialized;
+    return isInitialized;
+  }
+
+  bool AudioDeviceModuleIOS::MicrophoneIsInitialized() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isInitialized = audio_device_->MicrophoneIsInitialized();
+    RTC_LOG(INFO) << "output: " << isInitialized;
+    return isInitialized;
+  }
+
+  int32_t AudioDeviceModuleIOS::MaxSpeakerVolume(uint32_t* maxVolume) const {
+    CHECKinitialized_();
+    uint32_t maxVol = 0;
+    if (audio_device_->MaxSpeakerVolume(maxVol) == -1) {
+      return -1;
+    }
+    *maxVolume = maxVol;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MinSpeakerVolume(uint32_t* minVolume) const {
+    CHECKinitialized_();
+    uint32_t minVol = 0;
+    if (audio_device_->MinSpeakerVolume(minVol) == -1) {
+      return -1;
+    }
+    *minVolume = minVol;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerMuteIsAvailable(bool* available) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->SpeakerMuteIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetSpeakerMute(bool enable) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    return audio_device_->SetSpeakerMute(enable);
+  }
+
+  int32_t AudioDeviceModuleIOS::SpeakerMute(bool* enabled) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool muted = false;
+    if (audio_device_->SpeakerMute(muted) == -1) {
+      return -1;
+    }
+    *enabled = muted;
+    RTC_LOG(INFO) << "output: " << muted;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneMuteIsAvailable(bool* available) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->MicrophoneMuteIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetMicrophoneMute(bool enable) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    return (audio_device_->SetMicrophoneMute(enable));
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneMute(bool* enabled) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool muted = false;
+    if (audio_device_->MicrophoneMute(muted) == -1) {
+      return -1;
+    }
+    *enabled = muted;
+    RTC_LOG(INFO) << "output: " << muted;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneVolumeIsAvailable(bool* available) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->MicrophoneVolumeIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetMicrophoneVolume(uint32_t volume) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << volume << ")";
+    CHECKinitialized_();
+    return (audio_device_->SetMicrophoneVolume(volume));
+  }
+
+  int32_t AudioDeviceModuleIOS::MicrophoneVolume(uint32_t* volume) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint32_t level = 0;
+    if (audio_device_->MicrophoneVolume(level) == -1) {
+      return -1;
+    }
+    *volume = level;
+    RTC_LOG(INFO) << "output: " << *volume;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoRecordingIsAvailable(
+      bool* available) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->StereoRecordingIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetStereoRecording(bool enable) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    if (audio_device_->RecordingIsInitialized()) {
+      RTC_LOG(WARNING) << "recording in stereo is not supported";
+      return -1;
+    }
+    if (audio_device_->SetStereoRecording(enable) == -1) {
+      RTC_LOG(WARNING) << "failed to change stereo recording";
+      return -1;
+    }
+    int8_t nChannels = enable ? 2 : 1;
+    audio_device_buffer_->SetRecordingChannels(nChannels);
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoRecording(bool* enabled) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool stereo = false;
+    if (audio_device_->StereoRecording(stereo) == -1) {
+      return -1;
+    }
+    *enabled = stereo;
+    RTC_LOG(INFO) << "output: " << stereo;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoPlayoutIsAvailable(bool* available) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->StereoPlayoutIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetStereoPlayout(bool enable) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    if (audio_device_->PlayoutIsInitialized()) {
+      RTC_LOG(LERROR)
+          << "unable to set stereo mode while playing side is initialized";
+      return -1;
+    }
+    if (audio_device_->SetStereoPlayout(enable)) {
+      RTC_LOG(WARNING) << "stereo playout is not supported";
+      return -1;
+    }
+    int8_t nChannels = enable ? 2 : 1;
+    audio_device_buffer_->SetPlayoutChannels(nChannels);
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::StereoPlayout(bool* enabled) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool stereo = false;
+    if (audio_device_->StereoPlayout(stereo) == -1) {
+      return -1;
+    }
+    *enabled = stereo;
+    RTC_LOG(INFO) << "output: " << stereo;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::PlayoutIsAvailable(bool* available) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->PlayoutIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::RecordingIsAvailable(bool* available) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    bool isAvailable = false;
+    if (audio_device_->RecordingIsAvailable(isAvailable) == -1) {
+      return -1;
+    }
+    *available = isAvailable;
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MaxMicrophoneVolume(uint32_t* maxVolume) const {
+    CHECKinitialized_();
+    uint32_t maxVol(0);
+    if (audio_device_->MaxMicrophoneVolume(maxVol) == -1) {
+      return -1;
+    }
+    *maxVolume = maxVol;
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::MinMicrophoneVolume(uint32_t* minVolume) const {
+    CHECKinitialized_();
+    uint32_t minVol(0);
+    if (audio_device_->MinMicrophoneVolume(minVol) == -1) {
+      return -1;
+    }
+    *minVolume = minVol;
+    return 0;
+  }
+
+  int16_t AudioDeviceModuleIOS::PlayoutDevices() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint16_t nPlayoutDevices = audio_device_->PlayoutDevices();
+    RTC_LOG(INFO) << "output: " << nPlayoutDevices;
+    return (int16_t)(nPlayoutDevices);
+  }
+
+  int32_t AudioDeviceModuleIOS::SetPlayoutDevice(uint16_t index) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")";
+    CHECKinitialized_();
+    return audio_device_->SetPlayoutDevice(index);
+  }
+
+  int32_t AudioDeviceModuleIOS::SetPlayoutDevice(WindowsDeviceType device) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->SetPlayoutDevice(device);
+  }
+
+  int32_t AudioDeviceModuleIOS::PlayoutDeviceName(
+      uint16_t index,
+      char name[kAdmMaxDeviceNameSize],
+      char guid[kAdmMaxGuidSize]) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)";
+    CHECKinitialized_();
+    if (name == nullptr) {
+      return -1;
+    }
+    if (audio_device_->PlayoutDeviceName(index, name, guid) == -1) {
+      return -1;
+    }
+    // |name| is guaranteed to be non-null at this point.
+    RTC_LOG(INFO) << "output: name = " << name;
+    if (guid != nullptr) {
+      RTC_LOG(INFO) << "output: guid = " << guid;
+    }
+    return 0;
+  }
+
+  int32_t AudioDeviceModuleIOS::RecordingDeviceName(
+      uint16_t index,
+      char name[kAdmMaxDeviceNameSize],
+      char guid[kAdmMaxGuidSize]) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ", ...)";
+    CHECKinitialized_();
+    if (name == nullptr) {
+      return -1;
+    }
+    if (audio_device_->RecordingDeviceName(index, name, guid) == -1) {
+      return -1;
+    }
+    // |name| is guaranteed to be non-null at this point.
+    RTC_LOG(INFO) << "output: name = " << name;
+    if (guid != nullptr) {
+      RTC_LOG(INFO) << "output: guid = " << guid;
+    }
+    return 0;
+  }
+
+  int16_t AudioDeviceModuleIOS::RecordingDevices() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    uint16_t nRecordingDevices = audio_device_->RecordingDevices();
+    RTC_LOG(INFO) << "output: " << nRecordingDevices;
+    return (int16_t)nRecordingDevices;
+  }
+
+  int32_t AudioDeviceModuleIOS::SetRecordingDevice(uint16_t index) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << index << ")";
+    CHECKinitialized_();
+    return audio_device_->SetRecordingDevice(index);
+  }
+
+  int32_t AudioDeviceModuleIOS::SetRecordingDevice(WindowsDeviceType device) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    return audio_device_->SetRecordingDevice(device);
+  }
+
+  int32_t AudioDeviceModuleIOS::InitPlayout() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (PlayoutIsInitialized()) {
+      return 0;
+    }
+    int32_t result = audio_device_->InitPlayout();
+    RTC_LOG(INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitPlayoutSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  int32_t AudioDeviceModuleIOS::InitRecording() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (RecordingIsInitialized()) {
+      return 0;
+    }
+    int32_t result = audio_device_->InitRecording();
+    RTC_LOG(INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.InitRecordingSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  bool AudioDeviceModuleIOS::PlayoutIsInitialized() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->PlayoutIsInitialized();
+  }
+
+  bool AudioDeviceModuleIOS::RecordingIsInitialized() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->RecordingIsInitialized();
+  }
+
+  int32_t AudioDeviceModuleIOS::StartPlayout() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (Playing()) {
+      return 0;
+    }
+    audio_device_buffer_.get()->StartPlayout();
+    int32_t result = audio_device_->StartPlayout();
+    RTC_LOG(INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartPlayoutSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  int32_t AudioDeviceModuleIOS::StopPlayout() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    int32_t result = audio_device_->StopPlayout();
+    audio_device_buffer_.get()->StopPlayout();
+    RTC_LOG(INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopPlayoutSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  bool AudioDeviceModuleIOS::Playing() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->Playing();
+  }
+
+  int32_t AudioDeviceModuleIOS::StartRecording() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    if (Recording()) {
+      return 0;
+    }
+    audio_device_buffer_.get()->StartRecording();
+    int32_t result = audio_device_->StartRecording();
+    RTC_LOG(INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  int32_t AudioDeviceModuleIOS::StopRecording() {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized_();
+    int32_t result = audio_device_->StopRecording();
+    audio_device_buffer_.get()->StopRecording();
+    RTC_LOG(INFO) << "output: " << result;
+    RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StopRecordingSuccess",
+                          static_cast<int>(result == 0));
+    return result;
+  }
+
+  bool AudioDeviceModuleIOS::Recording() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    return audio_device_->Recording();
+  }
+
+  int32_t AudioDeviceModuleIOS::RegisterAudioCallback(
+      AudioTransport* audioCallback) {
+    RTC_LOG(INFO) << __FUNCTION__;
+    return audio_device_buffer_.get()->RegisterAudioCallback(audioCallback);
+  }
+
+  int32_t AudioDeviceModuleIOS::PlayoutDelay(uint16_t* delayMS) const {
+    CHECKinitialized_();
+    uint16_t delay = 0;
+    if (audio_device_->PlayoutDelay(delay) == -1) {
+      RTC_LOG(LERROR) << "failed to retrieve the playout delay";
+      return -1;
+    }
+    *delayMS = delay;
+    return 0;
+  }
+
+  bool AudioDeviceModuleIOS::BuiltInAECIsAvailable() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isAvailable = audio_device_->BuiltInAECIsAvailable();
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return isAvailable;
+  }
+
+  int32_t AudioDeviceModuleIOS::EnableBuiltInAEC(bool enable) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    int32_t ok = audio_device_->EnableBuiltInAEC(enable);
+    RTC_LOG(INFO) << "output: " << ok;
+    return ok;
+  }
+
+  bool AudioDeviceModuleIOS::BuiltInAGCIsAvailable() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isAvailable = audio_device_->BuiltInAGCIsAvailable();
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return isAvailable;
+  }
+
+  int32_t AudioDeviceModuleIOS::EnableBuiltInAGC(bool enable) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    int32_t ok = audio_device_->EnableBuiltInAGC(enable);
+    RTC_LOG(INFO) << "output: " << ok;
+    return ok;
+  }
+
+  bool AudioDeviceModuleIOS::BuiltInNSIsAvailable() const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    CHECKinitialized__BOOL();
+    bool isAvailable = audio_device_->BuiltInNSIsAvailable();
+    RTC_LOG(INFO) << "output: " << isAvailable;
+    return isAvailable;
+  }
+
+  int32_t AudioDeviceModuleIOS::EnableBuiltInNS(bool enable) {
+    RTC_LOG(INFO) << __FUNCTION__ << "(" << enable << ")";
+    CHECKinitialized_();
+    int32_t ok = audio_device_->EnableBuiltInNS(enable);
+    RTC_LOG(INFO) << "output: " << ok;
+    return ok;
+  }
+
+#if defined(WEBRTC_IOS)
+  int AudioDeviceModuleIOS::GetPlayoutAudioParameters(
+      AudioParameters* params) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    int r = audio_device_->GetPlayoutAudioParameters(params);
+    RTC_LOG(INFO) << "output: " << r;
+    return r;
+  }
+
+  int AudioDeviceModuleIOS::GetRecordAudioParameters(
+      AudioParameters* params) const {
+    RTC_LOG(INFO) << __FUNCTION__;
+    int r = audio_device_->GetRecordAudioParameters(params);
+    RTC_LOG(INFO) << "output: " << r;
+    return r;
+  }
+#endif  // WEBRTC_IOS
+
+}  // namespace ios_adm
+}  // namespace webrtc
diff --git a/sdk/objc/Framework/Native/src/audio/audio_session_observer.h b/sdk/objc/Framework/Native/src/audio/audio_session_observer.h
new file mode 100644
index 0000000..13a979b
--- /dev/null
+++ b/sdk/objc/Framework/Native/src/audio/audio_session_observer.h
@@ -0,0 +1,42 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_IOS_AUDIO_SESSION_OBSERVER_H_
+#define MODULES_AUDIO_DEVICE_IOS_AUDIO_SESSION_OBSERVER_H_
+
+#include "rtc_base/asyncinvoker.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+// Observer interface for listening to AVAudioSession events.
+class AudioSessionObserver {
+ public:
+  // Called when audio session interruption begins.
+  virtual void OnInterruptionBegin() = 0;
+
+  // Called when audio session interruption ends.
+  virtual void OnInterruptionEnd() = 0;
+
+  // Called when audio route changes.
+  virtual void OnValidRouteChange() = 0;
+
+  // Called when the ability to play or record changes.
+  virtual void OnCanPlayOrRecordChange(bool can_play_or_record) = 0;
+
+  virtual void OnChangedOutputVolume() = 0;
+
+ protected:
+  virtual ~AudioSessionObserver() {}
+};
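+
+// Note: in this SDK the interface is implemented by AudioDeviceIOS, with
+// RTCNativeAudioSessionDelegateAdapter translating RTCAudioSessionDelegate
+// calls into these notifications.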
+
+}  // namespace webrtc
+
+#endif  //  MODULES_AUDIO_DEVICE_IOS_AUDIO_SESSION_OBSERVER_H_
diff --git a/sdk/objc/Framework/Native/src/audio/voice_processing_audio_unit.h b/sdk/objc/Framework/Native/src/audio/voice_processing_audio_unit.h
new file mode 100644
index 0000000..ae5e1a9
--- /dev/null
+++ b/sdk/objc/Framework/Native/src/audio/voice_processing_audio_unit.h
@@ -0,0 +1,139 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
+#define MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
+
+#include <AudioUnit/AudioUnit.h>
+
+namespace webrtc {
+namespace ios_adm {
+
+class VoiceProcessingAudioUnitObserver {
+ public:
+  // Callback function called on a real-time priority I/O thread from the audio
+  // unit. This method is used to signal that recorded audio is available.
+  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                         const AudioTimeStamp* time_stamp,
+                                         UInt32 bus_number,
+                                         UInt32 num_frames,
+                                         AudioBufferList* io_data) = 0;
+
+  // Callback function called on a real-time priority I/O thread from the audio
+  // unit. This method is used to provide audio samples to the audio unit.
+  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
+                                    const AudioTimeStamp* time_stamp,
+                                    UInt32 bus_number,
+                                    UInt32 num_frames,
+                                    AudioBufferList* io_data) = 0;
+
+ protected:
+  ~VoiceProcessingAudioUnitObserver() {}
+};
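+
+// Both callbacks run on a real-time priority audio thread, so implementations
+// should avoid blocking, locking and heap allocation in these paths.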
+
+// Convenience class to abstract away the management of a Voice Processing
+// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
+// as the Remote I/O unit (supports full duplex low-latency audio input and
+// output) and adds AEC for two-way duplex communication. It also adds AGC,
+// adjustment of voice-processing quality, and muting. Hence, ideal for
+// VoIP applications.
+class VoiceProcessingAudioUnit {
+ public:
+  explicit VoiceProcessingAudioUnit(VoiceProcessingAudioUnitObserver* observer);
+  ~VoiceProcessingAudioUnit();
+
+  // TODO(tkchin): enum for state and state checking.
+  enum State : int32_t {
+    // Init() should be called.
+    kInitRequired,
+    // Audio unit created but not initialized.
+    kUninitialized,
+    // Initialized but not started. Equivalent to stopped.
+    kInitialized,
+    // Initialized and started.
+    kStarted,
+  };
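+
+  // Expected state transitions (sketch):
+  //   kInitRequired --Init()--> kUninitialized --Initialize()--> kInitialized
+  //   kInitialized --Start()--> kStarted --Stop()--> kInitialized
+  //   kInitialized --Uninitialize()--> kUninitialized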
+
+  // Number of bytes per audio sample for 16-bit signed integer representation.
+  static const UInt32 kBytesPerSample;
+
+  // Initializes this class by creating the underlying audio unit instance.
+  // Creates a Voice-Processing I/O unit and configures it for full-duplex
+  // audio. The stream format is chosen to avoid internal resampling and to
+  // match WebRTC's 10 ms callback rate as closely as possible.
+  // Does not initialize the audio unit.
+  bool Init();
+
+  VoiceProcessingAudioUnit::State GetState() const;
+
+  // Initializes the underlying audio unit with the given sample rate.
+  bool Initialize(Float64 sample_rate);
+
+  // Starts the underlying audio unit.
+  bool Start();
+
+  // Stops the underlying audio unit.
+  bool Stop();
+
+  // Uninitializes the underlying audio unit.
+  bool Uninitialize();
+
+  // Calls render on the underlying audio unit.
+  OSStatus Render(AudioUnitRenderActionFlags* flags,
+                  const AudioTimeStamp* time_stamp,
+                  UInt32 output_bus_number,
+                  UInt32 num_frames,
+                  AudioBufferList* io_data);
+
+ private:
+  // The C API used to set callbacks requires static functions. When these are
+  // called, they will invoke the relevant instance method by casting
+  // in_ref_con to VoiceProcessingAudioUnit*.
+  static OSStatus OnGetPlayoutData(void* in_ref_con,
+                                   AudioUnitRenderActionFlags* flags,
+                                   const AudioTimeStamp* time_stamp,
+                                   UInt32 bus_number,
+                                   UInt32 num_frames,
+                                   AudioBufferList* io_data);
+  static OSStatus OnDeliverRecordedData(void* in_ref_con,
+                                        AudioUnitRenderActionFlags* flags,
+                                        const AudioTimeStamp* time_stamp,
+                                        UInt32 bus_number,
+                                        UInt32 num_frames,
+                                        AudioBufferList* io_data);
+
+  // Notifies observer that samples are needed for playback.
+  OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
+                                const AudioTimeStamp* time_stamp,
+                                UInt32 bus_number,
+                                UInt32 num_frames,
+                                AudioBufferList* io_data);
+  // Notifies observer that recorded samples are available for render.
+  OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
+                                     const AudioTimeStamp* time_stamp,
+                                     UInt32 bus_number,
+                                     UInt32 num_frames,
+                                     AudioBufferList* io_data);
+
+  // Returns the predetermined format with a specific sample rate. See
+  // implementation file for details on format.
+  AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;
+
+  // Deletes the underlying audio unit.
+  void DisposeAudioUnit();
+
+  VoiceProcessingAudioUnitObserver* observer_;
+  AudioUnit vpio_unit_;
+  VoiceProcessingAudioUnit::State state_;
+};
+}  // namespace ios_adm
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_DEVICE_IOS_VOICE_PROCESSING_AUDIO_UNIT_H_
diff --git a/sdk/objc/Framework/Native/src/audio/voice_processing_audio_unit.mm b/sdk/objc/Framework/Native/src/audio/voice_processing_audio_unit.mm
new file mode 100644
index 0000000..eddb139
--- /dev/null
+++ b/sdk/objc/Framework/Native/src/audio/voice_processing_audio_unit.mm
@@ -0,0 +1,470 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "voice_processing_audio_unit.h"
+
+#include "rtc_base/checks.h"
+#include "rtc_base/system/fallthrough.h"
+#include "system_wrappers/include/metrics.h"
+
+#import "WebRTC/RTCLogging.h"
+#import "sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h"
+
+#if !defined(NDEBUG)
+static void LogStreamDescription(AudioStreamBasicDescription description) {
+  char formatIdString[5];
+  UInt32 formatId = CFSwapInt32HostToBig(description.mFormatID);
+  bcopy(&formatId, formatIdString, 4);
+  formatIdString[4] = '\0';
+  RTCLog(@"AudioStreamBasicDescription: {\n"
+          "  mSampleRate: %.2f\n"
+          "  formatIDString: %s\n"
+          "  mFormatFlags: 0x%X\n"
+          "  mBytesPerPacket: %u\n"
+          "  mFramesPerPacket: %u\n"
+          "  mBytesPerFrame: %u\n"
+          "  mChannelsPerFrame: %u\n"
+          "  mBitsPerChannel: %u\n"
+          "  mReserved: %u\n}",
+         description.mSampleRate, formatIdString,
+         static_cast<unsigned int>(description.mFormatFlags),
+         static_cast<unsigned int>(description.mBytesPerPacket),
+         static_cast<unsigned int>(description.mFramesPerPacket),
+         static_cast<unsigned int>(description.mBytesPerFrame),
+         static_cast<unsigned int>(description.mChannelsPerFrame),
+         static_cast<unsigned int>(description.mBitsPerChannel),
+         static_cast<unsigned int>(description.mReserved));
+}
+#endif
+
+namespace webrtc {
+namespace ios_adm {
+
+// Calls to AudioUnitInitialize() can fail if called back-to-back on different
+// ADM instances. A fall-back solution is to allow multiple sequential calls
+// with a short delay between each. This constant sets the maximum number of
+// allowed initialization attempts.
+static const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
+// A VP I/O unit's bus 1 connects to input hardware (microphone).
+static const AudioUnitElement kInputBus = 1;
+// A VP I/O unit's bus 0 connects to output hardware (speaker).
+static const AudioUnitElement kOutputBus = 0;
+
+// Returns the automatic gain control (AGC) state on the processed microphone
+// signal. Should be on by default for Voice Processing audio units.
+static OSStatus GetAGCState(AudioUnit audio_unit, UInt32* enabled) {
+  RTC_DCHECK(audio_unit);
+  UInt32 size = sizeof(*enabled);
+  OSStatus result = AudioUnitGetProperty(audio_unit,
+                                         kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+                                         kAudioUnitScope_Global,
+                                         kInputBus,
+                                         enabled,
+                                         &size);
+  RTCLog(@"VPIO unit AGC: %u", static_cast<unsigned int>(*enabled));
+  return result;
+}
+
+VoiceProcessingAudioUnit::VoiceProcessingAudioUnit(
+    VoiceProcessingAudioUnitObserver* observer)
+    : observer_(observer), vpio_unit_(nullptr), state_(kInitRequired) {
+  RTC_DCHECK(observer);
+}
+
+VoiceProcessingAudioUnit::~VoiceProcessingAudioUnit() {
+  DisposeAudioUnit();
+}
+
+const UInt32 VoiceProcessingAudioUnit::kBytesPerSample = 2;
+
+bool VoiceProcessingAudioUnit::Init() {
+  RTC_DCHECK_EQ(state_, kInitRequired);
+
+  // Create an audio component description to identify the Voice Processing
+  // I/O audio unit.
+  AudioComponentDescription vpio_unit_description;
+  vpio_unit_description.componentType = kAudioUnitType_Output;
+  vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
+  vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
+  vpio_unit_description.componentFlags = 0;
+  vpio_unit_description.componentFlagsMask = 0;
+
+  // Obtain an audio unit instance given the description.
+  AudioComponent found_vpio_unit_ref =
+      AudioComponentFindNext(nullptr, &vpio_unit_description);
+
+  // Create a Voice Processing IO audio unit.
+  OSStatus result = noErr;
+  result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
+  if (result != noErr) {
+    vpio_unit_ = nullptr;
+    RTCLogError(@"AudioComponentInstanceNew failed. Error=%ld.", (long)result);
+    return false;
+  }
+
+  // Enable input on the input scope of the input element.
+  UInt32 enable_input = 1;
+  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Input, kInputBus, &enable_input,
+                                sizeof(enable_input));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to enable input on input scope of input element. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Enable output on the output scope of the output element.
+  UInt32 enable_output = 1;
+  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
+                                kAudioUnitScope_Output, kOutputBus,
+                                &enable_output, sizeof(enable_output));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to enable output on output scope of output element. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Specify the callback function that provides audio samples to the audio
+  // unit.
+  AURenderCallbackStruct render_callback;
+  render_callback.inputProc = OnGetPlayoutData;
+  render_callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(
+      vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
+      kOutputBus, &render_callback, sizeof(render_callback));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to specify the render callback on the output bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Disable AU buffer allocation for the recorder, we allocate our own.
+  // TODO(henrika): not sure that it actually saves resource to make this call.
+  UInt32 flag = 0;
+  result = AudioUnitSetProperty(
+      vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
+      kAudioUnitScope_Output, kInputBus, &flag, sizeof(flag));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to disable buffer allocation on the input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Specify the callback to be called by the I/O thread to us when input audio
+  // is available. The recorded samples can then be obtained by calling the
+  // AudioUnitRender() method.
+  AURenderCallbackStruct input_callback;
+  input_callback.inputProc = OnDeliverRecordedData;
+  input_callback.inputProcRefCon = this;
+  result = AudioUnitSetProperty(vpio_unit_,
+                                kAudioOutputUnitProperty_SetInputCallback,
+                                kAudioUnitScope_Global, kInputBus,
+                                &input_callback, sizeof(input_callback));
+  if (result != noErr) {
+    DisposeAudioUnit();
+    RTCLogError(@"Failed to specify the input callback on the input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
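+  // The unit is now fully wired: bus 1 (microphone) delivers recorded audio
+  // through OnDeliverRecordedData, and bus 0 (speaker) pulls playout audio
+  // through OnGetPlayoutData.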
+  state_ = kUninitialized;
+  return true;
+}
+
+VoiceProcessingAudioUnit::State VoiceProcessingAudioUnit::GetState() const {
+  return state_;
+}
+
+bool VoiceProcessingAudioUnit::Initialize(Float64 sample_rate) {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Initializing audio unit with sample rate: %f", sample_rate);
+
+  OSStatus result = noErr;
+  AudioStreamBasicDescription format = GetFormat(sample_rate);
+  UInt32 size = sizeof(format);
+#if !defined(NDEBUG)
+  LogStreamDescription(format);
+#endif
+
+  // Set the format on the output scope of the input element/bus.
+  result =
+      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Output, kInputBus, &format, size);
+  if (result != noErr) {
+    RTCLogError(@"Failed to set format on output scope of input bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Set the format on the input scope of the output element/bus.
+  result =
+      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
+                           kAudioUnitScope_Input, kOutputBus, &format, size);
+  if (result != noErr) {
+    RTCLogError(@"Failed to set format on input scope of output bus. "
+                 "Error=%ld.",
+                (long)result);
+    return false;
+  }
+
+  // Initialize the Voice Processing I/O unit instance.
+  // Calls to AudioUnitInitialize() can fail if called back-to-back on
+  // different ADM instances. The error code in this case is -66635, which is
+  // undocumented. Tests have shown that calling AudioUnitInitialize a second
+  // time, after a short sleep, avoids this issue.
+  // See webrtc:5166 for details.
+  int failed_initialize_attempts = 0;
+  result = AudioUnitInitialize(vpio_unit_);
+  while (result != noErr) {
+    RTCLogError(@"Failed to initialize the Voice Processing I/O unit. "
+                 "Error=%ld.",
+                (long)result);
+    ++failed_initialize_attempts;
+    if (failed_initialize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) {
+      // Max number of initialization attempts exceeded, hence abort.
+      RTCLogError(@"Too many initialization attempts.");
+      return false;
+    }
+    RTCLog(@"Pause 100ms and try audio unit initialization again...");
+    [NSThread sleepForTimeInterval:0.1f];
+    result = AudioUnitInitialize(vpio_unit_);
+  }
+  // The retry loop above only exits successfully with result == noErr.
+  RTCLog(@"Voice Processing I/O unit is now initialized.");
+
+  // AGC should be enabled by default for Voice Processing I/O units, but it is
+  // checked below and enabled explicitly if needed. This scheme makes
+  // absolutely sure that the AGC is enabled, since we have seen cases where
+  // only zeros are recorded and a disabled AGC is one possible cause.
+  int agc_was_enabled_by_default = 0;
+  UInt32 agc_is_enabled = 0;
+  result = GetAGCState(vpio_unit_, &agc_is_enabled);
+  if (result != noErr) {
+    RTCLogError(@"Failed to get AGC state (1st attempt). "
+                 "Error=%ld.",
+                (long)result);
+    // Example of error code: kAudioUnitErr_NoConnection (-10876).
+    // All error codes related to audio units are negative and are therefore
+    // converted into a positive value to match the UMA APIs.
+    RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+        "WebRTC.Audio.GetAGCStateErrorCode1", (-1) * result);
+  } else if (agc_is_enabled) {
+    // Remember that the AGC was enabled by default. Will be used in UMA.
+    agc_was_enabled_by_default = 1;
+  } else {
+    // AGC was initially disabled => try to enable it explicitly.
+    UInt32 enable_agc = 1;
+    result =
+        AudioUnitSetProperty(vpio_unit_,
+                             kAUVoiceIOProperty_VoiceProcessingEnableAGC,
+                             kAudioUnitScope_Global, kInputBus, &enable_agc,
+                             sizeof(enable_agc));
+    if (result != noErr) {
+      RTCLogError(@"Failed to enable the built-in AGC. "
+                   "Error=%ld.",
+                  (long)result);
+      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+          "WebRTC.Audio.SetAGCStateErrorCode", (-1) * result);
+    }
+    result = GetAGCState(vpio_unit_, &agc_is_enabled);
+    if (result != noErr) {
+      RTCLogError(@"Failed to get AGC state (2nd attempt). "
+                   "Error=%ld.",
+                  (long)result);
+      RTC_HISTOGRAM_COUNTS_SPARSE_100000(
+          "WebRTC.Audio.GetAGCStateErrorCode2", (-1) * result);
+    }
+  }
+
+  // Track if the built-in AGC was enabled by default (as it should) or not.
+  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCWasEnabledByDefault",
+                        agc_was_enabled_by_default);
+  RTCLog(@"WebRTC.Audio.BuiltInAGCWasEnabledByDefault: %d",
+         agc_was_enabled_by_default);
+  // As a final step, add a UMA histogram for tracking the AGC state.
+  // At this stage, the AGC should be enabled, and if it is not, more work is
+  // needed to find out the root cause.
+  RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.BuiltInAGCIsEnabled", agc_is_enabled);
+  RTCLog(@"WebRTC.Audio.BuiltInAGCIsEnabled: %u",
+         static_cast<unsigned int>(agc_is_enabled));
+
+  state_ = kInitialized;
+  return true;
+}
+
+bool VoiceProcessingAudioUnit::Start() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Starting audio unit.");
+
+  OSStatus result = AudioOutputUnitStart(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to start audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Started audio unit");
+  }
+  state_ = kStarted;
+  return true;
+}
+
+bool VoiceProcessingAudioUnit::Stop() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Stopping audio unit.");
+
+  OSStatus result = AudioOutputUnitStop(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to stop audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Stopped audio unit");
+  }
+
+  state_ = kInitialized;
+  return true;
+}
+
+bool VoiceProcessingAudioUnit::Uninitialize() {
+  RTC_DCHECK_GE(state_, kUninitialized);
+  RTCLog(@"Unintializing audio unit.");
+
+  OSStatus result = AudioUnitUninitialize(vpio_unit_);
+  if (result != noErr) {
+    RTCLogError(@"Failed to uninitialize audio unit. Error=%ld", (long)result);
+    return false;
+  } else {
+    RTCLog(@"Uninitialized audio unit.");
+  }
+
+  state_ = kUninitialized;
+  return true;
+}
+
+OSStatus VoiceProcessingAudioUnit::Render(AudioUnitRenderActionFlags* flags,
+                                          const AudioTimeStamp* time_stamp,
+                                          UInt32 output_bus_number,
+                                          UInt32 num_frames,
+                                          AudioBufferList* io_data) {
+  RTC_DCHECK(vpio_unit_) << "Init() not called.";
+
+  OSStatus result = AudioUnitRender(vpio_unit_, flags, time_stamp,
+                                    output_bus_number, num_frames, io_data);
+  if (result != noErr) {
+    RTCLogError(@"Failed to render audio unit. Error=%ld", (long)result);
+  }
+  return result;
+}
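+
+// Render() is typically invoked from the owner's OnDeliverRecordedData()
+// callback to pull the recorded samples out of the audio unit and into a
+// caller-provided buffer list.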
+
+OSStatus VoiceProcessingAudioUnit::OnGetPlayoutData(
+    void* in_ref_con,
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  VoiceProcessingAudioUnit* audio_unit =
+      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+  return audio_unit->NotifyGetPlayoutData(flags, time_stamp, bus_number,
+                                          num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::OnDeliverRecordedData(
+    void* in_ref_con,
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  VoiceProcessingAudioUnit* audio_unit =
+      static_cast<VoiceProcessingAudioUnit*>(in_ref_con);
+  return audio_unit->NotifyDeliverRecordedData(flags, time_stamp, bus_number,
+                                               num_frames, io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyGetPlayoutData(
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  return observer_->OnGetPlayoutData(flags, time_stamp, bus_number, num_frames,
+                                     io_data);
+}
+
+OSStatus VoiceProcessingAudioUnit::NotifyDeliverRecordedData(
+    AudioUnitRenderActionFlags* flags,
+    const AudioTimeStamp* time_stamp,
+    UInt32 bus_number,
+    UInt32 num_frames,
+    AudioBufferList* io_data) {
+  return observer_->OnDeliverRecordedData(flags, time_stamp, bus_number,
+                                          num_frames, io_data);
+}
+
+AudioStreamBasicDescription VoiceProcessingAudioUnit::GetFormat(
+    Float64 sample_rate) const {
+  // Set the application formats for input and output:
+  // - use same format in both directions
+  // - avoid resampling in the I/O unit by using the hardware sample rate
+  // - linear PCM => noncompressed audio data format with one frame per packet
+  // - no need to specify interleaving since only mono is supported
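+  // For example, with kBytesPerSample == 2 this resolves to 16-bit
+  // signed-integer, packed, mono linear PCM with one frame per packet.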
+  AudioStreamBasicDescription format;
+  RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels);
+  format.mSampleRate = sample_rate;
+  format.mFormatID = kAudioFormatLinearPCM;
+  format.mFormatFlags =
+      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
+  format.mBytesPerPacket = kBytesPerSample;
+  format.mFramesPerPacket = 1;  // uncompressed.
+  format.mBytesPerFrame = kBytesPerSample;
+  format.mChannelsPerFrame = kRTCAudioSessionPreferredNumberOfChannels;
+  format.mBitsPerChannel = 8 * kBytesPerSample;
+  return format;
+}
+
+void VoiceProcessingAudioUnit::DisposeAudioUnit() {
+  if (vpio_unit_) {
+    switch (state_) {
+      case kStarted:
+        Stop();
+        RTC_FALLTHROUGH();
+      case kInitialized:
+        Uninitialize();
+        break;
+      case kUninitialized:
+        RTC_FALLTHROUGH();
+      case kInitRequired:
+        break;
+    }
+
+    RTCLog(@"Disposing audio unit.");
+    OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
+    if (result != noErr) {
+      RTCLogError(@"AudioComponentInstanceDispose failed. Error=%ld.",
+                  (long)result);
+    }
+    vpio_unit_ = nullptr;
+  }
+}
+
+}  // namespace ios_adm
+}  // namespace webrtc
diff --git a/sdk/objc/Framework/UnitTests/RTCAudioDeviceModule_xctest.mm b/sdk/objc/Framework/UnitTests/RTCAudioDeviceModule_xctest.mm
new file mode 100644
index 0000000..9644b2b
--- /dev/null
+++ b/sdk/objc/Framework/UnitTests/RTCAudioDeviceModule_xctest.mm
@@ -0,0 +1,592 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <XCTest/XCTest.h>
+
+#if defined(WEBRTC_IOS)
+#import "sdk/objc/Framework/Native/api/audio_device_module.h"
+#endif
+
+#include "system_wrappers/include/event_wrapper.h"
+
+#include "rtc_base/scoped_ref_ptr.h"
+
+typedef int32_t(^NeedMorePlayDataBlock)(const size_t nSamples,
+                                        const size_t nBytesPerSample,
+                                        const size_t nChannels,
+                                        const uint32_t samplesPerSec,
+                                        void* audioSamples,
+                                        size_t& nSamplesOut,
+                                        int64_t* elapsed_time_ms,
+                                        int64_t* ntp_time_ms);
+
+typedef int32_t(^RecordedDataIsAvailableBlock)(const void* audioSamples,
+                                               const size_t nSamples,
+                                               const size_t nBytesPerSample,
+                                               const size_t nChannels,
+                                               const uint32_t samplesPerSec,
+                                               const uint32_t totalDelayMS,
+                                               const int32_t clockDrift,
+                                               const uint32_t currentMicLevel,
+                                               const bool keyPressed,
+                                               uint32_t& newMicLevel);
+
+// This class implements the AudioTransport API and forwards all methods to the appropriate blocks.
+class MockAudioTransport : public webrtc::AudioTransport {
+ public:
+  MockAudioTransport() {}
+  ~MockAudioTransport() {}
+
+  void expectNeedMorePlayData(NeedMorePlayDataBlock block) {
+    needMorePlayDataBlock = block;
+  }
+
+  void expectRecordedDataIsAvailable(RecordedDataIsAvailableBlock block) {
+    recordedDataIsAvailableBlock = block;
+  }
+
+  int32_t NeedMorePlayData(const size_t nSamples,
+                           const size_t nBytesPerSample,
+                           const size_t nChannels,
+                           const uint32_t samplesPerSec,
+                           void* audioSamples,
+                           size_t& nSamplesOut,
+                           int64_t* elapsed_time_ms,
+                           int64_t* ntp_time_ms) override {
+    return needMorePlayDataBlock(nSamples,
+                                 nBytesPerSample,
+                                 nChannels,
+                                 samplesPerSec,
+                                 audioSamples,
+                                 nSamplesOut,
+                                 elapsed_time_ms,
+                                 ntp_time_ms);
+  }
+
+  int32_t RecordedDataIsAvailable(const void* audioSamples,
+                                  const size_t nSamples,
+                                  const size_t nBytesPerSample,
+                                  const size_t nChannels,
+                                  const uint32_t samplesPerSec,
+                                  const uint32_t totalDelayMS,
+                                  const int32_t clockDrift,
+                                  const uint32_t currentMicLevel,
+                                  const bool keyPressed,
+                                  uint32_t& newMicLevel) {
+    return recordedDataIsAvailableBlock(audioSamples,
+                                        nSamples,
+                                        nBytesPerSample,
+                                        nChannels,
+                                        samplesPerSec,
+                                        totalDelayMS,
+                                        clockDrift,
+                                        currentMicLevel,
+                                        keyPressed,
+                                        newMicLevel);
+  }
+
+  void PullRenderData(int bits_per_sample,
+                      int sample_rate,
+                      size_t number_of_channels,
+                      size_t number_of_frames,
+                      void* audio_data,
+                      int64_t* elapsed_time_ms,
+                      int64_t* ntp_time_ms) override {}
+
+ private:
+  NeedMorePlayDataBlock needMorePlayDataBlock;
+  RecordedDataIsAvailableBlock recordedDataIsAvailableBlock;
+};
+
+// Number of callbacks (input or output) the test waits for before fulfilling
+// its expectation and considering the test passed.
+static const NSUInteger kNumCallbacks = 10;
+// Maximum time we wait for the expectations to be fulfilled while counting
+// callbacks.
+static const NSTimeInterval kTestTimeOutInSec = 20.0;
+// Number of bits per PCM audio sample.
+static const NSUInteger kBitsPerSample = 16;
+// Number of bytes per PCM audio sample.
+static const NSUInteger kBytesPerSample = kBitsPerSample / 8;
+// Average number of audio callbacks per second assuming 10ms packet size.
+static const NSUInteger kNumCallbacksPerSecond = 100;
+// Play out a test file during this time (unit is in seconds).
+static const NSUInteger kFilePlayTimeInSec = 15;
+// Run the full-duplex test during this time (unit is in seconds).
+// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
+static const NSUInteger kFullDuplexTimeInSec = 10;
+// Wait for the callback sequence to stabilize by ignoring this number of
+// initial callbacks (avoids initial FIFO access).
+// Only used in the RunPlayoutAndRecordingInFullDuplex test.
+static const NSUInteger kNumIgnoreFirstCallbacks = 50;
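+// Note: with 10 ms buffers there are kNumCallbacksPerSecond (100) callbacks
+// per second, so a full-duplex run of kFullDuplexTimeInSec (10) seconds
+// expects roughly 10 * 100 = 1000 playout callbacks.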
+
+@interface RTCAudioDeviceModuleTests : XCTestCase {
+  rtc::scoped_refptr<webrtc::AudioDeviceModule> audioDeviceModule;
+  webrtc::AudioParameters playoutParameters;
+  webrtc::AudioParameters recordParameters;
+  MockAudioTransport mock;
+}
+
+@end
+
+@implementation RTCAudioDeviceModuleTests
+
+- (void)setUp {
+  [super setUp];
+  audioDeviceModule = webrtc::CreateAudioDeviceModule();
+  XCTAssertEqual(0, audioDeviceModule->Init());
+  XCTAssertEqual(0, audioDeviceModule->GetPlayoutAudioParameters(&playoutParameters));
+  XCTAssertEqual(0, audioDeviceModule->GetRecordAudioParameters(&recordParameters));
+}
+
+- (void)tearDown {
+  XCTAssertEqual(0, audioDeviceModule->Terminate());
+  audioDeviceModule = nullptr;
+  [super tearDown];
+}
+
+- (void)startPlayout {
+  XCTAssertFalse(audioDeviceModule->Playing());
+  XCTAssertEqual(0, audioDeviceModule->InitPlayout());
+  XCTAssertTrue(audioDeviceModule->PlayoutIsInitialized());
+  XCTAssertEqual(0, audioDeviceModule->StartPlayout());
+  XCTAssertTrue(audioDeviceModule->Playing());
+}
+
+- (void)stopPlayout {
+  XCTAssertEqual(0, audioDeviceModule->StopPlayout());
+  XCTAssertFalse(audioDeviceModule->Playing());
+}
+
+- (void)startRecording {
+  XCTAssertFalse(audioDeviceModule->Recording());
+  XCTAssertEqual(0, audioDeviceModule->InitRecording());
+  XCTAssertTrue(audioDeviceModule->RecordingIsInitialized());
+  XCTAssertEqual(0, audioDeviceModule->StartRecording());
+  XCTAssertTrue(audioDeviceModule->Recording());
+}
+
+- (void)stopRecording {
+  XCTAssertEqual(0, audioDeviceModule->StopRecording());
+  XCTAssertFalse(audioDeviceModule->Recording());
+}
+
+- (NSURL*)fileURLForSampleRate:(int)sampleRate {
+  XCTAssertTrue(sampleRate == 48000 || sampleRate == 44100 || sampleRate == 16000);
+  NSString *filename = [NSString stringWithFormat:@"audio_short%d", sampleRate / 1000];
+  NSURL *url = [[NSBundle mainBundle] URLForResource:filename withExtension:@"pcm"];
+  XCTAssertNotNil(url);
+
+  return url;
+}
+
+#pragma mark - Tests
+
+- (void)testConstructDestruct {
+  // Using the test fixture to create and destruct the audio device module.
+}
+
+- (void)testInitTerminate {
+  // Initialization is part of the test fixture.
+  XCTAssertTrue(audioDeviceModule->Initialized());
+  XCTAssertEqual(0, audioDeviceModule->Terminate());
+  XCTAssertFalse(audioDeviceModule->Initialized());
+}
+
+// Tests that playout can be initiated, started and stopped. No audio callback
+// is registered in this test.
+// Failing when running on real iOS devices: bugs.webrtc.org/6889.
+- (void)DISABLED_testStartStopPlayout {
+  [self startPlayout];
+  [self stopPlayout];
+  [self startPlayout];
+  [self stopPlayout];
+}
+
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+// Can sometimes fail when running on real devices: bugs.webrtc.org/7888.
+- (void)DISABLED_testStartStopRecording {
+  [self startRecording];
+  [self stopRecording];
+  [self startRecording];
+  [self stopRecording];
+}
+
+// Verify that calling StopPlayout() will leave us in an uninitialized state
+// which will require a new call to InitPlayout(). This test does not call
+// StartPlayout() while being uninitialized since doing so will hit a
+// RTC_DCHECK.
+- (void)testStopPlayoutRequiresInitToRestart {
+  XCTAssertEqual(0, audioDeviceModule->InitPlayout());
+  XCTAssertEqual(0, audioDeviceModule->StartPlayout());
+  XCTAssertEqual(0, audioDeviceModule->StopPlayout());
+  XCTAssertFalse(audioDeviceModule->PlayoutIsInitialized());
+}
+
+// Verify that we can create two ADMs and start playing on the second ADM.
+// Only the first active instance shall activate an audio session and the
+// last active instance shall deactivate the audio session. The test does not
+// explicitly verify correct audio session calls but instead focuses on
+// ensuring that audio starts for both ADMs.
+// Failing when running on real iOS devices: bugs.webrtc.org/6889.
+- (void)DISABLED_testStartPlayoutOnTwoInstances {
+  // Create and initialize a second/extra ADM instance. The default ADM is
+  // created by the test harness.
+  rtc::scoped_refptr<webrtc::AudioDeviceModule> secondAudioDeviceModule =
+      webrtc::CreateAudioDeviceModule();
+  XCTAssertNotEqual(secondAudioDeviceModule.get(), nullptr);
+  XCTAssertEqual(0, secondAudioDeviceModule->Init());
+
+  // Start playout for the default ADM but don't wait here. Instead use the
+  // upcoming second stream for that. We set the same expectation on number
+  // of callbacks as for the second stream.
+  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       void *audioSamples,
+                                       size_t &nSamplesOut,
+                                       int64_t *elapsed_time_ms,
+                                       int64_t *ntp_time_ms) {
+    nSamplesOut = nSamples;
+    XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
+    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
+    XCTAssertEqual(nChannels, playoutParameters.channels());
+    XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
+    XCTAssertNotEqual((void*)NULL, audioSamples);
+
+    return 0;
+  });
+
+  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
+  [self startPlayout];
+
+  // Initialize playout for the second ADM. If all is OK, the second ADM shall
+  // reuse the audio session activated when the first ADM started playing.
+  // This call will also ensure that we avoid a problem related to initializing
+  // two different audio unit instances back to back (see webrtc:5166 for
+  // details).
+  XCTAssertEqual(0, secondAudioDeviceModule->InitPlayout());
+  XCTAssertTrue(secondAudioDeviceModule->PlayoutIsInitialized());
+
+  // Start playout for the second ADM and verify that it starts as intended.
+  // Passing this test ensures that initialization of the second audio unit
+  // has been done successfully and that there is no conflict with the already
+  // playing first ADM.
+  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
+  playoutExpectation.expectedFulfillmentCount = kNumCallbacks;
+
+  MockAudioTransport mock2;
+  mock2.expectNeedMorePlayData(^int32_t(const size_t nSamples,
+                                        const size_t nBytesPerSample,
+                                        const size_t nChannels,
+                                        const uint32_t samplesPerSec,
+                                        void *audioSamples,
+                                        size_t &nSamplesOut,
+                                        int64_t *elapsed_time_ms,
+                                        int64_t *ntp_time_ms) {
+    nSamplesOut = nSamples;
+    XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
+    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
+    XCTAssertEqual(nChannels, playoutParameters.channels());
+    XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
+    XCTAssertNotEqual((void*)NULL, audioSamples);
+    [playoutExpectation fulfill];
+
+    return 0;
+  });
+
+  XCTAssertEqual(0, secondAudioDeviceModule->RegisterAudioCallback(&mock2));
+  XCTAssertEqual(0, secondAudioDeviceModule->StartPlayout());
+  XCTAssertTrue(secondAudioDeviceModule->Playing());
+  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
+  XCTAssertEqual(0, secondAudioDeviceModule->StopPlayout());
+  XCTAssertFalse(secondAudioDeviceModule->Playing());
+  XCTAssertFalse(secondAudioDeviceModule->PlayoutIsInitialized());
+
+  XCTAssertEqual(0, secondAudioDeviceModule->Terminate());
+}
+
+// Start playout and verify that the native audio layer starts asking for real
+// audio samples to play out using the NeedMorePlayData callback.
+- (void)testStartPlayoutVerifyCallbacks {
+  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
+  playoutExpectation.expectedFulfillmentCount = kNumCallbacks;
+
+  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       void *audioSamples,
+                                       size_t &nSamplesOut,
+                                       int64_t *elapsed_time_ms,
+                                       int64_t *ntp_time_ms) {
+    nSamplesOut = nSamples;
+    XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
+    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
+    XCTAssertEqual(nChannels, playoutParameters.channels());
+    XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
+    XCTAssertNotEqual((void*)NULL, audioSamples);
+    [playoutExpectation fulfill];
+
+    return 0;
+  });
+
+  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
+
+  [self startPlayout];
+  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
+  [self stopPlayout];
+}
+
+// Start recording and verify that the native audio layer starts feeding real
+// audio samples via the RecordedDataIsAvailable callback.
+- (void)testStartRecordingVerifyCallbacks {
+  XCTestExpectation *recordExpectation =
+  [self expectationWithDescription:@"RecordedDataIsAvailable"];
+  recordExpectation.expectedFulfillmentCount = kNumCallbacks;
+
+  mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
+                                       const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       const uint32_t totalDelayMS,
+                                       const int32_t clockDrift,
+                                       const uint32_t currentMicLevel,
+                                       const bool keyPressed,
+                                       uint32_t& newMicLevel) {
+    XCTAssertNotEqual((void*)NULL, audioSamples);
+    XCTAssertEqual(nSamples, recordParameters.frames_per_10ms_buffer());
+    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
+    XCTAssertEqual(nChannels, recordParameters.channels());
+    XCTAssertEqual((int) samplesPerSec, recordParameters.sample_rate());
+    XCTAssertEqual(0, clockDrift);
+    XCTAssertEqual(0u, currentMicLevel);
+    XCTAssertFalse(keyPressed);
+    [recordExpectation fulfill];
+
+    return 0;
+  });
+
+  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
+  [self startRecording];
+  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
+  [self stopRecording];
+}
+
+// Start playout and recording (full-duplex audio) and verify that audio is
+// active in both directions.
+- (void)testStartPlayoutAndRecordingVerifyCallbacks {
+  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
+  __block NSUInteger callbackCount = 0;
+
+  XCTestExpectation *recordExpectation =
+  [self expectationWithDescription:@"RecordedDataIsAvailable"];
+  recordExpectation.expectedFulfillmentCount = kNumCallbacks;
+
+  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       void *audioSamples,
+                                       size_t &nSamplesOut,
+                                       int64_t *elapsed_time_ms,
+                                       int64_t *ntp_time_ms) {
+    nSamplesOut = nSamples;
+    XCTAssertEqual(nSamples, playoutParameters.frames_per_10ms_buffer());
+    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
+    XCTAssertEqual(nChannels, playoutParameters.channels());
+    XCTAssertEqual((int) samplesPerSec, playoutParameters.sample_rate());
+    XCTAssertNotEqual((void*)NULL, audioSamples);
+    if (callbackCount++ >= kNumCallbacks) {
+      [playoutExpectation fulfill];
+    }
+
+    return 0;
+  });
+
+  mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
+                                       const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       const uint32_t totalDelayMS,
+                                       const int32_t clockDrift,
+                                       const uint32_t currentMicLevel,
+                                       const bool keyPressed,
+                                       uint32_t& newMicLevel) {
+    XCTAssertNotEqual((void*)NULL, audioSamples);
+    XCTAssertEqual(nSamples, recordParameters.frames_per_10ms_buffer());
+    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
+    XCTAssertEqual(nChannels, recordParameters.channels());
+    XCTAssertEqual((int) samplesPerSec, recordParameters.sample_rate());
+    XCTAssertEqual(0, clockDrift);
+    XCTAssertEqual(0u, currentMicLevel);
+    XCTAssertFalse(keyPressed);
+    [recordExpectation fulfill];
+
+    return 0;
+  });
+
+  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
+  [self startPlayout];
+  [self startRecording];
+  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
+  [self stopRecording];
+  [self stopPlayout];
+}
+
+// Start playout and read audio from an external PCM file when the audio layer
+// asks for data to play out. Real audio is played out in this test, but the
+// test does not explicitly verify the audio quality.
+- (void)testRunPlayoutWithFileAsSource {
+  XCTAssertEqual(1u, playoutParameters.channels());
+
+  // Letting XCTestExpectation count every callback is very slow, so use a
+  // plain counter and fulfill the expectation once at the end.
+  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
+  const int expectedCallbackCount = kFilePlayTimeInSec * kNumCallbacksPerSecond;
+  __block int callbackCount = 0;
+
+  NSURL *fileURL = [self fileURLForSampleRate:playoutParameters.sample_rate()];
+  NSInputStream *inputStream = [[NSInputStream alloc] initWithURL:fileURL];
+  // The stream must be opened before it can be read from.
+  [inputStream open];
+
+  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       void *audioSamples,
+                                       size_t &nSamplesOut,
+                                       int64_t *elapsed_time_ms,
+                                       int64_t *ntp_time_ms) {
+    [inputStream read:(uint8_t *)audioSamples maxLength:nSamples*nBytesPerSample*nChannels];
+    nSamplesOut = nSamples;
+    if (callbackCount++ == expectedCallbackCount) {
+      [playoutExpectation fulfill];
+    }
+
+    return 0;
+  });
+
+  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
+  [self startPlayout];
+  NSTimeInterval waitTimeout = kFilePlayTimeInSec * 2.0;
+  [self waitForExpectationsWithTimeout:waitTimeout handler:nil];
+  [self stopPlayout];
+}
+
+- (void)testDevices {
+  // Device enumeration is not supported. Verify fixed values only.
+  XCTAssertEqual(1, audioDeviceModule->PlayoutDevices());
+  XCTAssertEqual(1, audioDeviceModule->RecordingDevices());
+}
+
+// Start playout and recording and store recorded data in an intermediate FIFO
+// buffer from which the playout side then reads its samples in the same order
+// as they were stored. Under ideal circumstances, a callback sequence would
+// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
+// means 'packet played'. Under such conditions, the FIFO would only contain
+// one packet on average. However, under more realistic conditions, the size
+// of the FIFO will vary more due to an imbalance between the two sides.
+// This test tries to verify that the device maintains a balanced callback
+// sequence by running in loopback for ten seconds while measuring the size
+// (max and average) of the FIFO. The size of the FIFO is increased by the
+// recording side and decreased by the playout side.
+// TODO(henrika): tune the final test parameters after running tests on several
+// different devices.
+- (void)testRunPlayoutAndRecordingInFullDuplex {
+  XCTAssertEqual(recordParameters.channels(), playoutParameters.channels());
+  XCTAssertEqual(recordParameters.sample_rate(), playoutParameters.sample_rate());
+
+  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
+  __block NSUInteger playoutCallbacks = 0;
+  NSUInteger expectedPlayoutCallbacks = kFullDuplexTimeInSec * kNumCallbacksPerSecond;
+
+  // FIFO queue and measurements
+  NSMutableArray *fifoBuffer = [NSMutableArray arrayWithCapacity:20];
+  __block NSUInteger fifoMaxSize = 0;
+  __block NSUInteger fifoTotalWrittenElements = 0;
+  __block NSUInteger fifoWriteCount = 0;
+
+  mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
+                                       const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       const uint32_t totalDelayMS,
+                                       const int32_t clockDrift,
+                                       const uint32_t currentMicLevel,
+                                       const bool keyPressed,
+                                       uint32_t& newMicLevel) {
+    if (fifoWriteCount++ < kNumIgnoreFirstCallbacks) {
+      return 0;
+    }
+
+    NSData *data = [NSData dataWithBytes:audioSamples length:nSamples*nBytesPerSample*nChannels];
+    @synchronized(fifoBuffer) {
+      [fifoBuffer addObject:data];
+      fifoMaxSize = MAX(fifoMaxSize, fifoBuffer.count);
+      fifoTotalWrittenElements += fifoBuffer.count;
+    }
+
+    return 0;
+  });
+
+  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
+                                       const size_t nBytesPerSample,
+                                       const size_t nChannels,
+                                       const uint32_t samplesPerSec,
+                                       void *audioSamples,
+                                       size_t &nSamplesOut,
+                                       int64_t *elapsed_time_ms,
+                                       int64_t *ntp_time_ms) {
+    nSamplesOut = nSamples;
+    NSData *data;
+    @synchronized(fifoBuffer) {
+      data = fifoBuffer.firstObject;
+      if (data) {
+        [fifoBuffer removeObjectAtIndex:0];
+      }
+    }
+
+    if (data) {
+      memcpy(audioSamples, (char*) data.bytes, data.length);
+    } else {
+      memset(audioSamples, 0, nSamples*nBytesPerSample*nChannels);
+    }
+
+    if (playoutCallbacks++ == expectedPlayoutCallbacks) {
+      [playoutExpectation fulfill];
+    }
+    return 0;
+  });
+
+  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
+  [self startRecording];
+  [self startPlayout];
+  NSTimeInterval waitTimeout = kFullDuplexTimeInSec * 2.0;
+  [self waitForExpectationsWithTimeout:waitTimeout handler:nil];
+
+  // Average FIFO size, rounded to the nearest integer. The initial ignored
+  // callbacks are excluded from the denominator.
+  size_t fifoAverageSize =
+      (fifoTotalWrittenElements == 0)
+          ? 0
+          : static_cast<size_t>(
+                0.5 + (double)fifoTotalWrittenElements /
+                          (fifoWriteCount - kNumIgnoreFirstCallbacks));
+
+  [self stopPlayout];
+  [self stopRecording];
+  XCTAssertLessThan(fifoAverageSize, 10u);
+  XCTAssertLessThan(fifoMaxSize, 20u);
+}
+
+@end
diff --git a/sdk/objc/Framework/UnitTests/RTCAudioDevice_xctest.mm b/sdk/objc/Framework/UnitTests/RTCAudioDevice_xctest.mm
new file mode 100644
index 0000000..fa99d90
--- /dev/null
+++ b/sdk/objc/Framework/UnitTests/RTCAudioDevice_xctest.mm
@@ -0,0 +1,112 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <XCTest/XCTest.h>
+#import "sdk/objc/Framework/Native/src/audio/audio_device_ios.h"
+#import "sdk/objc/Framework/Native/api/audio_device_module.h"
+#import "sdk/objc/Framework/Classes/Audio/RTCAudioSession+Private.h"
+
+@interface RTCAudioDeviceTests : XCTestCase {
+  rtc::scoped_refptr<webrtc::AudioDeviceModule> _audioDeviceModule;
+  std::unique_ptr<webrtc::ios_adm::AudioDeviceIOS> _audio_device;
+}
+
+@property(nonatomic) RTCAudioSession *audioSession;
+
+@end
+
+@implementation RTCAudioDeviceTests
+
+@synthesize audioSession = _audioSession;
+
+- (void)setUp {
+  [super setUp];
+
+  _audioDeviceModule = webrtc::CreateAudioDeviceModule();
+  _audio_device.reset(new webrtc::ios_adm::AudioDeviceIOS());
+  self.audioSession = [RTCAudioSession sharedInstance];
+
+  NSError *error = nil;
+  [self.audioSession lockForConfiguration];
+  [self.audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
+                     withOptions:0
+                           error:&error];
+  XCTAssertNil(error);
+
+  [self.audioSession setMode:AVAudioSessionModeVoiceChat error:&error];
+  XCTAssertNil(error);
+
+  [self.audioSession setActive:YES error:&error];
+  XCTAssertNil(error);
+
+  [self.audioSession unlockForConfiguration];
+}
+
+- (void)tearDown {
+  _audio_device->Terminate();
+  _audio_device.reset(nullptr);
+  _audioDeviceModule = nullptr;
+  [self.audioSession notifyDidEndInterruptionWithShouldResumeSession:NO];
+
+  [super tearDown];
+}
+
+// Verifies that the AudioDeviceIOS is_interrupted_ flag is reset correctly
+// after an iOS AVAudioSessionInterruptionTypeEnded notification event.
+// AudioDeviceIOS listens to RTCAudioSession interruption notifications as
+// follows:
+// - In AudioDeviceIOS::InitPlayOrRecord, it registers its
+//   audio_session_observer_ callback with RTCAudioSession's delegate list.
+// - When RTCAudioSession receives an iOS audio interruption notification, it
+//   forwards the notification to the callbacks in its delegate list, which
+//   sets AudioDeviceIOS's is_interrupted_ flag to true.
+// - When AudioDeviceIOS::ShutdownPlayOrRecord is called, its
+//   audio_session_observer_ callback is removed from RTCAudioSession's
+//   delegate list.
+//   So if RTCAudioSession receives an iOS end-of-interruption notification
+//   after that, AudioDeviceIOS is not notified, since its callback is no
+//   longer in RTCAudioSession's delegate list. This leaves AudioDeviceIOS's
+//   is_interrupted_ flag in the wrong (true) state, and the audio session
+//   will ignore audio changes.
+// Since RTCAudioSession keeps its own interruption state, the fix is to
+// initialize AudioDeviceIOS's is_interrupted_ flag from RTCAudioSession's
+// isInterrupted flag in AudioDeviceIOS::InitPlayOrRecord.
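+//
+// A minimal sketch of the shape of that fix (an assumption for illustration,
+// not verbatim code from AudioDeviceIOS::InitPlayOrRecord):
+//
+//   RTCAudioSession* session = [RTCAudioSession sharedInstance];
+//   // Inherit the session's current interruption state instead of
+//   // assuming that no interruption is in progress.
+//   is_interrupted_ = session.isInterrupted;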
+- (void)testInterruptedAudioSession {
+  XCTAssertTrue(self.audioSession.isActive);
+  XCTAssertTrue([self.audioSession.category isEqual:AVAudioSessionCategoryPlayAndRecord] ||
+                [self.audioSession.category isEqual:AVAudioSessionCategoryPlayback]);
+  XCTAssertEqual(AVAudioSessionModeVoiceChat, self.audioSession.mode);
+
+  std::unique_ptr<webrtc::AudioDeviceBuffer> audio_buffer;
+  audio_buffer.reset(new webrtc::AudioDeviceBuffer());
+  _audio_device->AttachAudioBuffer(audio_buffer.get());
+  XCTAssertEqual(webrtc::AudioDeviceGeneric::InitStatus::OK, _audio_device->Init());
+  XCTAssertEqual(0, _audio_device->InitPlayout());
+  XCTAssertEqual(0, _audio_device->StartPlayout());
+
+  // Force interruption.
+  [self.audioSession notifyDidBeginInterruption];
+
+  // Wait for notification to propagate.
+  rtc::MessageQueueManager::ProcessAllMessageQueues();
+  XCTAssertTrue(_audio_device->IsInterrupted());
+
+  // Stop playout so that the audio session observer is unregistered before
+  // the interruption ends (the failure scenario described above).
+  _audio_device->StopPlayout();
+
+  [self.audioSession notifyDidEndInterruptionWithShouldResumeSession:YES];
+  // Wait for notification to propagate.
+  rtc::MessageQueueManager::ProcessAllMessageQueues();
+  XCTAssertTrue(_audio_device->IsInterrupted());
+
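+  // Re-initializing playout should pick up RTCAudioSession's isInterrupted
+  // state and reset is_interrupted_ (this is the fix under test).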
+  _audio_device->Init();
+  _audio_device->InitPlayout();
+  XCTAssertFalse(_audio_device->IsInterrupted());
+}
+
+@end
diff --git a/sdk/objc/Framework/UnitTests/audio_short16.pcm b/sdk/objc/Framework/UnitTests/audio_short16.pcm
new file mode 100644
index 0000000..15a0f18
--- /dev/null
+++ b/sdk/objc/Framework/UnitTests/audio_short16.pcm
Binary files differ
diff --git a/sdk/objc/Framework/UnitTests/audio_short44.pcm b/sdk/objc/Framework/UnitTests/audio_short44.pcm
new file mode 100644
index 0000000..011cdce
--- /dev/null
+++ b/sdk/objc/Framework/UnitTests/audio_short44.pcm
Binary files differ
diff --git a/sdk/objc/Framework/UnitTests/audio_short48.pcm b/sdk/objc/Framework/UnitTests/audio_short48.pcm
new file mode 100644
index 0000000..06fd826
--- /dev/null
+++ b/sdk/objc/Framework/UnitTests/audio_short48.pcm
Binary files differ