Move RTCAudioSession* files from modules/audio_device/ to sdk/Framework.

BUG=NONE

Review-Url: https://codereview.webrtc.org/2855023003
Cr-Commit-Position: refs/heads/master@{#18443}
diff --git a/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession+Configuration.mm b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession+Configuration.mm
new file mode 100644
index 0000000..c4d0d0c
--- /dev/null
+++ b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession+Configuration.mm
@@ -0,0 +1,172 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "WebRTC/RTCAudioSession.h"
+#import "WebRTC/RTCAudioSessionConfiguration.h"
+
+#import "WebRTC/RTCLogging.h"
+#import "RTCAudioSession+Private.h"
+
+
+@implementation RTCAudioSession (Configuration)
+
+- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
+                   error:(NSError **)outError {
+  return [self setConfiguration:configuration
+                         active:NO
+                shouldSetActive:NO
+                          error:outError];
+}
+
+- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
+                  active:(BOOL)active
+                   error:(NSError **)outError {
+  return [self setConfiguration:configuration
+                         active:active
+                shouldSetActive:YES
+                          error:outError];
+}
+
+#pragma mark - Private
+
+- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
+                  active:(BOOL)active
+         shouldSetActive:(BOOL)shouldSetActive
+                   error:(NSError **)outError {
+  NSParameterAssert(configuration);
+  if (outError) {
+    *outError = nil;
+  }
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+
+  // Provide an error even if there isn't one so we can log it. We will not
+  // return immediately on error in this function and instead try to set
+  // everything we can.
+  NSError *error = nil;
+
+  if (self.category != configuration.category ||
+      self.categoryOptions != configuration.categoryOptions) {
+    NSError *categoryError = nil;
+    if (![self setCategory:configuration.category
+               withOptions:configuration.categoryOptions
+                     error:&categoryError]) {
+      RTCLogError(@"Failed to set category: %@",
+                  categoryError.localizedDescription);
+      error = categoryError;
+    } else {
+      RTCLog(@"Set category to: %@", configuration.category);
+    }
+  }
+
+  if (self.mode != configuration.mode) {
+    NSError *modeError = nil;
+    if (![self setMode:configuration.mode error:&modeError]) {
+      RTCLogError(@"Failed to set mode: %@",
+                  modeError.localizedDescription);
+      error = modeError;
+    } else {
+      RTCLog(@"Set mode to: %@", configuration.mode);
+    }
+  }
+
+  // Sometimes category options don't stick after setting mode.
+  if (self.categoryOptions != configuration.categoryOptions) {
+    NSError *categoryError = nil;
+    if (![self setCategory:configuration.category
+               withOptions:configuration.categoryOptions
+                     error:&categoryError]) {
+      RTCLogError(@"Failed to set category options: %@",
+                  categoryError.localizedDescription);
+      error = categoryError;
+    } else {
+      RTCLog(@"Set category options to: %ld",
+             (long)configuration.categoryOptions);
+    }
+  }
+
+  if (self.preferredSampleRate != configuration.sampleRate) {
+    NSError *sampleRateError = nil;
+    if (![self setPreferredSampleRate:configuration.sampleRate
+                                error:&sampleRateError]) {
+      RTCLogError(@"Failed to set preferred sample rate: %@",
+                  sampleRateError.localizedDescription);
+      error = sampleRateError;
+    } else {
+      RTCLog(@"Set preferred sample rate to: %.2f",
+             configuration.sampleRate);
+    }
+  }
+
+  if (self.preferredIOBufferDuration != configuration.ioBufferDuration) {
+    NSError *bufferDurationError = nil;
+    if (![self setPreferredIOBufferDuration:configuration.ioBufferDuration
+                                      error:&bufferDurationError]) {
+      RTCLogError(@"Failed to set preferred IO buffer duration: %@",
+                  bufferDurationError.localizedDescription);
+      error = bufferDurationError;
+    } else {
+      RTCLog(@"Set preferred IO buffer duration to: %f",
+             configuration.ioBufferDuration);
+    }
+  }
+
+  if (shouldSetActive) {
+    NSError *activeError = nil;
+    if (![self setActive:active error:&activeError]) {
+      RTCLogError(@"Failed to setActive to %d: %@",
+                  active, activeError.localizedDescription);
+      error = activeError;
+    }
+  }
+
+  if (self.isActive &&
+      // TODO(tkchin): Figure out which category/mode numChannels is valid for.
+      [self.mode isEqualToString:AVAudioSessionModeVoiceChat]) {
+    // Try to set the preferred number of hardware audio channels. These calls
+    // must be done after setting the audio session’s category and mode and
+    // activating the session.
+    NSInteger inputNumberOfChannels = configuration.inputNumberOfChannels;
+    if (self.inputNumberOfChannels != inputNumberOfChannels) {
+      NSError *inputChannelsError = nil;
+      if (![self setPreferredInputNumberOfChannels:inputNumberOfChannels
+                                             error:&inputChannelsError]) {
+        RTCLogError(@"Failed to set preferred input number of channels: %@",
+                    inputChannelsError.localizedDescription);
+        error = inputChannelsError;
+      } else {
+        RTCLog(@"Set input number of channels to: %ld",
+               (long)inputNumberOfChannels);
+      }
+    }
+    NSInteger outputNumberOfChannels = configuration.outputNumberOfChannels;
+    if (self.outputNumberOfChannels != outputNumberOfChannels) {
+      NSError *outputChannelsError = nil;
+      if (![self setPreferredOutputNumberOfChannels:outputNumberOfChannels
+                                              error:&outputChannelsError]) {
+        RTCLogError(@"Failed to set preferred output number of channels: %@",
+                    outputChannelsError.localizedDescription);
+        error = outputChannelsError;
+      } else {
+        RTCLog(@"Set output number of channels to: %ld",
+               (long)outputNumberOfChannels);
+      }
+    }
+  }
+
+  if (outError) {
+    *outError = error;
+  }
+
+  return error == nil;
+}
+
+@end
diff --git a/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession+Private.h b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession+Private.h
new file mode 100644
index 0000000..5a063ed
--- /dev/null
+++ b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession+Private.h
@@ -0,0 +1,96 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "WebRTC/RTCAudioSession.h"
+
+#include <vector>
+
+NS_ASSUME_NONNULL_BEGIN
+
+@class RTCAudioSessionConfiguration;
+
+@interface RTCAudioSession ()
+
+/** Number of times setActive:YES has succeeded without a balanced call to
+ *  setActive:NO.
+ */
+@property(nonatomic, readonly) int activationCount;
+
+/** The number of times |beginWebRTCSession| was called without a balanced call
+ *  to |endWebRTCSession|.
+ */
+@property(nonatomic, readonly) int webRTCSessionCount;
+
+/** Convenience BOOL that checks useManualAudio and isAudioEnabled. */
+@property(readonly) BOOL canPlayOrRecord;
+
+/** Tracks whether we have been sent an interruption event that hasn't been matched by either an
+ *  interrupted end event or a foreground event.
+ */
+@property(nonatomic, assign) BOOL isInterrupted;
+
+- (BOOL)checkLock:(NSError **)outError;
+
+/** Adds the delegate to the list of delegates, and places it at the front of
+ *  the list. This delegate will be notified before other delegates of
+ *  audio events.
+ */
+- (void)pushDelegate:(id<RTCAudioSessionDelegate>)delegate;
+
+/** Signals RTCAudioSession that a WebRTC session is about to begin and
+ *  audio configuration is needed. Will configure the audio session for WebRTC
+ *  if not already configured and if configuration is not delayed.
+ *  Successful calls must be balanced by a call to endWebRTCSession.
+ */
+- (BOOL)beginWebRTCSession:(NSError **)outError;
+
+/** Signals RTCAudioSession that a WebRTC session is about to end and audio
+ *  unconfiguration is needed. Will unconfigure the audio session for WebRTC
+ *  if this is the last unmatched call and if configuration is not delayed.
+ */
+- (BOOL)endWebRTCSession:(NSError **)outError;
+
+/** Configure the audio session for WebRTC. This call will fail if the session
+ *  is already configured. On other failures, we will attempt to restore the
+ *  previously used audio session configuration.
+ *  |lockForConfiguration| must be called first.
+ *  Successful calls to configureWebRTCSession must be matched by calls to
+ *  |unconfigureWebRTCSession|.
+ */
+- (BOOL)configureWebRTCSession:(NSError **)outError;
+
+/** Unconfigures the session for WebRTC. This will attempt to restore the
+ *  audio session to the settings used before |configureWebRTCSession| was
+ *  called.
+ *  |lockForConfiguration| must be called first.
+ */
+- (BOOL)unconfigureWebRTCSession:(NSError **)outError;
+
+/** Returns a configuration error with the given description. */
+- (NSError *)configurationErrorWithDescription:(NSString *)description;
+
+// Properties and methods for tests.
+@property(nonatomic, readonly)
+    std::vector<__weak id<RTCAudioSessionDelegate> > delegates;
+
+- (void)notifyDidBeginInterruption;
+- (void)notifyDidEndInterruptionWithShouldResumeSession:
+    (BOOL)shouldResumeSession;
+- (void)notifyDidChangeRouteWithReason:(AVAudioSessionRouteChangeReason)reason
+    previousRoute:(AVAudioSessionRouteDescription *)previousRoute;
+- (void)notifyMediaServicesWereLost;
+- (void)notifyMediaServicesWereReset;
+- (void)notifyDidChangeCanPlayOrRecord:(BOOL)canPlayOrRecord;
+- (void)notifyDidStartPlayOrRecord;
+- (void)notifyDidStopPlayOrRecord;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession.mm b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession.mm
new file mode 100644
index 0000000..ce0e263
--- /dev/null
+++ b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSession.mm
@@ -0,0 +1,917 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "WebRTC/RTCAudioSession.h"
+
+#import <UIKit/UIKit.h>
+
+#include "webrtc/base/atomicops.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/criticalsection.h"
+
+
+#import "WebRTC/RTCAudioSessionConfiguration.h"
+#import "WebRTC/RTCLogging.h"
+
+#import "RTCAudioSession+Private.h"
+
+
+NSString * const kRTCAudioSessionErrorDomain = @"org.webrtc.RTCAudioSession";
+NSInteger const kRTCAudioSessionErrorLockRequired = -1;
+NSInteger const kRTCAudioSessionErrorConfiguration = -2;
+NSString * const kRTCAudioSessionOutputVolumeSelector = @"outputVolume";
+
+// This class needs to be thread-safe because it is accessed from many threads.
+// TODO(tkchin): Consider more granular locking. We're not expecting a lot of
+// lock contention so coarse locks should be fine for now.
+@implementation RTCAudioSession {
+  rtc::CriticalSection _crit;
+  AVAudioSession *_session;
+  volatile int _activationCount;
+  volatile int _lockRecursionCount;
+  volatile int _webRTCSessionCount;
+  BOOL _isActive;
+  BOOL _useManualAudio;
+  BOOL _isAudioEnabled;
+  BOOL _canPlayOrRecord;
+  BOOL _isInterrupted;
+}
+
+@synthesize session = _session;
+@synthesize delegates = _delegates;
+
++ (instancetype)sharedInstance {
+  static dispatch_once_t onceToken;
+  static RTCAudioSession *sharedInstance = nil;
+  dispatch_once(&onceToken, ^{
+    sharedInstance = [[self alloc] init];
+  });
+  return sharedInstance;
+}
+
+- (instancetype)init {
+  if (self = [super init]) {
+    _session = [AVAudioSession sharedInstance];
+
+    NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
+    [center addObserver:self
+               selector:@selector(handleInterruptionNotification:)
+                   name:AVAudioSessionInterruptionNotification
+                 object:nil];
+    [center addObserver:self
+               selector:@selector(handleRouteChangeNotification:)
+                   name:AVAudioSessionRouteChangeNotification
+                 object:nil];
+    [center addObserver:self
+               selector:@selector(handleMediaServicesWereLost:)
+                   name:AVAudioSessionMediaServicesWereLostNotification
+                 object:nil];
+    [center addObserver:self
+               selector:@selector(handleMediaServicesWereReset:)
+                   name:AVAudioSessionMediaServicesWereResetNotification
+                 object:nil];
+    // Posted on the main thread when the primary audio from other applications
+    // starts and stops. Foreground applications may use this notification as a
+    // hint to enable or disable audio that is secondary.
+    [center addObserver:self
+               selector:@selector(handleSilenceSecondaryAudioHintNotification:)
+                   name:AVAudioSessionSilenceSecondaryAudioHintNotification
+                 object:nil];
+    // Also track foreground event in order to deal with interruption ended situation.
+    [center addObserver:self
+               selector:@selector(handleApplicationDidBecomeActive:)
+                   name:UIApplicationDidBecomeActiveNotification
+                 object:nil];
+    [_session addObserver:self
+               forKeyPath:kRTCAudioSessionOutputVolumeSelector
+                  options:NSKeyValueObservingOptionNew | NSKeyValueObservingOptionOld
+                  context:nil];
+
+    RTCLog(@"RTCAudioSession (%p): init.", self);
+  }
+  return self;
+}
+
+- (void)dealloc {
+  [[NSNotificationCenter defaultCenter] removeObserver:self];
+  [_session removeObserver:self forKeyPath:kRTCAudioSessionOutputVolumeSelector context:nil];
+  RTCLog(@"RTCAudioSession (%p): dealloc.", self);
+}
+
+- (NSString *)description {
+  NSString *format =
+      @"RTCAudioSession: {\n"
+       "  category: %@\n"
+       "  categoryOptions: %ld\n"
+       "  mode: %@\n"
+       "  isActive: %d\n"
+       "  sampleRate: %.2f\n"
+       "  IOBufferDuration: %f\n"
+       "  outputNumberOfChannels: %ld\n"
+       "  inputNumberOfChannels: %ld\n"
+       "  outputLatency: %f\n"
+       "  inputLatency: %f\n"
+       "  outputVolume: %f\n"
+       "}";
+  NSString *description = [NSString stringWithFormat:format,
+      self.category, (long)self.categoryOptions, self.mode,
+      self.isActive, self.sampleRate, self.IOBufferDuration,
+      self.outputNumberOfChannels, self.inputNumberOfChannels,
+      self.outputLatency, self.inputLatency, self.outputVolume];
+  return description;
+}
+
+- (void)setIsActive:(BOOL)isActive {
+  @synchronized(self) {
+    _isActive = isActive;
+  }
+}
+
+- (BOOL)isActive {
+  @synchronized(self) {
+    return _isActive;
+  }
+}
+
+- (BOOL)isLocked {
+  return _lockRecursionCount > 0;
+}
+
+- (void)setUseManualAudio:(BOOL)useManualAudio {
+  @synchronized(self) {
+    if (_useManualAudio == useManualAudio) {
+      return;
+    }
+    _useManualAudio = useManualAudio;
+  }
+  [self updateCanPlayOrRecord];
+}
+
+- (BOOL)useManualAudio {
+  @synchronized(self) {
+    return _useManualAudio;
+  }
+}
+
+- (void)setIsAudioEnabled:(BOOL)isAudioEnabled {
+  @synchronized(self) {
+    if (_isAudioEnabled == isAudioEnabled) {
+      return;
+    }
+    _isAudioEnabled = isAudioEnabled;
+  }
+  [self updateCanPlayOrRecord];
+}
+
+- (BOOL)isAudioEnabled {
+  @synchronized(self) {
+    return _isAudioEnabled;
+  }
+}
+
+// TODO(tkchin): Check for duplicates.
+- (void)addDelegate:(id<RTCAudioSessionDelegate>)delegate {
+  RTCLog(@"Adding delegate: (%p)", delegate);
+  if (!delegate) {
+    return;
+  }
+  @synchronized(self) {
+    _delegates.push_back(delegate);
+    [self removeZeroedDelegates];
+  }
+}
+
+- (void)removeDelegate:(id<RTCAudioSessionDelegate>)delegate {
+  RTCLog(@"Removing delegate: (%p)", delegate);
+  if (!delegate) {
+    return;
+  }
+  @synchronized(self) {
+    _delegates.erase(std::remove(_delegates.begin(),
+                                 _delegates.end(),
+                                 delegate),
+                     _delegates.end());
+    [self removeZeroedDelegates];
+  }
+}
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wthread-safety-analysis"
+
+- (void)lockForConfiguration {
+  _crit.Enter();
+  rtc::AtomicOps::Increment(&_lockRecursionCount);
+}
+
+- (void)unlockForConfiguration {
+  // Don't let threads other than the one that called lockForConfiguration
+  // unlock.
+  if (_crit.TryEnter()) {
+    rtc::AtomicOps::Decrement(&_lockRecursionCount);
+    // One unlock for the tryLock, and another one to actually unlock. If this
+    // was called without anyone calling lock, we will hit an assertion.
+    _crit.Leave();
+    _crit.Leave();
+  }
+}
+
+#pragma clang diagnostic pop
+
+#pragma mark - AVAudioSession proxy methods
+
+- (NSString *)category {
+  return self.session.category;
+}
+
+- (AVAudioSessionCategoryOptions)categoryOptions {
+  return self.session.categoryOptions;
+}
+
+- (NSString *)mode {
+  return self.session.mode;
+}
+
+- (BOOL)secondaryAudioShouldBeSilencedHint {
+  return self.session.secondaryAudioShouldBeSilencedHint;
+}
+
+- (AVAudioSessionRouteDescription *)currentRoute {
+  return self.session.currentRoute;
+}
+
+- (NSInteger)maximumInputNumberOfChannels {
+  return self.session.maximumInputNumberOfChannels;
+}
+
+- (NSInteger)maximumOutputNumberOfChannels {
+  return self.session.maximumOutputNumberOfChannels;
+}
+
+- (float)inputGain {
+  return self.session.inputGain;
+}
+
+- (BOOL)inputGainSettable {
+  return self.session.inputGainSettable;
+}
+
+- (BOOL)inputAvailable {
+  return self.session.inputAvailable;
+}
+
+- (NSArray<AVAudioSessionDataSourceDescription *> *)inputDataSources {
+  return self.session.inputDataSources;
+}
+
+- (AVAudioSessionDataSourceDescription *)inputDataSource {
+  return self.session.inputDataSource;
+}
+
+- (NSArray<AVAudioSessionDataSourceDescription *> *)outputDataSources {
+  return self.session.outputDataSources;
+}
+
+- (AVAudioSessionDataSourceDescription *)outputDataSource {
+  return self.session.outputDataSource;
+}
+
+- (double)sampleRate {
+  return self.session.sampleRate;
+}
+
+- (double)preferredSampleRate {
+  return self.session.preferredSampleRate;
+}
+
+- (NSInteger)inputNumberOfChannels {
+  return self.session.inputNumberOfChannels;
+}
+
+- (NSInteger)outputNumberOfChannels {
+  return self.session.outputNumberOfChannels;
+}
+
+- (float)outputVolume {
+  return self.session.outputVolume;
+}
+
+- (NSTimeInterval)inputLatency {
+  return self.session.inputLatency;
+}
+
+- (NSTimeInterval)outputLatency {
+  return self.session.outputLatency;
+}
+
+- (NSTimeInterval)IOBufferDuration {
+  return self.session.IOBufferDuration;
+}
+
+- (NSTimeInterval)preferredIOBufferDuration {
+  return self.session.preferredIOBufferDuration;
+}
+
+// TODO(tkchin): Simplify the amount of locking happening here. Likely that we
+// can just do atomic increments / decrements.
+- (BOOL)setActive:(BOOL)active
+            error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  int activationCount = _activationCount;
+  if (!active && activationCount == 0) {
+    RTCLogWarning(@"Attempting to deactivate without prior activation.");
+  }
+  BOOL success = YES;
+  BOOL isActive = self.isActive;
+  // Keep a local error so we can log it.
+  NSError *error = nil;
+  BOOL shouldSetActive =
+      (active && !isActive) || (!active && isActive && activationCount == 1);
+  // Attempt to activate if we're not active.
+  // Attempt to deactivate if we're active and it's the last unbalanced call.
+  if (shouldSetActive) {
+    AVAudioSession *session = self.session;
+    // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to ensure
+    // that other audio sessions that were interrupted by our session can return
+    // to their active state. It is recommended for VoIP apps to use this
+    // option.
+    AVAudioSessionSetActiveOptions options =
+        active ? 0 : AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation;
+    success = [session setActive:active
+                     withOptions:options
+                           error:&error];
+    if (outError) {
+      *outError = error;
+    }
+  }
+  if (success) {
+    if (shouldSetActive) {
+      self.isActive = active;
+    }
+    if (active) {
+      [self incrementActivationCount];
+    }
+  } else {
+    RTCLogError(@"Failed to setActive:%d. Error: %@",
+                active, error.localizedDescription);
+  }
+  // Decrement activation count on deactivation whether or not it succeeded.
+  if (!active) {
+    [self decrementActivationCount];
+  }
+  RTCLog(@"Number of current activations: %d", _activationCount);
+  return success;
+}
+
+- (BOOL)setCategory:(NSString *)category
+        withOptions:(AVAudioSessionCategoryOptions)options
+              error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setCategory:category withOptions:options error:outError];
+}
+
+- (BOOL)setMode:(NSString *)mode error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setMode:mode error:outError];
+}
+
+- (BOOL)setInputGain:(float)gain error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setInputGain:gain error:outError];
+}
+
+- (BOOL)setPreferredSampleRate:(double)sampleRate error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setPreferredSampleRate:sampleRate error:outError];
+}
+
+- (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration
+                               error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setPreferredIOBufferDuration:duration error:outError];
+}
+
+- (BOOL)setPreferredInputNumberOfChannels:(NSInteger)count
+                                    error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setPreferredInputNumberOfChannels:count error:outError];
+}
+- (BOOL)setPreferredOutputNumberOfChannels:(NSInteger)count
+                                     error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setPreferredOutputNumberOfChannels:count error:outError];
+}
+
+- (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride
+                          error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session overrideOutputAudioPort:portOverride error:outError];
+}
+
+- (BOOL)setPreferredInput:(AVAudioSessionPortDescription *)inPort
+                    error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setPreferredInput:inPort error:outError];
+}
+
+- (BOOL)setInputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+                     error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setInputDataSource:dataSource error:outError];
+}
+
+- (BOOL)setOutputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+                      error:(NSError **)outError {
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  return [self.session setOutputDataSource:dataSource error:outError];
+}
+
+#pragma mark - Notifications
+
+- (void)handleInterruptionNotification:(NSNotification *)notification {
+  NSNumber* typeNumber =
+      notification.userInfo[AVAudioSessionInterruptionTypeKey];
+  AVAudioSessionInterruptionType type =
+      (AVAudioSessionInterruptionType)typeNumber.unsignedIntegerValue;
+  switch (type) {
+    case AVAudioSessionInterruptionTypeBegan:
+      RTCLog(@"Audio session interruption began.");
+      self.isActive = NO;
+      self.isInterrupted = YES;
+      [self notifyDidBeginInterruption];
+      break;
+    case AVAudioSessionInterruptionTypeEnded: {
+      RTCLog(@"Audio session interruption ended.");
+      self.isInterrupted = NO;
+      [self updateAudioSessionAfterEvent];
+      NSNumber *optionsNumber =
+          notification.userInfo[AVAudioSessionInterruptionOptionKey];
+      AVAudioSessionInterruptionOptions options =
+          optionsNumber.unsignedIntegerValue;
+      BOOL shouldResume =
+          options & AVAudioSessionInterruptionOptionShouldResume;
+      [self notifyDidEndInterruptionWithShouldResumeSession:shouldResume];
+      break;
+    }
+  }
+}
+
+- (void)handleRouteChangeNotification:(NSNotification *)notification {
+  // Get reason for current route change.
+  NSNumber* reasonNumber =
+      notification.userInfo[AVAudioSessionRouteChangeReasonKey];
+  AVAudioSessionRouteChangeReason reason =
+      (AVAudioSessionRouteChangeReason)reasonNumber.unsignedIntegerValue;
+  RTCLog(@"Audio route changed:");
+  switch (reason) {
+    case AVAudioSessionRouteChangeReasonUnknown:
+      RTCLog(@"Audio route changed: ReasonUnknown");
+      break;
+    case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
+      RTCLog(@"Audio route changed: NewDeviceAvailable");
+      break;
+    case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
+      RTCLog(@"Audio route changed: OldDeviceUnavailable");
+      break;
+    case AVAudioSessionRouteChangeReasonCategoryChange:
+      RTCLog(@"Audio route changed: CategoryChange to :%@",
+             self.session.category);
+      break;
+    case AVAudioSessionRouteChangeReasonOverride:
+      RTCLog(@"Audio route changed: Override");
+      break;
+    case AVAudioSessionRouteChangeReasonWakeFromSleep:
+      RTCLog(@"Audio route changed: WakeFromSleep");
+      break;
+    case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
+      RTCLog(@"Audio route changed: NoSuitableRouteForCategory");
+      break;
+    case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
+      RTCLog(@"Audio route changed: RouteConfigurationChange");
+      break;
+  }
+  AVAudioSessionRouteDescription* previousRoute =
+      notification.userInfo[AVAudioSessionRouteChangePreviousRouteKey];
+  // Log previous route configuration.
+  RTCLog(@"Previous route: %@\nCurrent route:%@",
+         previousRoute, self.session.currentRoute);
+  [self notifyDidChangeRouteWithReason:reason previousRoute:previousRoute];
+}
+
+- (void)handleMediaServicesWereLost:(NSNotification *)notification {
+  RTCLog(@"Media services were lost.");
+  [self updateAudioSessionAfterEvent];
+  [self notifyMediaServicesWereLost];
+}
+
+- (void)handleMediaServicesWereReset:(NSNotification *)notification {
+  RTCLog(@"Media services were reset.");
+  [self updateAudioSessionAfterEvent];
+  [self notifyMediaServicesWereReset];
+}
+
+- (void)handleSilenceSecondaryAudioHintNotification:(NSNotification *)notification {
+  // TODO(henrika): just adding logs here for now until we know if we will ever
+  // see this notification and might be affected by it or if further actions
+  // are required.
+  NSNumber *typeNumber =
+      notification.userInfo[AVAudioSessionSilenceSecondaryAudioHintTypeKey];
+  AVAudioSessionSilenceSecondaryAudioHintType type =
+      (AVAudioSessionSilenceSecondaryAudioHintType)typeNumber.unsignedIntegerValue;
+  switch (type) {
+    case AVAudioSessionSilenceSecondaryAudioHintTypeBegin:
+      RTCLog(@"Another application's primary audio has started.");
+      break;
+    case AVAudioSessionSilenceSecondaryAudioHintTypeEnd:
+      RTCLog(@"Another application's primary audio has stopped.");
+      break;
+  }
+}
+
+- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
+  RTCLog(@"Application became active after an interruption. Treating as interruption "
+         "end. isInterrupted changed from %d to 0.", self.isInterrupted);
+  if (self.isInterrupted) {
+    self.isInterrupted = NO;
+    [self updateAudioSessionAfterEvent];
+  }
+  // Always treat application becoming active as an interruption end event.
+  [self notifyDidEndInterruptionWithShouldResumeSession:YES];
+}
+
+#pragma mark - Private
+
++ (NSError *)lockError {
+  NSDictionary *userInfo = @{
+    NSLocalizedDescriptionKey:
+        @"Must call lockForConfiguration before calling this method."
+  };
+  NSError *error =
+      [[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain
+                                 code:kRTCAudioSessionErrorLockRequired
+                             userInfo:userInfo];
+  return error;
+}
+
+- (std::vector<__weak id<RTCAudioSessionDelegate> >)delegates {
+  @synchronized(self) {
+    // Note: this returns a copy.
+    return _delegates;
+  }
+}
+
+// TODO(tkchin): check for duplicates.
+- (void)pushDelegate:(id<RTCAudioSessionDelegate>)delegate {
+  @synchronized(self) {
+    _delegates.insert(_delegates.begin(), delegate);
+  }
+}
+
+- (void)removeZeroedDelegates {
+  @synchronized(self) {
+    _delegates.erase(
+        std::remove_if(_delegates.begin(),
+                       _delegates.end(),
+                       [](id delegate) -> bool { return delegate == nil; }),
+        _delegates.end());
+  }
+}
+
+- (int)activationCount {
+  return _activationCount;
+}
+
+- (int)incrementActivationCount {
+  RTCLog(@"Incrementing activation count.");
+  return rtc::AtomicOps::Increment(&_activationCount);
+}
+
+- (NSInteger)decrementActivationCount {
+  RTCLog(@"Decrementing activation count.");
+  return rtc::AtomicOps::Decrement(&_activationCount);
+}
+
+- (int)webRTCSessionCount {
+  return _webRTCSessionCount;
+}
+
+- (BOOL)canPlayOrRecord {
+  return !self.useManualAudio || self.isAudioEnabled;
+}
+
+- (BOOL)isInterrupted {
+  @synchronized(self) {
+    return _isInterrupted;
+  }
+}
+
+- (void)setIsInterrupted:(BOOL)isInterrupted {
+  @synchronized(self) {
+    if (_isInterrupted == isInterrupted) {
+      return;
+    }
+    _isInterrupted = isInterrupted;
+  }
+}
+
+- (BOOL)checkLock:(NSError **)outError {
+  // Check ivar instead of trying to acquire lock so that we won't accidentally
+  // acquire lock if it hasn't already been called.
+  if (!self.isLocked) {
+    if (outError) {
+      *outError = [RTCAudioSession lockError];
+    }
+    return NO;
+  }
+  return YES;
+}
+
+- (BOOL)beginWebRTCSession:(NSError **)outError {
+  if (outError) {
+    *outError = nil;
+  }
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  rtc::AtomicOps::Increment(&_webRTCSessionCount);
+  [self notifyDidStartPlayOrRecord];
+  return YES;
+}
+
+// Marks the end of a WebRTC session: decrements the session count and
+// notifies delegates that playout/recording stopped. Requires
+// |lockForConfiguration|; must balance a prior beginWebRTCSession: call.
+- (BOOL)endWebRTCSession:(NSError **)outError {
+  if (outError) {
+    *outError = nil;
+  }
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  rtc::AtomicOps::Decrement(&_webRTCSessionCount);
+  [self notifyDidStopPlayOrRecord];
+  return YES;
+}
+
+// Applies the WebRTC audio configuration and activates the AVAudioSession.
+// Requires |lockForConfiguration|. On failure returns NO and, when outError
+// is non-nil, populates it with the underlying error. On sample-rate
+// mismatch it retries with the session's actual rate but still returns YES.
+- (BOOL)configureWebRTCSession:(NSError **)outError {
+  if (outError) {
+    *outError = nil;
+  }
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  RTCLog(@"Configuring audio session for WebRTC.");
+
+  // Configure the AVAudioSession and activate it.
+  // Provide an error even if there isn't one so we can log it.
+  NSError *error = nil;
+  RTCAudioSessionConfiguration *webRTCConfig =
+      [RTCAudioSessionConfiguration webRTCConfiguration];
+  if (![self setConfiguration:webRTCConfig active:YES error:&error]) {
+    RTCLogError(@"Failed to set WebRTC audio configuration: %@",
+                error.localizedDescription);
+    // Do not call setActive:NO if setActive:YES failed.
+    if (outError) {
+      *outError = error;
+    }
+    return NO;
+  }
+
+  // Ensure that the device currently supports audio input.
+  // TODO(tkchin): Figure out if this is really necessary.
+  if (!self.inputAvailable) {
+    RTCLogError(@"No audio input path is available!");
+    [self unconfigureWebRTCSession:nil];
+    if (outError) {
+      *outError = [self configurationErrorWithDescription:@"No input path."];
+    }
+    return NO;
+  }
+
+  // It can happen (e.g. in combination with BT devices) that the attempt to set
+  // the preferred sample rate for WebRTC (48kHz) fails. If so, make a new
+  // configuration attempt using the sample rate that worked using the active
+  // audio session. A typical case is that only 8 or 16kHz can be set, e.g. in
+  // combination with BT headsets. Using this "trick" seems to avoid a state
+  // where Core Audio asks for a different number of audio frames than what the
+  // session's I/O buffer duration corresponds to.
+  // TODO(henrika): this fix resolves bugs.webrtc.org/6004 but it has only been
+  // tested on a limited set of iOS devices and BT devices.
+  double sessionSampleRate = self.sampleRate;
+  double preferredSampleRate = webRTCConfig.sampleRate;
+  if (sessionSampleRate != preferredSampleRate) {
+    RTCLogWarning(
+        @"Current sample rate (%.2f) is not the preferred rate (%.2f)",
+        sessionSampleRate, preferredSampleRate);
+    if (![self setPreferredSampleRate:sessionSampleRate
+                                error:&error]) {
+      RTCLogError(@"Failed to set preferred sample rate: %@",
+                  error.localizedDescription);
+      // Note: a sample-rate fallback failure is logged and reported via
+      // outError, but does not fail the overall configuration.
+      if (outError) {
+        *outError = error;
+      }
+    }
+  }
+
+  return YES;
+}
+
+// Deactivates the audio session after WebRTC is done with it. Requires
+// |lockForConfiguration|. Propagates the result of the deactivation attempt
+// instead of unconditionally reporting success, per the BOOL/NSError**
+// convention.
+- (BOOL)unconfigureWebRTCSession:(NSError **)outError {
+  if (outError) {
+    *outError = nil;
+  }
+  if (![self checkLock:outError]) {
+    return NO;
+  }
+  RTCLog(@"Unconfiguring audio session for WebRTC.");
+  return [self setActive:NO error:outError];
+}
+
+// Builds an NSError in the RTCAudioSession error domain with the
+// configuration error code and |description| as the localized description.
+- (NSError *)configurationErrorWithDescription:(NSString *)description {
+  NSDictionary *userInfo = @{
+    NSLocalizedDescriptionKey: description,
+  };
+  return [[NSError alloc] initWithDomain:kRTCAudioSessionErrorDomain
+                                    code:kRTCAudioSessionErrorConfiguration
+                                userInfo:userInfo];
+}
+
+// Re-synchronizes the AVAudioSession active state with our activation count
+// after a system event (e.g. an interruption). When deactivating, passes
+// NotifyOthersOnDeactivation so other apps can resume their audio.
+- (void)updateAudioSessionAfterEvent {
+  BOOL shouldActivate = self.activationCount > 0;
+  AVAudioSessionSetActiveOptions options = shouldActivate ?
+      0 : AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation;
+  NSError *error = nil;
+  if ([self.session setActive:shouldActivate
+                  withOptions:options
+                        error:&error]) {
+    self.isActive = shouldActivate;
+  } else {
+    RTCLogError(@"Failed to set session active to %d. Error:%@",
+                shouldActivate, error.localizedDescription);
+  }
+}
+
+// Recomputes canPlayOrRecord and notifies delegates when it changes. The
+// notification is sent outside the @synchronized block so delegate code is
+// not invoked while holding the lock. (The previous |shouldNotify| flag was
+// dead: the unchanged case returns early, so control past the block always
+// implied a notification.)
+- (void)updateCanPlayOrRecord {
+  BOOL canPlayOrRecord = NO;
+  @synchronized(self) {
+    canPlayOrRecord = !self.useManualAudio || self.isAudioEnabled;
+    if (_canPlayOrRecord == canPlayOrRecord) {
+      return;
+    }
+    _canPlayOrRecord = canPlayOrRecord;
+  }
+  [self notifyDidChangeCanPlayOrRecord:canPlayOrRecord];
+}
+
+// RTCAudioSessionActivationDelegate: called when the session was activated
+// outside the app (e.g. by CallKit). Balances the count so later
+// deactivation bookkeeping stays correct.
+- (void)audioSessionDidActivate:(AVAudioSession *)session {
+  if (_session != session) {
+    RTCLogError(@"audioSessionDidActivate called on different AVAudioSession");
+  }
+  [self incrementActivationCount];
+  self.isActive = YES;
+}
+
+// RTCAudioSessionActivationDelegate: called when the session was deactivated
+// outside the app. Mirrors audioSessionDidActivate:.
+- (void)audioSessionDidDeactivate:(AVAudioSession *)session {
+  if (_session != session) {
+    RTCLogError(@"audioSessionDidDeactivate called on different AVAudioSession");
+  }
+  self.isActive = NO;
+  [self decrementActivationCount];
+}
+
+// KVO callback. Any observation on |_session| is treated as an output
+// volume change; unrelated observations are forwarded to super per KVO
+// convention.
+// NOTE(review): keyPath and context are not checked here — if additional
+// key paths are ever observed on _session this must dispatch on keyPath
+// (ideally with a static context pointer); confirm registration sites.
+- (void)observeValueForKeyPath:(NSString *)keyPath
+                      ofObject:(id)object
+                        change:(NSDictionary *)change
+                       context:(void *)context {
+  if (object == _session) {
+    NSNumber *newVolume = change[NSKeyValueChangeNewKey];
+    RTCLog(@"OutputVolumeDidChange to %f", newVolume.floatValue);
+    [self notifyDidChangeOutputVolume:newVolume.floatValue];
+  } else {
+    [super observeValueForKeyPath:keyPath
+                         ofObject:object
+                           change:change
+                          context:context];
+  }
+}
+
+// Notifies delegates that an AVAudioSession interruption began. The optional
+// selector is checked per delegate before messaging.
+- (void)notifyDidBeginInterruption {
+  SEL sel = @selector(audioSessionDidBeginInterruption:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSessionDidBeginInterruption:self];
+    }
+  }
+}
+
+// Notifies delegates that an interruption ended, forwarding whether the
+// session should be resumed.
+- (void)notifyDidEndInterruptionWithShouldResumeSession:
+    (BOOL)shouldResumeSession {
+  SEL sel = @selector(audioSessionDidEndInterruption:shouldResumeSession:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSessionDidEndInterruption:self
+                           shouldResumeSession:shouldResumeSession];
+    }
+  }
+}
+
+// Notifies delegates of an audio route change, forwarding the system reason
+// and the previous route description.
+- (void)notifyDidChangeRouteWithReason:(AVAudioSessionRouteChangeReason)reason
+    previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
+  SEL sel = @selector(audioSessionDidChangeRoute:reason:previousRoute:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSessionDidChangeRoute:self
+                                    reason:reason
+                             previousRoute:previousRoute];
+    }
+  }
+}
+
+// Notifies delegates that the media server was terminated.
+- (void)notifyMediaServicesWereLost {
+  SEL sel = @selector(audioSessionMediaServerTerminated:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSessionMediaServerTerminated:self];
+    }
+  }
+}
+
+// Notifies delegates that the media server was reset (restarted).
+- (void)notifyMediaServicesWereReset {
+  SEL sel = @selector(audioSessionMediaServerReset:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSessionMediaServerReset:self];
+    }
+  }
+}
+
+// Notifies delegates of a change in the canPlayOrRecord state.
+- (void)notifyDidChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
+  SEL sel = @selector(audioSession:didChangeCanPlayOrRecord:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSession:self didChangeCanPlayOrRecord:canPlayOrRecord];
+    }
+  }
+}
+
+// Notifies delegates that playout/recording is about to start.
+- (void)notifyDidStartPlayOrRecord {
+  SEL sel = @selector(audioSessionDidStartPlayOrRecord:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSessionDidStartPlayOrRecord:self];
+    }
+  }
+}
+
+// Notifies delegates that playout/recording stopped.
+- (void)notifyDidStopPlayOrRecord {
+  SEL sel = @selector(audioSessionDidStopPlayOrRecord:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSessionDidStopPlayOrRecord:self];
+    }
+  }
+}
+
+// Notifies delegates of an output volume change observed via KVO.
+- (void)notifyDidChangeOutputVolume:(float)volume {
+  SEL sel = @selector(audioSession:didChangeOutputVolume:);
+  for (auto delegate : self.delegates) {
+    if ([delegate respondsToSelector:sel]) {
+      [delegate audioSession:self didChangeOutputVolume:volume];
+    }
+  }
+}
+
+@end
diff --git a/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSessionConfiguration.m b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSessionConfiguration.m
new file mode 100644
index 0000000..fe7b544
--- /dev/null
+++ b/webrtc/sdk/objc/Framework/Classes/Audio/RTCAudioSessionConfiguration.m
@@ -0,0 +1,134 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "WebRTC/RTCAudioSession.h"
+#import "WebRTC/RTCAudioSessionConfiguration.h"
+
+#import "WebRTC/RTCDispatcher.h"
+#import "WebRTC/UIDevice+RTCDevice.h"
+
+
+// Try to use mono to save resources. Also avoids channel format conversion
+// in the I/O audio unit. Initial tests have shown that it is possible to use
+// mono natively for built-in microphones and for BT headsets but not for
+// wired headsets. Wired headsets only support stereo as native channel format
+// but it is a low cost operation to do a format conversion to mono in the
+// audio unit. Hence, we will not hit a RTC_CHECK in
+// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
+// preferred number of channels and the actual number of channels.
+const int kRTCAudioSessionPreferredNumberOfChannels = 1;
+
+// Preferred hardware sample rate (unit is in Hertz). The client sample rate
+// will be set to this value as well to avoid resampling in the audio unit's
+// format converter. Note that some devices, e.g. BT headsets, only support
+// 8000Hz as native sample rate.
+const double kRTCAudioSessionHighPerformanceSampleRate = 48000.0;
+
+// A lower sample rate will be used for devices with only one core
+// (e.g. iPhone 4). The goal is to reduce the CPU load of the application.
+const double kRTCAudioSessionLowComplexitySampleRate = 16000.0;
+
+// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
+// size used by WebRTC. The exact actual size will differ between devices.
+// Example: using 48kHz on iPhone 6 results in a native buffer size of
+// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
+// take care of any buffering required to convert between native buffers and
+// buffers used by WebRTC. It is beneficial for the performance if the native
+// size is as close to 10ms as possible since it results in "clean" callback
+// sequence without bursts of callbacks back to back.
+const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01;
+
+// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
+// It will result in a lower CPU consumption at the cost of a larger latency.
+// The size of 60ms is based on instrumentation that shows a significant
+// reduction in CPU load compared with 10ms on low-end devices.
+// TODO(henrika): monitor this size and determine if it should be modified.
+const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
+
+// Global default WebRTC configuration; guarded by @synchronized(self) in the
+// class methods below. Installed once in +initialize and replaceable via
+// +setWebRTCConfiguration:.
+static RTCAudioSessionConfiguration *gWebRTCConfiguration = nil;
+
+@implementation RTCAudioSessionConfiguration
+
+@synthesize category = _category;
+@synthesize categoryOptions = _categoryOptions;
+@synthesize mode = _mode;
+@synthesize sampleRate = _sampleRate;
+@synthesize ioBufferDuration = _ioBufferDuration;
+@synthesize inputNumberOfChannels = _inputNumberOfChannels;
+@synthesize outputNumberOfChannels = _outputNumberOfChannels;
+
+// Designated initializer. Populates the configuration with the defaults
+// WebRTC needs, scaled to the device's capabilities (sample rate and I/O
+// buffer duration are lowered on single-core devices).
+- (instancetype)init {
+  if (self = [super init]) {
+    // Use a category which supports simultaneous recording and playback.
+    // By default, using this category implies that our app’s audio is
+    // nonmixable, hence activating the session will interrupt any other
+    // audio sessions which are also nonmixable.
+    _category = AVAudioSessionCategoryPlayAndRecord;
+    _categoryOptions = AVAudioSessionCategoryOptionAllowBluetooth;
+
+    // Specify mode for two-way voice communication (e.g. VoIP).
+    _mode = AVAudioSessionModeVoiceChat;
+
+    // Set the session's sample rate or the hardware sample rate.
+    // It is essential that we use the same sample rate as stream format
+    // to ensure that the I/O unit does not have to do sample rate conversion.
+    // Set the preferred audio I/O buffer duration, in seconds.
+    NSUInteger processorCount = [NSProcessInfo processInfo].processorCount;
+    // Use best sample rate and buffer duration if the CPU has more than one
+    // core.
+    if (processorCount > 1 && [UIDevice deviceType] != RTCDeviceTypeIPhone4S) {
+      _sampleRate = kRTCAudioSessionHighPerformanceSampleRate;
+      _ioBufferDuration = kRTCAudioSessionHighPerformanceIOBufferDuration;
+    } else {
+      _sampleRate = kRTCAudioSessionLowComplexitySampleRate;
+      _ioBufferDuration = kRTCAudioSessionLowComplexityIOBufferDuration;
+    }
+
+    // We try to use mono in both directions to save resources and format
+    // conversions in the audio unit. Some devices only support stereo;
+    // e.g. wired headset on iPhone 6.
+    // TODO(henrika): add support for stereo if needed.
+    _inputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
+    _outputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
+  }
+  return self;
+}
+
++ (void)initialize {
+  // +initialize runs once for every class in the hierarchy. Without the
+  // class check, a subclass that does not override it would re-run this body
+  // and reset gWebRTCConfiguration, clobbering any custom configuration
+  // installed via +setWebRTCConfiguration:.
+  if (self == [RTCAudioSessionConfiguration class]) {
+    gWebRTCConfiguration = [[self alloc] init];
+  }
+}
+
+// Snapshots the live AVAudioSession state (via the RTCAudioSession proxy)
+// into a new configuration object.
++ (instancetype)currentConfiguration {
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  RTCAudioSessionConfiguration *config =
+      [[RTCAudioSessionConfiguration alloc] init];
+  config.category = session.category;
+  config.categoryOptions = session.categoryOptions;
+  config.mode = session.mode;
+  config.sampleRate = session.sampleRate;
+  config.ioBufferDuration = session.IOBufferDuration;
+  config.inputNumberOfChannels = session.inputNumberOfChannels;
+  config.outputNumberOfChannels = session.outputNumberOfChannels;
+  return config;
+}
+
+// Returns the configuration WebRTC will apply; defaults are installed in
+// +initialize and may be overridden with +setWebRTCConfiguration:.
++ (instancetype)webRTCConfiguration {
+  @synchronized(self) {
+    return gWebRTCConfiguration;
+  }
+}
+
+// Replaces the default WebRTC configuration.
++ (void)setWebRTCConfiguration:(RTCAudioSessionConfiguration *)configuration {
+  @synchronized(self) {
+    gWebRTCConfiguration = configuration;
+  }
+}
+
+@end
diff --git a/webrtc/sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h b/webrtc/sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h
new file mode 100644
index 0000000..c0ea216
--- /dev/null
+++ b/webrtc/sdk/objc/Framework/Headers/WebRTC/RTCAudioSession.h
@@ -0,0 +1,242 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#import "WebRTC/RTCMacros.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+extern NSString * const kRTCAudioSessionErrorDomain;
+/** Method that requires lock was called without lock. */
+extern NSInteger const kRTCAudioSessionErrorLockRequired;
+/** Unknown configuration error occurred. */
+extern NSInteger const kRTCAudioSessionErrorConfiguration;
+
+@class RTCAudioSession;
+@class RTCAudioSessionConfiguration;
+
+// Surfaces AVAudioSession events. WebRTC will listen directly for notifications
+// from AVAudioSession and handle them before calling these delegate methods,
+// at which point applications can perform additional processing if required.
+RTC_EXPORT
+@protocol RTCAudioSessionDelegate <NSObject>
+
+@optional
+/** Called on a system notification thread when AVAudioSession starts an
+ *  interruption event.
+ */
+- (void)audioSessionDidBeginInterruption:(RTCAudioSession *)session;
+
+/** Called on a system notification thread when AVAudioSession ends an
+ *  interruption event.
+ */
+- (void)audioSessionDidEndInterruption:(RTCAudioSession *)session
+                   shouldResumeSession:(BOOL)shouldResumeSession;
+
+/** Called on a system notification thread when AVAudioSession changes the
+ *  route.
+ */
+- (void)audioSessionDidChangeRoute:(RTCAudioSession *)session
+           reason:(AVAudioSessionRouteChangeReason)reason
+    previousRoute:(AVAudioSessionRouteDescription *)previousRoute;
+
+/** Called on a system notification thread when AVAudioSession media server
+ *  terminates.
+ */
+- (void)audioSessionMediaServerTerminated:(RTCAudioSession *)session;
+
+/** Called on a system notification thread when AVAudioSession media server
+ *  restarts.
+ */
+- (void)audioSessionMediaServerReset:(RTCAudioSession *)session;
+
+// TODO(tkchin): Maybe handle SilenceSecondaryAudioHintNotification.
+
+- (void)audioSession:(RTCAudioSession *)session
+    didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord;
+
+/** Called on a WebRTC thread when the audio device is notified to begin
+ *  playback or recording.
+ */
+- (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session;
+
+/** Called on a WebRTC thread when the audio device is notified to stop
+ *  playback or recording.
+ */
+- (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session;
+
+/** Called when the AVAudioSession output volume value changes. */
+- (void)audioSession:(RTCAudioSession *)audioSession
+    didChangeOutputVolume:(float)outputVolume;
+
+@end
+
+/** This is a protocol used to inform RTCAudioSession when the audio session
+ *  activation state has changed outside of RTCAudioSession. The current known
+ *  use case of this is when CallKit activates the audio session for the
+ *  application.
+ */
+RTC_EXPORT
+@protocol RTCAudioSessionActivationDelegate <NSObject>
+
+/** Called when the audio session is activated outside of the app by iOS. */
+- (void)audioSessionDidActivate:(AVAudioSession *)session;
+
+/** Called when the audio session is deactivated outside of the app by iOS. */
+- (void)audioSessionDidDeactivate:(AVAudioSession *)session;
+
+@end
+
+/** Proxy class for AVAudioSession that adds a locking mechanism similar to
+ *  AVCaptureDevice. This is used so that interleaving configurations between
+ *  WebRTC and the application layer are avoided.
+ *
+ *  RTCAudioSession also coordinates activation so that the audio session is
+ *  activated only once. See |setActive:error:|.
+ */
+RTC_EXPORT
+@interface RTCAudioSession : NSObject <RTCAudioSessionActivationDelegate>
+
+/** Convenience property to access the AVAudioSession singleton. Callers should
+ *  not call setters on AVAudioSession directly, but other method invocations
+ *  are fine.
+ */
+@property(nonatomic, readonly) AVAudioSession *session;
+
+/** Our best guess at whether the session is active based on results of calls to
+ *  AVAudioSession.
+ */
+@property(nonatomic, readonly) BOOL isActive;
+/** Whether RTCAudioSession is currently locked for configuration. */
+@property(nonatomic, readonly) BOOL isLocked;
+
+/** If YES, WebRTC will not initialize the audio unit automatically when an
+ *  audio track is ready for playout or recording. Instead, applications should
+ *  call setIsAudioEnabled. If NO, WebRTC will initialize the audio unit
+ *  as soon as an audio track is ready for playout or recording.
+ */
+@property(nonatomic, assign) BOOL useManualAudio;
+
+/** This property is only effective if useManualAudio is YES.
+ *  Represents permission for WebRTC to initialize the VoIP audio unit.
+ *  When set to NO, if the VoIP audio unit used by WebRTC is active, it will be
+ *  stopped and uninitialized. This will stop incoming and outgoing audio.
+ *  When set to YES, WebRTC will initialize and start the audio unit when it is
+ *  needed (e.g. due to establishing an audio connection).
+ *  This property was introduced to work around an issue where if an AVPlayer is
+ *  playing audio while the VoIP audio unit is initialized, its audio would be
+ *  either cut off completely or played at a reduced volume. By preventing
+ *  the audio unit from being initialized until after the audio has completed,
+ *  we are able to prevent the abrupt cutoff.
+ */
+@property(nonatomic, assign) BOOL isAudioEnabled;
+
+// Proxy properties.
+@property(readonly) NSString *category;
+@property(readonly) AVAudioSessionCategoryOptions categoryOptions;
+@property(readonly) NSString *mode;
+@property(readonly) BOOL secondaryAudioShouldBeSilencedHint;
+@property(readonly) AVAudioSessionRouteDescription *currentRoute;
+@property(readonly) NSInteger maximumInputNumberOfChannels;
+@property(readonly) NSInteger maximumOutputNumberOfChannels;
+@property(readonly) float inputGain;
+@property(readonly) BOOL inputGainSettable;
+@property(readonly) BOOL inputAvailable;
+@property(readonly, nullable)
+    NSArray<AVAudioSessionDataSourceDescription *> * inputDataSources;
+@property(readonly, nullable)
+  AVAudioSessionDataSourceDescription *inputDataSource;
+@property(readonly, nullable)
+    NSArray<AVAudioSessionDataSourceDescription *> * outputDataSources;
+@property(readonly, nullable)
+    AVAudioSessionDataSourceDescription *outputDataSource;
+@property(readonly) double sampleRate;
+@property(readonly) double preferredSampleRate;
+@property(readonly) NSInteger inputNumberOfChannels;
+@property(readonly) NSInteger outputNumberOfChannels;
+@property(readonly) float outputVolume;
+@property(readonly) NSTimeInterval inputLatency;
+@property(readonly) NSTimeInterval outputLatency;
+@property(readonly) NSTimeInterval IOBufferDuration;
+@property(readonly) NSTimeInterval preferredIOBufferDuration;
+
+/** Default constructor. */
++ (instancetype)sharedInstance;
+- (instancetype)init NS_UNAVAILABLE;
+
+/** Adds a delegate, which is held weakly. */
+- (void)addDelegate:(id<RTCAudioSessionDelegate>)delegate;
+/** Removes an added delegate. */
+- (void)removeDelegate:(id<RTCAudioSessionDelegate>)delegate;
+
+/** Request exclusive access to the audio session for configuration. This call
+ *  will block if the lock is held by another object.
+ */
+- (void)lockForConfiguration;
+/** Relinquishes exclusive access to the audio session. */
+- (void)unlockForConfiguration;
+
+/** If |active|, activates the audio session if it isn't already active.
+ *  Successful calls must be balanced with a setActive:NO when activation is no
+ *  longer required. If not |active|, deactivates the audio session if one is
+ *  active and this is the last balanced call. When deactivating, the
+ *  AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation option is passed to
+ *  AVAudioSession.
+ */
+- (BOOL)setActive:(BOOL)active
+            error:(NSError **)outError;
+
+// The following methods are proxies for the associated methods on
+// AVAudioSession. |lockForConfiguration| must be called before using them
+// otherwise they will fail with kRTCAudioSessionErrorLockRequired.
+
+- (BOOL)setCategory:(NSString *)category
+        withOptions:(AVAudioSessionCategoryOptions)options
+              error:(NSError **)outError;
+- (BOOL)setMode:(NSString *)mode error:(NSError **)outError;
+- (BOOL)setInputGain:(float)gain error:(NSError **)outError;
+- (BOOL)setPreferredSampleRate:(double)sampleRate error:(NSError **)outError;
+- (BOOL)setPreferredIOBufferDuration:(NSTimeInterval)duration
+                               error:(NSError **)outError;
+- (BOOL)setPreferredInputNumberOfChannels:(NSInteger)count
+                                    error:(NSError **)outError;
+- (BOOL)setPreferredOutputNumberOfChannels:(NSInteger)count
+                                     error:(NSError **)outError;
+- (BOOL)overrideOutputAudioPort:(AVAudioSessionPortOverride)portOverride
+                          error:(NSError **)outError;
+- (BOOL)setPreferredInput:(AVAudioSessionPortDescription *)inPort
+                    error:(NSError **)outError;
+- (BOOL)setInputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+                     error:(NSError **)outError;
+- (BOOL)setOutputDataSource:(AVAudioSessionDataSourceDescription *)dataSource
+                      error:(NSError **)outError;
+@end
+
+@interface RTCAudioSession (Configuration)
+
+/** Applies the configuration to the current session. Attempts to set all
+ *  properties even if previous ones fail. Only the last error will be
+ *  returned.
+ *  |lockForConfiguration| must be called first.
+ */
+- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
+                   error:(NSError **)outError;
+
+/** Convenience method that calls both setConfiguration and setActive.
+ *  |lockForConfiguration| must be called first.
+ */
+- (BOOL)setConfiguration:(RTCAudioSessionConfiguration *)configuration
+                  active:(BOOL)active
+                   error:(NSError **)outError;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/webrtc/sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h b/webrtc/sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h
new file mode 100644
index 0000000..6a02751
--- /dev/null
+++ b/webrtc/sdk/objc/Framework/Headers/WebRTC/RTCAudioSessionConfiguration.h
@@ -0,0 +1,48 @@
+/*
+ *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <AVFoundation/AVFoundation.h>
+#import <Foundation/Foundation.h>
+
+#import "WebRTC/RTCMacros.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+extern const int kRTCAudioSessionPreferredNumberOfChannels;
+extern const double kRTCAudioSessionHighPerformanceSampleRate;
+extern const double kRTCAudioSessionLowComplexitySampleRate;
+extern const double kRTCAudioSessionHighPerformanceIOBufferDuration;
+extern const double kRTCAudioSessionLowComplexityIOBufferDuration;
+
+// Struct to hold configuration values.
+RTC_EXPORT
+@interface RTCAudioSessionConfiguration : NSObject
+
+@property(nonatomic, strong) NSString *category;
+@property(nonatomic, assign) AVAudioSessionCategoryOptions categoryOptions;
+@property(nonatomic, strong) NSString *mode;
+@property(nonatomic, assign) double sampleRate;
+@property(nonatomic, assign) NSTimeInterval ioBufferDuration;
+@property(nonatomic, assign) NSInteger inputNumberOfChannels;
+@property(nonatomic, assign) NSInteger outputNumberOfChannels;
+
+/** Initializes configuration to defaults. */
+- (instancetype)init NS_DESIGNATED_INITIALIZER;
+
+/** Returns the current configuration of the audio session. */
++ (instancetype)currentConfiguration;
+/** Returns the configuration that WebRTC needs. */
++ (instancetype)webRTCConfiguration;
+/** Provide a way to override the default configuration. */
++ (void)setWebRTCConfiguration:(RTCAudioSessionConfiguration *)configuration;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/webrtc/sdk/objc/Framework/UnitTests/RTCAudioSessionTest.mm b/webrtc/sdk/objc/Framework/UnitTests/RTCAudioSessionTest.mm
new file mode 100644
index 0000000..f932457
--- /dev/null
+++ b/webrtc/sdk/objc/Framework/UnitTests/RTCAudioSessionTest.mm
@@ -0,0 +1,338 @@
+/*
+ *  Copyright 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+#import <OCMock/OCMock.h>
+
+#include "webrtc/base/gunit.h"
+
+#import "RTCAudioSession+Private.h"
+
+#import "WebRTC/RTCAudioSession.h"
+#import "WebRTC/RTCAudioSessionConfiguration.h"
+
+// Test double that records the most recent value reported through
+// -audioSession:didChangeOutputVolume:; every other delegate callback
+// is a deliberate no-op.
+@interface RTCAudioSessionTestDelegate : NSObject <RTCAudioSessionDelegate>
+
+// Last volume received via the delegate callback; -1 until the
+// callback has fired (AVAudioSession reports volumes in [0, 1]).
+@property (nonatomic, readonly) float outputVolume;
+
+@end
+
+@implementation RTCAudioSessionTestDelegate
+
+@synthesize outputVolume = _outputVolume;
+
+- (instancetype)init {
+  if (self = [super init]) {
+    // Sentinel: real volumes are in [0, 1], so a test can detect
+    // whether the volume callback ever fired.
+    _outputVolume = -1;
+  }
+  return self;
+}
+
+// The delegate callbacks below are intentionally empty; this double
+// only needs to satisfy the protocol and capture volume changes.
+- (void)audioSessionDidBeginInterruption:(RTCAudioSession *)session {
+}
+
+- (void)audioSessionDidEndInterruption:(RTCAudioSession *)session
+                   shouldResumeSession:(BOOL)shouldResumeSession {
+}
+
+- (void)audioSessionDidChangeRoute:(RTCAudioSession *)session
+           reason:(AVAudioSessionRouteChangeReason)reason
+    previousRoute:(AVAudioSessionRouteDescription *)previousRoute {
+}
+
+- (void)audioSessionMediaServerTerminated:(RTCAudioSession *)session {
+}
+
+- (void)audioSessionMediaServerReset:(RTCAudioSession *)session {
+}
+
+- (void)audioSessionShouldConfigure:(RTCAudioSession *)session {
+}
+
+- (void)audioSessionShouldUnconfigure:(RTCAudioSession *)session {
+}
+
+- (void)audioSession:(RTCAudioSession *)audioSession
+    didChangeOutputVolume:(float)outputVolume {
+  // Capture the reported volume for later inspection by the test.
+  _outputVolume = outputVolume;
+}
+
+@end
+
+// A delegate that adds itself to the audio session on init and removes itself
+// in its dealloc. Used by testRemoveDelegateOnDealloc below.
+@interface RTCTestRemoveOnDeallocDelegate : RTCAudioSessionTestDelegate
+@end
+
+@implementation RTCTestRemoveOnDeallocDelegate
+
+- (instancetype)init {
+  if (self = [super init]) {
+    // Register with the shared session immediately; the matching
+    // -removeDelegate: happens in -dealloc below.
+    RTCAudioSession *session = [RTCAudioSession sharedInstance];
+    [session addDelegate:self];
+  }
+  return self;
+}
+
+- (void)dealloc {
+  // Removing a delegate from within -dealloc is the scenario covered by
+  // the regression test testRemoveDelegateOnDealloc.
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  [session removeDelegate:self];
+}
+
+@end
+
+
+// Not an XCTestCase: these methods are plain test bodies driven by the
+// gtest TEST_F wrappers in the webrtc namespace at the end of this file.
+@interface RTCAudioSessionTest : NSObject
+
+- (void)testLockForConfiguration;
+
+@end
+
+@implementation RTCAudioSessionTest
+
+// Verifies that the configuration lock nests: it stays held across
+// repeated lock calls and is only released after the matching number of
+// unlock calls.
+- (void)testLockForConfiguration {
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+
+  for (size_t i = 0; i < 2; i++) {
+    [session lockForConfiguration];
+    EXPECT_TRUE(session.isLocked);
+  }
+  for (size_t i = 0; i < 2; i++) {
+    EXPECT_TRUE(session.isLocked);
+    [session unlockForConfiguration];
+  }
+  // Unlocked only once every lock has been balanced by an unlock.
+  EXPECT_FALSE(session.isLocked);
+}
+
+// Verifies that each addDelegate: grows the delegate list by one and
+// that removing every delegate empties it again.
+- (void)testAddAndRemoveDelegates {
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  NSMutableArray *delegates = [NSMutableArray array];
+  const size_t count = 5;
+  for (size_t i = 0; i < count; ++i) {
+    RTCAudioSessionTestDelegate *delegate =
+        [[RTCAudioSessionTestDelegate alloc] init];
+    [session addDelegate:delegate];
+    // The NSMutableArray keeps the delegate strongly referenced; the
+    // session itself only holds weak references.
+    [delegates addObject:delegate];
+    EXPECT_EQ(i + 1, session.delegates.size());
+  }
+  [delegates enumerateObjectsUsingBlock:^(RTCAudioSessionTestDelegate *obj,
+                                          NSUInteger idx,
+                                          BOOL *stop) {
+    [session removeDelegate:obj];
+  }];
+  EXPECT_EQ(0u, session.delegates.size());
+}
+
+// Verifies that pushDelegate: prepends to the delegate list (so a pushed
+// delegate is notified first), while addDelegate: appends.
+- (void)testPushDelegate {
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  NSMutableArray *delegates = [NSMutableArray array];
+  const size_t count = 2;
+  for (size_t i = 0; i < count; ++i) {
+    RTCAudioSessionTestDelegate *delegate =
+        [[RTCAudioSessionTestDelegate alloc] init];
+    [session addDelegate:delegate];
+    [delegates addObject:delegate];
+  }
+  // Test that it gets added to the front of the list.
+  RTCAudioSessionTestDelegate *pushedDelegate =
+      [[RTCAudioSessionTestDelegate alloc] init];
+  [session pushDelegate:pushedDelegate];
+  EXPECT_TRUE(pushedDelegate == session.delegates[0]);
+
+  // Test that it stays at the front of the list.
+  for (size_t i = 0; i < count; ++i) {
+    RTCAudioSessionTestDelegate *delegate =
+        [[RTCAudioSessionTestDelegate alloc] init];
+    [session addDelegate:delegate];
+    [delegates addObject:delegate];
+  }
+  EXPECT_TRUE(pushedDelegate == session.delegates[0]);
+
+  // Test that the next one goes to the front too.
+  pushedDelegate = [[RTCAudioSessionTestDelegate alloc] init];
+  [session pushDelegate:pushedDelegate];
+  EXPECT_TRUE(pushedDelegate == session.delegates[0]);
+}
+
+// Tests that delegates added to the audio session properly zero out. This is
+// checking an implementation detail (that vectors of __weak work as expected).
+- (void)testZeroingWeakDelegate {
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  @autoreleasepool {
+    // Add a delegate to the session. There should be one delegate at this
+    // point.
+    RTCAudioSessionTestDelegate *delegate =
+        [[RTCAudioSessionTestDelegate alloc] init];
+    [session addDelegate:delegate];
+    EXPECT_EQ(1u, session.delegates.size());
+    EXPECT_TRUE(session.delegates[0]);
+  }
+  // The previously created delegate should've de-alloced, leaving a nil ptr.
+  EXPECT_FALSE(session.delegates[0]);
+  RTCAudioSessionTestDelegate *delegate =
+      [[RTCAudioSessionTestDelegate alloc] init];
+  [session addDelegate:delegate];
+  // On adding a new delegate, nil ptrs should've been cleared.
+  EXPECT_EQ(1u, session.delegates.size());
+  EXPECT_TRUE(session.delegates[0]);
+}
+
+// Tests that we don't crash when removing delegates in dealloc.
+// Added as a regression test.
+- (void)testRemoveDelegateOnDealloc {
+  @autoreleasepool {
+    // The delegate registers itself in -init and unregisters in -dealloc,
+    // which runs when the autorelease pool drains.
+    RTCTestRemoveOnDeallocDelegate *delegate =
+        [[RTCTestRemoveOnDeallocDelegate alloc] init];
+    EXPECT_TRUE(delegate);
+  }
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  EXPECT_EQ(0u, session.delegates.size());
+}
+
+// Verifies that audioSessionDidActivate:/DidDeactivate: increment and
+// decrement the session's activation count symmetrically.
+- (void)testAudioSessionActivation {
+  RTCAudioSession *audioSession = [RTCAudioSession sharedInstance];
+  EXPECT_EQ(0, audioSession.activationCount);
+  [audioSession audioSessionDidActivate:[AVAudioSession sharedInstance]];
+  EXPECT_EQ(1, audioSession.activationCount);
+  [audioSession audioSessionDidDeactivate:[AVAudioSession sharedInstance]];
+  EXPECT_EQ(0, audioSession.activationCount);
+}
+
+// Hack - fixes OCMVerify link error
+// Link error is: Undefined symbols for architecture i386:
+// "OCMMakeLocation(objc_object*, char const*, int)", referenced from:
+// -[RTCAudioSessionTest testConfigureWebRTCSession] in RTCAudioSessionTest.o
+// ld: symbol(s) not found for architecture i386
+// REASON: https://github.com/erikdoe/ocmock/issues/238
+// NOTE(review): this re-implements OCMock's own helper verbatim so the
+// linker can resolve it; keep in sync with the OCMock definition.
+OCMLocation *OCMMakeLocation(id testCase, const char *fileCString, int line){
+  return [OCMLocation locationWithTestCase:testCase
+                                      file:[NSString stringWithUTF8String:fileCString]
+                                      line:line];
+}
+
+// Forces AVAudioSession's -setActive:withOptions:error: to fail via a
+// partial mock and verifies that configureWebRTCSession reports the
+// failure and leaves the activation count untouched.
+- (void)testConfigureWebRTCSession {
+  NSError *error = nil;
+
+  // Invocation handler that simulates a setActive: failure by writing an
+  // AVAudioSession error into the NSError** out-param (argument index 4:
+  // 0 = self, 1 = _cmd, 2 = active, 3 = options, 4 = error) and
+  // returning NO.
+  void (^setActiveBlock)(NSInvocation *invocation) = ^(NSInvocation *invocation) {
+    __autoreleasing NSError **retError;
+    [invocation getArgument:&retError atIndex:4];
+    *retError = [NSError errorWithDomain:@"AVAudioSession"
+                                    code:AVAudioSessionErrorInsufficientPriority
+                                userInfo:nil];
+    BOOL failure = NO;
+    [invocation setReturnValue:&failure];
+  };
+
+  id mockAVAudioSession = OCMPartialMock([AVAudioSession sharedInstance]);
+  // ignoringNonObjectArgs: match any BOOL/options values, not just YES/0.
+  OCMStub([[mockAVAudioSession ignoringNonObjectArgs]
+      setActive:YES withOptions:0 error:((NSError __autoreleasing **)[OCMArg anyPointer])]).
+      andDo(setActiveBlock);
+
+  // Route RTCAudioSession's underlying AVAudioSession through the mock.
+  id mockAudioSession = OCMPartialMock([RTCAudioSession sharedInstance]);
+  OCMStub([mockAudioSession session]).andReturn(mockAVAudioSession);
+
+  RTCAudioSession *audioSession = mockAudioSession;
+  EXPECT_EQ(0, audioSession.activationCount);
+  [audioSession lockForConfiguration];
+  EXPECT_TRUE([audioSession checkLock:nil]);
+  // configureWebRTCSession is forced to fail in the above mock interface,
+  // so activationCount should remain 0
+  OCMExpect([[mockAVAudioSession ignoringNonObjectArgs]
+      setActive:YES withOptions:0 error:((NSError __autoreleasing **)[OCMArg anyPointer])]).
+      andDo(setActiveBlock);
+  OCMExpect([mockAudioSession session]).andReturn(mockAVAudioSession);
+  EXPECT_FALSE([audioSession configureWebRTCSession:&error]);
+  EXPECT_EQ(0, audioSession.activationCount);
+
+  id session = audioSession.session;
+  EXPECT_EQ(session, mockAVAudioSession);
+  EXPECT_EQ(NO, [mockAVAudioSession setActive:YES withOptions:0 error:&error]);
+  [audioSession unlockForConfiguration];
+
+  OCMVerify([mockAudioSession session]);
+  OCMVerify([[mockAVAudioSession ignoringNonObjectArgs] setActive:YES withOptions:0 error:&error]);
+  OCMVerify([[mockAVAudioSession ignoringNonObjectArgs] setActive:NO withOptions:0 error:&error]);
+
+  // Stop mocking so the shared singletons behave normally in later tests.
+  [mockAVAudioSession stopMocking];
+  [mockAudioSession stopMocking];
+}
+
+// Verifies that a KVO change of AVAudioSession's outputVolume is
+// forwarded to registered delegates via
+// -audioSession:didChangeOutputVolume:.
+- (void)testAudioVolumeDidNotify {
+  RTCAudioSession *session = [RTCAudioSession sharedInstance];
+  RTCAudioSessionTestDelegate *delegate =
+      [[RTCAudioSessionTestDelegate alloc] init];
+  [session addDelegate:delegate];
+
+  // Inject the KVO notification directly rather than changing the
+  // device volume.
+  [session observeValueForKeyPath:@"outputVolume"
+                         ofObject:[AVAudioSession sharedInstance]
+                           change:
+        @{NSKeyValueChangeNewKey :
+            @([AVAudioSession sharedInstance].outputVolume) }
+                          context:nil];
+
+  // The delegate's sentinel (-1) must have been overwritten with the
+  // actual session volume.
+  EXPECT_NE(delegate.outputVolume, -1);
+  EXPECT_EQ([AVAudioSession sharedInstance].outputVolume, delegate.outputVolume);
+}
+
+@end
+
+namespace webrtc {
+
+// gtest fixture driving the Objective-C test bodies above. TearDown
+// clears all delegates from the shared RTCAudioSession so state does not
+// leak between tests (the session is a process-wide singleton).
+class AudioSessionTest : public ::testing::Test {
+ protected:
+  void TearDown() {
+    RTCAudioSession *session = [RTCAudioSession sharedInstance];
+    for (id<RTCAudioSessionDelegate> delegate : session.delegates) {
+      [session removeDelegate:delegate];
+    }
+  }
+};
+
+// Each TEST_F simply instantiates the ObjC test class and forwards to
+// the corresponding -test… method.
+TEST_F(AudioSessionTest, LockForConfiguration) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testLockForConfiguration];
+}
+
+TEST_F(AudioSessionTest, AddAndRemoveDelegates) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testAddAndRemoveDelegates];
+}
+
+TEST_F(AudioSessionTest, PushDelegate) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testPushDelegate];
+}
+
+TEST_F(AudioSessionTest, ZeroingWeakDelegate) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testZeroingWeakDelegate];
+}
+
+TEST_F(AudioSessionTest, RemoveDelegateOnDealloc) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testRemoveDelegateOnDealloc];
+}
+
+TEST_F(AudioSessionTest, AudioSessionActivation) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testAudioSessionActivation];
+}
+
+TEST_F(AudioSessionTest, ConfigureWebRTCSession) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testConfigureWebRTCSession];
+}
+
+TEST_F(AudioSessionTest, AudioVolumeDidNotify) {
+  RTCAudioSessionTest *test = [[RTCAudioSessionTest alloc] init];
+  [test testAudioVolumeDidNotify];
+}
+
+}  // namespace webrtc