/*
 *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#import <XCTest/XCTest.h>

#if defined(WEBRTC_IOS)
#import "sdk/objc/native/api/audio_device_module.h"
#endif

#include "rtc_base/scoped_ref_ptr.h"

typedef int32_t (^NeedMorePlayDataBlock)(const size_t nSamples,
                                         const size_t nBytesPerSample,
                                         const size_t nChannels,
                                         const uint32_t samplesPerSec,
                                         void* audioSamples,
                                         size_t& nSamplesOut,
                                         int64_t* elapsed_time_ms,
                                         int64_t* ntp_time_ms);

typedef int32_t (^RecordedDataIsAvailableBlock)(const void* audioSamples,
                                                const size_t nSamples,
                                                const size_t nBytesPerSample,
                                                const size_t nChannels,
                                                const uint32_t samplesPerSec,
                                                const uint32_t totalDelayMS,
                                                const int32_t clockDrift,
                                                const uint32_t currentMicLevel,
                                                const bool keyPressed,
                                                uint32_t& newMicLevel);

// This class implements the AudioTransport API and forwards all methods to
// the appropriate blocks.
class MockAudioTransport : public webrtc::AudioTransport {
 public:
  MockAudioTransport() {}
  ~MockAudioTransport() override {}

  void expectNeedMorePlayData(NeedMorePlayDataBlock block) {
    needMorePlayDataBlock = block;
  }

  void expectRecordedDataIsAvailable(RecordedDataIsAvailableBlock block) {
    recordedDataIsAvailableBlock = block;
  }

  int32_t NeedMorePlayData(const size_t nSamples,
                           const size_t nBytesPerSample,
                           const size_t nChannels,
                           const uint32_t samplesPerSec,
                           void* audioSamples,
                           size_t& nSamplesOut,
                           int64_t* elapsed_time_ms,
                           int64_t* ntp_time_ms) override {
    return needMorePlayDataBlock(nSamples,
                                 nBytesPerSample,
                                 nChannels,
                                 samplesPerSec,
                                 audioSamples,
                                 nSamplesOut,
                                 elapsed_time_ms,
                                 ntp_time_ms);
  }

  int32_t RecordedDataIsAvailable(const void* audioSamples,
                                  const size_t nSamples,
                                  const size_t nBytesPerSample,
                                  const size_t nChannels,
                                  const uint32_t samplesPerSec,
                                  const uint32_t totalDelayMS,
                                  const int32_t clockDrift,
                                  const uint32_t currentMicLevel,
                                  const bool keyPressed,
                                  uint32_t& newMicLevel) override {
    return recordedDataIsAvailableBlock(audioSamples,
                                        nSamples,
                                        nBytesPerSample,
                                        nChannels,
                                        samplesPerSec,
                                        totalDelayMS,
                                        clockDrift,
                                        currentMicLevel,
                                        keyPressed,
                                        newMicLevel);
  }

  void PullRenderData(int bits_per_sample,
                      int sample_rate,
                      size_t number_of_channels,
                      size_t number_of_frames,
                      void* audio_data,
                      int64_t* elapsed_time_ms,
                      int64_t* ntp_time_ms) override {}

 private:
  NeedMorePlayDataBlock needMorePlayDataBlock;
  RecordedDataIsAvailableBlock recordedDataIsAvailableBlock;
};
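
// Illustrative usage sketch (not itself a test): a test typically registers
// the mock as the ADM's audio callback and supplies a block for the expected
// direction, e.g.
//
//   MockAudioTransport transport;
//   transport.expectNeedMorePlayData(^int32_t(const size_t nSamples,
//       const size_t nBytesPerSample, const size_t nChannels,
//       const uint32_t samplesPerSec, void *audioSamples, size_t &nSamplesOut,
//       int64_t *elapsed_time_ms, int64_t *ntp_time_ms) {
//     nSamplesOut = nSamples;  // Report the buffer as filled.
//     return 0;
//   });
//   audioDeviceModule->RegisterAudioCallback(&transport);
//
// The test methods below follow this pattern, with XCTest expectations added
// to count callbacks.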

// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const NSUInteger kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const NSTimeInterval kTestTimeOutInSec = 20.0;
// Number of bits per PCM audio sample.
static const NSUInteger kBitsPerSample = 16;
// Number of bytes per PCM audio sample.
static const NSUInteger kBytesPerSample = kBitsPerSample / 8;
// Average number of audio callbacks per second assuming 10ms packet size.
static const NSUInteger kNumCallbacksPerSecond = 100;
// Play out a test file during this time (in seconds).
static const NSUInteger kFilePlayTimeInSec = 15;
// Run the full-duplex test during this time (in seconds).
// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
static const NSUInteger kFullDuplexTimeInSec = 10;
// Wait for the callback sequence to stabilize by ignoring this number of
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const NSUInteger kNumIgnoreFirstCallbacks = 50;
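
// For reference (derived from the constants above): with 10 ms buffers the
// audio unit is expected to deliver sample_rate / 100 frames per callback,
// e.g. 480 frames at 48000 Hz, i.e. 480 * kBytesPerSample * channels bytes
// of PCM per NeedMorePlayData or RecordedDataIsAvailable call.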

@interface RTCAudioDeviceModuleTests : XCTestCase {
  rtc::scoped_refptr<webrtc::AudioDeviceModule> audioDeviceModule;
  MockAudioTransport mock;
}

@property(nonatomic, assign) webrtc::AudioParameters playoutParameters;
@property(nonatomic, assign) webrtc::AudioParameters recordParameters;

@end

@implementation RTCAudioDeviceModuleTests

@synthesize playoutParameters;
@synthesize recordParameters;

- (void)setUp {
  [super setUp];
  audioDeviceModule = webrtc::CreateAudioDeviceModule();
  XCTAssertEqual(0, audioDeviceModule->Init());
  XCTAssertEqual(0, audioDeviceModule->GetPlayoutAudioParameters(&playoutParameters));
  XCTAssertEqual(0, audioDeviceModule->GetRecordAudioParameters(&recordParameters));
}

- (void)tearDown {
  XCTAssertEqual(0, audioDeviceModule->Terminate());
  audioDeviceModule = nullptr;
  [super tearDown];
}

- (void)startPlayout {
  XCTAssertFalse(audioDeviceModule->Playing());
  XCTAssertEqual(0, audioDeviceModule->InitPlayout());
  XCTAssertTrue(audioDeviceModule->PlayoutIsInitialized());
  XCTAssertEqual(0, audioDeviceModule->StartPlayout());
  XCTAssertTrue(audioDeviceModule->Playing());
}

- (void)stopPlayout {
  XCTAssertEqual(0, audioDeviceModule->StopPlayout());
  XCTAssertFalse(audioDeviceModule->Playing());
}

- (void)startRecording {
  XCTAssertFalse(audioDeviceModule->Recording());
  XCTAssertEqual(0, audioDeviceModule->InitRecording());
  XCTAssertTrue(audioDeviceModule->RecordingIsInitialized());
  XCTAssertEqual(0, audioDeviceModule->StartRecording());
  XCTAssertTrue(audioDeviceModule->Recording());
}

- (void)stopRecording {
  XCTAssertEqual(0, audioDeviceModule->StopRecording());
  XCTAssertFalse(audioDeviceModule->Recording());
}

- (NSURL*)fileURLForSampleRate:(int)sampleRate {
  XCTAssertTrue(sampleRate == 48000 || sampleRate == 44100 || sampleRate == 16000);
  NSString *filename = [NSString stringWithFormat:@"audio_short%d", sampleRate / 1000];
  NSURL *url = [[NSBundle mainBundle] URLForResource:filename withExtension:@"pcm"];
  XCTAssertNotNil(url);

  return url;
}
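
// Note: the bundled audio_short<rate> resources are assumed to be raw
// (headerless) 16-bit mono PCM at the sample rate given in the file name;
// testRunPlayoutWithFileAsSource streams their bytes straight into the
// playout buffer without any conversion.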

#pragma mark - Tests

- (void)testConstructDestruct {
  // Using the test fixture to create and destruct the audio device module.
}

- (void)testInitTerminate {
  // Initialization is part of the test fixture.
  XCTAssertTrue(audioDeviceModule->Initialized());
  XCTAssertEqual(0, audioDeviceModule->Terminate());
  XCTAssertFalse(audioDeviceModule->Initialized());
}

// Tests that playout can be initiated, started and stopped. No audio callback
// is registered in this test.
- (void)testStartStopPlayout {
  [self startPlayout];
  [self stopPlayout];
  [self startPlayout];
  [self stopPlayout];
}

// Tests that recording can be initiated, started and stopped. No audio
// callback is registered in this test.
- (void)testStartStopRecording {
  [self startRecording];
  [self stopRecording];
  [self startRecording];
  [self stopRecording];
}

// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit an
// RTC_DCHECK.
- (void)testStopPlayoutRequiresInitToRestart {
  XCTAssertEqual(0, audioDeviceModule->InitPlayout());
  XCTAssertEqual(0, audioDeviceModule->StartPlayout());
  XCTAssertEqual(0, audioDeviceModule->StopPlayout());
  XCTAssertFalse(audioDeviceModule->PlayoutIsInitialized());
}

// Verify that we can create two ADMs and start playing on the second ADM.
// Only the first active instance shall activate an audio session and the
// last active instance shall deactivate the audio session. The test does not
// explicitly verify correct audio session calls but instead focuses on
// ensuring that audio starts for both ADMs.
- (void)testStartPlayoutOnTwoInstances {
  // Create and initialize a second/extra ADM instance. The default ADM is
  // created by the test harness.
  rtc::scoped_refptr<webrtc::AudioDeviceModule> secondAudioDeviceModule =
      webrtc::CreateAudioDeviceModule();
  XCTAssertNotEqual(secondAudioDeviceModule.get(), nullptr);
  XCTAssertEqual(0, secondAudioDeviceModule->Init());

  // Start playout for the default ADM but don't wait here. Instead use the
  // upcoming second stream for that. We set the same expectation on number
  // of callbacks as for the second stream.
  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       void *audioSamples,
                                       size_t &nSamplesOut,
                                       int64_t *elapsed_time_ms,
                                       int64_t *ntp_time_ms) {
    nSamplesOut = nSamples;
    XCTAssertEqual(nSamples, self.playoutParameters.frames_per_10ms_buffer());
    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
    XCTAssertEqual(nChannels, self.playoutParameters.channels());
    XCTAssertEqual((int)samplesPerSec, self.playoutParameters.sample_rate());
    XCTAssertNotEqual((void*)NULL, audioSamples);

    return 0;
  });

  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
  [self startPlayout];

  // Initialize playout for the second ADM. If all is OK, the second ADM shall
  // reuse the audio session activated when the first ADM started playing.
  // This call will also ensure that we avoid a problem related to initializing
  // two different audio unit instances back to back (see webrtc:5166 for
  // details).
  XCTAssertEqual(0, secondAudioDeviceModule->InitPlayout());
  XCTAssertTrue(secondAudioDeviceModule->PlayoutIsInitialized());

  // Start playout for the second ADM and verify that it starts as intended.
  // Passing this test ensures that initialization of the second audio unit
  // has been done successfully and that there is no conflict with the already
  // playing first ADM.
  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
  __block int num_callbacks = 0;

  MockAudioTransport mock2;
  mock2.expectNeedMorePlayData(^int32_t(const size_t nSamples,
                                        const size_t nBytesPerSample,
                                        const size_t nChannels,
                                        const uint32_t samplesPerSec,
                                        void *audioSamples,
                                        size_t &nSamplesOut,
                                        int64_t *elapsed_time_ms,
                                        int64_t *ntp_time_ms) {
    nSamplesOut = nSamples;
    XCTAssertEqual(nSamples, self.playoutParameters.frames_per_10ms_buffer());
    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
    XCTAssertEqual(nChannels, self.playoutParameters.channels());
    XCTAssertEqual((int)samplesPerSec, self.playoutParameters.sample_rate());
    XCTAssertNotEqual((void*)NULL, audioSamples);
    if (++num_callbacks == kNumCallbacks) {
      [playoutExpectation fulfill];
    }

    return 0;
  });

  XCTAssertEqual(0, secondAudioDeviceModule->RegisterAudioCallback(&mock2));
  XCTAssertEqual(0, secondAudioDeviceModule->StartPlayout());
  XCTAssertTrue(secondAudioDeviceModule->Playing());
  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
  [self stopPlayout];
  XCTAssertEqual(0, secondAudioDeviceModule->StopPlayout());
  XCTAssertFalse(secondAudioDeviceModule->Playing());
  XCTAssertFalse(secondAudioDeviceModule->PlayoutIsInitialized());

  XCTAssertEqual(0, secondAudioDeviceModule->Terminate());
}

// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
- (void)testStartPlayoutVerifyCallbacks {
  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
  __block int num_callbacks = 0;
  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       void *audioSamples,
                                       size_t &nSamplesOut,
                                       int64_t *elapsed_time_ms,
                                       int64_t *ntp_time_ms) {
    nSamplesOut = nSamples;
    XCTAssertEqual(nSamples, self.playoutParameters.frames_per_10ms_buffer());
    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
    XCTAssertEqual(nChannels, self.playoutParameters.channels());
    XCTAssertEqual((int)samplesPerSec, self.playoutParameters.sample_rate());
    XCTAssertNotEqual((void*)NULL, audioSamples);
    if (++num_callbacks == kNumCallbacks) {
      [playoutExpectation fulfill];
    }
    return 0;
  });

  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));

  [self startPlayout];
  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
  [self stopPlayout];
}

// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
- (void)testStartRecordingVerifyCallbacks {
  XCTestExpectation *recordExpectation =
      [self expectationWithDescription:@"RecordedDataIsAvailable"];
  __block int num_callbacks = 0;

  mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
                                       const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
                                       const int32_t clockDrift,
                                       const uint32_t currentMicLevel,
                                       const bool keyPressed,
                                       uint32_t& newMicLevel) {
    XCTAssertNotEqual((void*)NULL, audioSamples);
    XCTAssertEqual(nSamples, self.recordParameters.frames_per_10ms_buffer());
    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
    XCTAssertEqual(nChannels, self.recordParameters.channels());
    XCTAssertEqual((int)samplesPerSec, self.recordParameters.sample_rate());
    XCTAssertEqual(0, clockDrift);
    XCTAssertEqual(0u, currentMicLevel);
    XCTAssertFalse(keyPressed);
    if (++num_callbacks == kNumCallbacks) {
      [recordExpectation fulfill];
    }

    return 0;
  });

  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
  [self startRecording];
  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
  [self stopRecording];
}

// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
- (void)testStartPlayoutAndRecordingVerifyCallbacks {
  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
  __block NSUInteger callbackCount = 0;

  XCTestExpectation *recordExpectation =
      [self expectationWithDescription:@"RecordedDataIsAvailable"];
  recordExpectation.expectedFulfillmentCount = kNumCallbacks;

  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       void *audioSamples,
                                       size_t &nSamplesOut,
                                       int64_t *elapsed_time_ms,
                                       int64_t *ntp_time_ms) {
    nSamplesOut = nSamples;
    XCTAssertEqual(nSamples, self.playoutParameters.frames_per_10ms_buffer());
    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
    XCTAssertEqual(nChannels, self.playoutParameters.channels());
    XCTAssertEqual((int)samplesPerSec, self.playoutParameters.sample_rate());
    XCTAssertNotEqual((void*)NULL, audioSamples);
    if (callbackCount++ >= kNumCallbacks) {
      [playoutExpectation fulfill];
    }

    return 0;
  });

  mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
                                       const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
                                       const int32_t clockDrift,
                                       const uint32_t currentMicLevel,
                                       const bool keyPressed,
                                       uint32_t& newMicLevel) {
    XCTAssertNotEqual((void*)NULL, audioSamples);
    XCTAssertEqual(nSamples, self.recordParameters.frames_per_10ms_buffer());
    XCTAssertEqual(nBytesPerSample, kBytesPerSample);
    XCTAssertEqual(nChannels, self.recordParameters.channels());
    XCTAssertEqual((int)samplesPerSec, self.recordParameters.sample_rate());
    XCTAssertEqual(0, clockDrift);
    XCTAssertEqual(0u, currentMicLevel);
    XCTAssertFalse(keyPressed);
    [recordExpectation fulfill];

    return 0;
  });

  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
  [self startPlayout];
  [self startRecording];
  [self waitForExpectationsWithTimeout:kTestTimeOutInSec handler:nil];
  [self stopRecording];
  [self stopPlayout];
}

// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test, but it
// does not contain any explicit verification of audio quality.
- (void)testRunPlayoutWithFileAsSource {
  XCTAssertEqual(1u, playoutParameters.channels());

  // Using XCTestExpectation to count callbacks is very slow.
  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
  const int expectedCallbackCount = kFilePlayTimeInSec * kNumCallbacksPerSecond;
  __block int callbackCount = 0;

  NSURL *fileURL = [self fileURLForSampleRate:playoutParameters.sample_rate()];
  NSInputStream *inputStream = [[NSInputStream alloc] initWithURL:fileURL];
  // The stream must be opened before -read:maxLength: will return data.
  [inputStream open];

  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       void *audioSamples,
                                       size_t &nSamplesOut,
                                       int64_t *elapsed_time_ms,
                                       int64_t *ntp_time_ms) {
    [inputStream read:(uint8_t *)audioSamples maxLength:nSamples * nBytesPerSample * nChannels];
    nSamplesOut = nSamples;
    if (callbackCount++ == expectedCallbackCount) {
      [playoutExpectation fulfill];
    }

    return 0;
  });

  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
  [self startPlayout];
  NSTimeInterval waitTimeout = kFilePlayTimeInSec * 2.0;
  [self waitForExpectationsWithTimeout:waitTimeout handler:nil];
  [self stopPlayout];
}

- (void)testDevices {
  // Device enumeration is not supported. Verify fixed values only.
  XCTAssertEqual(1, audioDeviceModule->PlayoutDevices());
  XCTAssertEqual(1, audioDeviceModule->RecordingDevices());
}

// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
// means 'packet played'. Under such conditions, the FIFO would only contain
// one packet on average. However, under more realistic conditions, the size
// of the FIFO will vary more due to an imbalance between the two sides.
// This test tries to verify that the device maintains a balanced callback
// sequence by running in loopback for ten seconds while measuring the size
// (max and average) of the FIFO. The size of the FIFO is increased by the
// recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
- (void)testRunPlayoutAndRecordingInFullDuplex {
  XCTAssertEqual(recordParameters.channels(), playoutParameters.channels());
  XCTAssertEqual(recordParameters.sample_rate(), playoutParameters.sample_rate());

  XCTestExpectation *playoutExpectation = [self expectationWithDescription:@"NeedMorePlayoutData"];
  __block NSUInteger playoutCallbacks = 0;
  NSUInteger expectedPlayoutCallbacks = kFullDuplexTimeInSec * kNumCallbacksPerSecond;

  // FIFO queue and measurements.
  NSMutableArray *fifoBuffer = [NSMutableArray arrayWithCapacity:20];
  __block NSUInteger fifoMaxSize = 0;
  __block NSUInteger fifoTotalWrittenElements = 0;
  __block NSUInteger fifoWriteCount = 0;

  mock.expectRecordedDataIsAvailable(^(const void* audioSamples,
                                       const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       const uint32_t totalDelayMS,
                                       const int32_t clockDrift,
                                       const uint32_t currentMicLevel,
                                       const bool keyPressed,
                                       uint32_t& newMicLevel) {
    if (fifoWriteCount++ < kNumIgnoreFirstCallbacks) {
      return 0;
    }

    NSData *data = [NSData dataWithBytes:audioSamples length:nSamples * nBytesPerSample * nChannels];
    @synchronized(fifoBuffer) {
      [fifoBuffer addObject:data];
      fifoMaxSize = MAX(fifoMaxSize, fifoBuffer.count);
      fifoTotalWrittenElements += fifoBuffer.count;
    }

    return 0;
  });

  mock.expectNeedMorePlayData(^int32_t(const size_t nSamples,
                                       const size_t nBytesPerSample,
                                       const size_t nChannels,
                                       const uint32_t samplesPerSec,
                                       void *audioSamples,
                                       size_t &nSamplesOut,
                                       int64_t *elapsed_time_ms,
                                       int64_t *ntp_time_ms) {
    nSamplesOut = nSamples;
    NSData *data;
    @synchronized(fifoBuffer) {
      data = fifoBuffer.firstObject;
      if (data) {
        [fifoBuffer removeObjectAtIndex:0];
      }
    }

    if (data) {
      memcpy(audioSamples, (char*)data.bytes, data.length);
    } else {
      memset(audioSamples, 0, nSamples * nBytesPerSample * nChannels);
    }

    if (playoutCallbacks++ == expectedPlayoutCallbacks) {
      [playoutExpectation fulfill];
    }
    return 0;
  });

  XCTAssertEqual(0, audioDeviceModule->RegisterAudioCallback(&mock));
  [self startRecording];
  [self startPlayout];
  NSTimeInterval waitTimeout = kFullDuplexTimeInSec * 2.0;
  [self waitForExpectationsWithTimeout:waitTimeout handler:nil];

  size_t fifoAverageSize =
      (fifoTotalWrittenElements == 0)
          ? 0.0
          : 0.5 + (double)fifoTotalWrittenElements / (fifoWriteCount - kNumIgnoreFirstCallbacks);

  [self stopPlayout];
  [self stopRecording];
  XCTAssertLessThan(fifoAverageSize, 10u);
  XCTAssertLessThan(fifoMaxSize, 20u);
}

@end