/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <algorithm>
#include <limits>
#include <list>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "modules/audio_device/android/audio_common.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/android/build_info.h"
#include "modules/audio_device/android/ensure_initialized.h"
#include "modules/audio_device/audio_device_impl.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/mock_audio_transport.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/criticalsection.h"
#include "rtc_base/format_macros.h"
#include "rtc_base/scoped_ref_ptr.h"
#include "rtc_base/timeutils.h"
#include "system_wrappers/include/event_wrapper.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/testsupport/fileutils.h"

using std::cout;
using std::endl;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::Gt;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;

// #define ENABLE_DEBUG_PRINTF
#ifdef ENABLE_DEBUG_PRINTF
#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINTD(...) ((void)0)
#endif
#define PRINT(...) fprintf(stderr, __VA_ARGS__);
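
// PRINT() is always compiled in and is used for the test reports below (e.g.
// the latency summary), while PRINTD() only produces output when
// ENABLE_DEBUG_PRINTF is defined and is used for verbose per-callback traces
// such as '+', '-' and FIFO sizes.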

namespace webrtc {

// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const size_t kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Average number of audio callbacks per second assuming 10ms packet size.
static const size_t kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 5;
static const size_t kBitsPerSample = 16;
static const size_t kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
static const int kFullDuplexTimeInSec = 5;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const size_t kNumIgnoreFirstCallbacks = 50;
// Sets the number of impulses per second in the latency test.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. Number of transmitted impulses
// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
static const int kMeasureLatencyTimeInSec = 11;
// Utilized in round-trip latency measurements to avoid capturing noise samples.
static const int kImpulseThreshold = 1000;
static const char kTag[] = "[..........] ";

enum TransportType {
  kPlayout = 0x1,
  kRecording = 0x2,
};

// Interface for processing the audio stream. Real implementations can e.g.
// run audio in loopback, read audio from a file or perform latency
// measurements.
class AudioStreamInterface {
 public:
  virtual void Write(const void* source, size_t num_frames) = 0;
  virtual void Read(void* destination, size_t num_frames) = 0;

 protected:
  virtual ~AudioStreamInterface() {}
};
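
// In this file, Write() consumes audio recorded by the device (the capture
// side) and Read() produces audio for the device to play out (the render
// side); a loopback implementation therefore moves samples from Write() to
// Read().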

// Reads audio samples from a PCM file where the file is stored in memory at
// construction.
class FileAudioStream : public AudioStreamInterface {
 public:
  FileAudioStream(
      size_t num_callbacks, const std::string& file_name, int sample_rate)
      : file_size_in_bytes_(0),
        sample_rate_(sample_rate),
        file_pos_(0) {
    file_size_in_bytes_ = test::GetFileSize(file_name);
    sample_rate_ = sample_rate;
    EXPECT_GE(file_size_in_callbacks(), num_callbacks)
        << "Size of test file is not large enough to last during the test.";
    const size_t num_16bit_samples =
        test::GetFileSize(file_name) / kBytesPerSample;
    file_.reset(new int16_t[num_16bit_samples]);
    FILE* audio_file = fopen(file_name.c_str(), "rb");
    EXPECT_NE(audio_file, nullptr);
    size_t num_samples_read = fread(
        file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
    EXPECT_EQ(num_samples_read, num_16bit_samples);
    fclose(audio_file);
  }

  // AudioStreamInterface::Write() is not implemented.
  void Write(const void* source, size_t num_frames) override {}

  // Read samples from file stored in memory (at construction) and copy
  // |num_frames| (<=> 10ms) to the |destination| byte buffer.
  void Read(void* destination, size_t num_frames) override {
    memcpy(destination,
           static_cast<int16_t*>(&file_[file_pos_]),
           num_frames * sizeof(int16_t));
    file_pos_ += num_frames;
  }

  int file_size_in_seconds() const {
    return static_cast<int>(
        file_size_in_bytes_ / (kBytesPerSample * sample_rate_));
  }
  size_t file_size_in_callbacks() const {
    return file_size_in_seconds() * kNumCallbacksPerSecond;
  }

 private:
  size_t file_size_in_bytes_;
  int sample_rate_;
  std::unique_ptr<int16_t[]> file_;
  size_t file_pos_;
};
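
// Rough numbers for orientation (illustrative only): a 5 second test file
// corresponds to kFilePlayTimeInSec * kNumCallbacksPerSecond = 500 callbacks,
// and at 48 kHz each Read() call copies one 10 ms chunk of 480 frames from
// the in-memory copy of the file.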

// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
// buffers of fixed size and allows Write and Read operations. The idea is to
// store recorded audio buffers (using Write) and then read (using Read) these
// stored buffers with as short a delay as possible when the audio layer needs
// data to play out. The number of buffers in the FIFO will stabilize under
// normal conditions since there will be a balance between Write and Read calls.
// The container is a std::list and access is protected with a lock since both
// sides (playout and recording) are driven by their own threads.
class FifoAudioStream : public AudioStreamInterface {
 public:
  explicit FifoAudioStream(size_t frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        fifo_(new AudioBufferList),
        largest_size_(0),
        total_written_elements_(0),
        write_count_(0) {
    EXPECT_NE(fifo_.get(), nullptr);
  }

  ~FifoAudioStream() {
    Flush();
  }

  // Allocate new memory, copy |num_frames| samples from |source| into memory
  // and add a pointer to the memory location to the end of the list.
  // Increases the size of the FIFO by one element.
  void Write(const void* source, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("+");
    if (write_count_++ < kNumIgnoreFirstCallbacks) {
      return;
    }
    int16_t* memory = new int16_t[frames_per_buffer_];
    memcpy(static_cast<int16_t*>(&memory[0]),
           source,
           bytes_per_buffer_);
    rtc::CritScope lock(&lock_);
    fifo_->push_back(memory);
    const size_t size = fifo_->size();
    if (size > largest_size_) {
      largest_size_ = size;
      PRINTD("(%" PRIuS ")", largest_size_);
    }
    total_written_elements_ += size;
  }

  // Read the pointer to the data buffer at the front of the list, copy
  // |num_frames| of stored data into |destination| and delete the utilized
  // memory allocation. Decreases the size of the FIFO by one element.
  void Read(void* destination, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("-");
    rtc::CritScope lock(&lock_);
    if (fifo_->empty()) {
      memset(destination, 0, bytes_per_buffer_);
    } else {
      int16_t* memory = fifo_->front();
      fifo_->pop_front();
      memcpy(destination,
             static_cast<int16_t*>(&memory[0]),
             bytes_per_buffer_);
      // The buffer was allocated with new[] in Write(), hence delete[].
      delete[] memory;
    }
  }

  size_t size() const {
    return fifo_->size();
  }

  size_t largest_size() const {
    return largest_size_;
  }

  size_t average_size() const {
    return (total_written_elements_ == 0) ? 0.0 : 0.5 + static_cast<float>(
        total_written_elements_) / (write_count_ - kNumIgnoreFirstCallbacks);
  }

 private:
  void Flush() {
    for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
      delete[] *it;
    }
    fifo_->clear();
  }

  using AudioBufferList = std::list<int16_t*>;
  rtc::CriticalSection lock_;
  const size_t frames_per_buffer_;
  const size_t bytes_per_buffer_;
  std::unique_ptr<AudioBufferList> fifo_;
  size_t largest_size_;
  size_t total_written_elements_;
  size_t write_count_;
};
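
// Note on FifoAudioStream sizes: each FIFO element holds one 10 ms buffer, so
// an average_size() of N corresponds to roughly N * 10 ms of buffered audio.
// average_size() rounds to the nearest integer (the + 0.5 term) and excludes
// the first |kNumIgnoreFirstCallbacks| writes.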

// Inserts periodic impulses and measures the latency between the time of
// transmission and the time of receiving the same impulse.
// Usage requires special hardware called an Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
class LatencyMeasuringAudioStream : public AudioStreamInterface {
 public:
  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        play_count_(0),
        rec_count_(0),
        pulse_time_(0) {
  }

  // Insert periodic impulses in the first two samples of |destination|.
  void Read(void* destination, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    if (play_count_ == 0) {
      PRINT("[");
    }
    play_count_++;
    memset(destination, 0, bytes_per_buffer_);
    if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
      if (pulse_time_ == 0) {
        pulse_time_ = rtc::TimeMillis();
      }
      PRINT(".");
      const int16_t impulse = std::numeric_limits<int16_t>::max();
      int16_t* ptr16 = static_cast<int16_t*>(destination);
      for (size_t i = 0; i < 2; ++i) {
        ptr16[i] = impulse;
      }
    }
  }

  // Detect received impulses in |source|, derive the time between transmission
  // and detection and add the calculated delay to the list of latencies.
  void Write(const void* source, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    rec_count_++;
    if (pulse_time_ == 0) {
      // Avoid detection of a new impulse response until a new impulse has
      // been transmitted (sets |pulse_time_| to a value larger than zero).
      return;
    }
    const int16_t* ptr16 = static_cast<const int16_t*>(source);
    std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
    // Find the max value in the audio buffer.
    int max = *std::max_element(vec.begin(), vec.end());
    // Find the index (element position in the vector) of the max element.
    int index_of_max = std::distance(vec.begin(),
                                     std::find(vec.begin(), vec.end(),
                                               max));
    if (max > kImpulseThreshold) {
      PRINTD("(%d,%d)", max, index_of_max);
      int64_t now_time = rtc::TimeMillis();
      int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
      PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
      PRINTD("[%d]", extra_delay);
      // Total latency is the difference between transmit time and detection
      // time plus the extra delay within the buffer in which we detected the
      // received impulse. It is transmitted at sample 0 but can be received
      // at sample N where N > 0. The term |extra_delay| accounts for N and it
      // is a value between 0 and 10ms.
      latencies_.push_back(now_time - pulse_time_ + extra_delay);
      pulse_time_ = 0;
    } else {
      PRINTD("-");
    }
  }

  size_t num_latency_values() const {
    return latencies_.size();
  }

  int min_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::min_element(latencies_.begin(), latencies_.end());
  }

  int max_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::max_element(latencies_.begin(), latencies_.end());
  }

  int average_latency() const {
    if (latencies_.empty())
      return 0;
    return 0.5 + static_cast<double>(
        std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
        latencies_.size();
  }

  void PrintResults() const {
    PRINT("] ");
    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
      PRINT("%d ", *it);
    }
    PRINT("\n");
    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag,
          min_latency(), max_latency(), average_latency());
  }

  int IndexToMilliseconds(double index) const {
    return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
  }

 private:
  const size_t frames_per_buffer_;
  const size_t bytes_per_buffer_;
  size_t play_count_;
  size_t rec_count_;
  int64_t pulse_time_;
  std::vector<int> latencies_;
};
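
// Worked example for IndexToMilliseconds(): with 480 frames per 10 ms buffer,
// an impulse detected at index 240 maps to 10.0 * (240 / 480) = 5 ms of extra
// delay added to the measured round-trip latency.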

// Mocks the AudioTransport object and proxies actions for the two callbacks
// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
// of AudioStreamInterface.
class MockAudioTransportAndroid : public test::MockAudioTransport {
 public:
  explicit MockAudioTransportAndroid(int type)
      : num_callbacks_(0),
        type_(type),
        play_count_(0),
        rec_count_(0),
        audio_stream_(nullptr) {}

  virtual ~MockAudioTransportAndroid() {}

  // Set default actions of the mock object. We are delegating to fake
  // implementations (of AudioStreamInterface) here.
  void HandleCallbacks(EventWrapper* test_is_done,
                       AudioStreamInterface* audio_stream,
                       int num_callbacks) {
    test_is_done_ = test_is_done;
    audio_stream_ = audio_stream;
    num_callbacks_ = num_callbacks;
    if (play_mode()) {
      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
          .WillByDefault(
              Invoke(this, &MockAudioTransportAndroid::RealNeedMorePlayData));
    }
    if (rec_mode()) {
      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
          .WillByDefault(Invoke(
              this, &MockAudioTransportAndroid::RealRecordedDataIsAvailable));
    }
  }

  int32_t RealRecordedDataIsAvailable(const void* audioSamples,
                                      const size_t nSamples,
                                      const size_t nBytesPerSample,
                                      const size_t nChannels,
                                      const uint32_t samplesPerSec,
                                      const uint32_t totalDelayMS,
                                      const int32_t clockDrift,
                                      const uint32_t currentMicLevel,
                                      const bool keyPressed,
                                      uint32_t& newMicLevel) {  // NOLINT
    EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
    rec_count_++;
    // Process the recorded audio stream if an AudioStreamInterface
    // implementation exists.
    if (audio_stream_) {
      audio_stream_->Write(audioSamples, nSamples);
    }
    if (ReceivedEnoughCallbacks()) {
      test_is_done_->Set();
    }
    return 0;
  }

  int32_t RealNeedMorePlayData(const size_t nSamples,
                               const size_t nBytesPerSample,
                               const size_t nChannels,
                               const uint32_t samplesPerSec,
                               void* audioSamples,
                               size_t& nSamplesOut,  // NOLINT
                               int64_t* elapsed_time_ms,
                               int64_t* ntp_time_ms) {
    EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
    play_count_++;
    nSamplesOut = nSamples;
    // Read (possibly processed) audio stream samples to be played out if an
    // AudioStreamInterface implementation exists.
    if (audio_stream_) {
      audio_stream_->Read(audioSamples, nSamples);
    }
    if (ReceivedEnoughCallbacks()) {
      test_is_done_->Set();
    }
    return 0;
  }

  bool ReceivedEnoughCallbacks() {
    bool recording_done = false;
    if (rec_mode())
      recording_done = rec_count_ >= num_callbacks_;
    else
      recording_done = true;

    bool playout_done = false;
    if (play_mode())
      playout_done = play_count_ >= num_callbacks_;
    else
      playout_done = true;

    return recording_done && playout_done;
  }

  bool play_mode() const { return type_ & kPlayout; }
  bool rec_mode() const { return type_ & kRecording; }

 private:
  EventWrapper* test_is_done_;
  size_t num_callbacks_;
  int type_;
  size_t play_count_;
  size_t rec_count_;
  AudioStreamInterface* audio_stream_;
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};
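
// Sketch of how the tests below drive this mock (mirrors
// StartPlayoutVerifyCallbacks):
//   MockAudioTransportAndroid mock(kPlayout);
//   mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
//   EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
//   StartPlayout();
//   test_is_done_->Wait(kTestTimeOutInMilliseconds);
//   StopPlayout();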

// AudioDeviceTest test fixture.
class AudioDeviceTest : public ::testing::Test {
 protected:
  AudioDeviceTest()
      : test_is_done_(EventWrapper::Create()) {
    // One-time initialization of JVM and application context. Ensures that we
    // can do calls between C++ and Java. Initializes both Java and OpenSL ES
    // implementations.
    webrtc::audiodevicemodule::EnsureInitialized();
    // Creates an audio device using a default audio layer.
    audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
    EXPECT_NE(audio_device_.get(), nullptr);
    EXPECT_EQ(0, audio_device_->Init());
    playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
    record_parameters_ = audio_manager()->GetRecordAudioParameters();
    build_info_.reset(new BuildInfo());
  }
  virtual ~AudioDeviceTest() {
    EXPECT_EQ(0, audio_device_->Terminate());
  }

  int playout_sample_rate() const {
    return playout_parameters_.sample_rate();
  }
  int record_sample_rate() const {
    return record_parameters_.sample_rate();
  }
  size_t playout_channels() const {
    return playout_parameters_.channels();
  }
  size_t record_channels() const {
    return record_parameters_.channels();
  }
  size_t playout_frames_per_10ms_buffer() const {
    return playout_parameters_.frames_per_10ms_buffer();
  }
  size_t record_frames_per_10ms_buffer() const {
    return record_parameters_.frames_per_10ms_buffer();
  }

  int total_delay_ms() const {
    return audio_manager()->GetDelayEstimateInMilliseconds();
  }

  rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
    return audio_device_;
  }

  AudioDeviceModuleImpl* audio_device_impl() const {
    return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
  }

  AudioManager* audio_manager() const {
    return audio_device_impl()->GetAndroidAudioManagerForTest();
  }

  AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
    return static_cast<AudioDeviceModuleImpl*>(adm)->
        GetAndroidAudioManagerForTest();
  }

  AudioDeviceBuffer* audio_device_buffer() const {
    return audio_device_impl()->GetAudioDeviceBuffer();
  }

  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
      AudioDeviceModule::AudioLayer audio_layer) {
    rtc::scoped_refptr<AudioDeviceModule> module(
        AudioDeviceModule::Create(0, audio_layer));
    return module;
  }

  // Returns file name relative to the resource root given a sample rate.
  std::string GetFileName(int sample_rate) {
    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
    char fname[64];
    snprintf(fname,
             sizeof(fname),
             "audio_device/audio_short%d",
             sample_rate / 1000);
    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
    EXPECT_TRUE(test::FileExists(file_name));
#ifdef ENABLE_PRINTF
    PRINT("file name: %s\n", file_name.c_str());
    const size_t bytes = test::GetFileSize(file_name);
    PRINT("file size: %" PRIuS " [bytes]\n", bytes);
    PRINT("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
    const int seconds =
        static_cast<int>(bytes / (sample_rate * kBytesPerSample));
    PRINT("file size: %d [secs]\n", seconds);
    PRINT("file size: %" PRIuS " [callbacks]\n",
          seconds * kNumCallbacksPerSecond);
#endif
    return file_name;
  }
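
  // Example: a sample rate of 48000 maps to the resource file
  // "audio_device/audio_short48.pcm" under the test resource root.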

  AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
    AudioDeviceModule::AudioLayer audio_layer;
    EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
    return audio_layer;
  }

  int TestDelayOnAudioLayer(
      const AudioDeviceModule::AudioLayer& layer_to_test) {
    rtc::scoped_refptr<AudioDeviceModule> audio_device;
    audio_device = CreateAudioDevice(layer_to_test);
    EXPECT_NE(audio_device.get(), nullptr);
    AudioManager* audio_manager = GetAudioManager(audio_device.get());
    EXPECT_NE(audio_manager, nullptr);
    return audio_manager->GetDelayEstimateInMilliseconds();
  }

  AudioDeviceModule::AudioLayer TestActiveAudioLayer(
      const AudioDeviceModule::AudioLayer& layer_to_test) {
    rtc::scoped_refptr<AudioDeviceModule> audio_device;
    audio_device = CreateAudioDevice(layer_to_test);
    EXPECT_NE(audio_device.get(), nullptr);
    AudioDeviceModule::AudioLayer active;
    EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
    return active;
  }

  bool DisableTestForThisDevice(const std::string& model) {
    return (build_info_->GetDeviceModel() == model);
  }

  // Volume control is currently only supported for the Java output audio layer.
  // For OpenSL ES, the internal stream volume is always on max level and there
  // is no need for this test to set it to max.
  bool AudioLayerSupportsVolumeControl() const {
    return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
  }

  void SetMaxPlayoutVolume() {
    if (!AudioLayerSupportsVolumeControl())
      return;
    uint32_t max_volume;
    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
    EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
  }

  void DisableBuiltInAECIfAvailable() {
    if (audio_device()->BuiltInAECIsAvailable()) {
      EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
    }
  }

  void StartPlayout() {
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_EQ(0, audio_device()->InitPlayout());
    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
    EXPECT_EQ(0, audio_device()->StartPlayout());
    EXPECT_TRUE(audio_device()->Playing());
  }

  void StopPlayout() {
    EXPECT_EQ(0, audio_device()->StopPlayout());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
  }

  void StartRecording() {
    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
    EXPECT_FALSE(audio_device()->Recording());
    EXPECT_EQ(0, audio_device()->InitRecording());
    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
    EXPECT_EQ(0, audio_device()->StartRecording());
    EXPECT_TRUE(audio_device()->Recording());
  }

  void StopRecording() {
    EXPECT_EQ(0, audio_device()->StopRecording());
    EXPECT_FALSE(audio_device()->Recording());
  }

  int GetMaxSpeakerVolume() const {
    uint32_t max_volume(0);
    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
    return max_volume;
  }

  int GetMinSpeakerVolume() const {
    uint32_t min_volume(0);
    EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
    return min_volume;
  }

  int GetSpeakerVolume() const {
    uint32_t volume(0);
    EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
    return volume;
  }

  std::unique_ptr<EventWrapper> test_is_done_;
  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
  AudioParameters playout_parameters_;
  AudioParameters record_parameters_;
  std::unique_ptr<BuildInfo> build_info_;
};

TEST_F(AudioDeviceTest, ConstructDestruct) {
  // Using the test fixture to create and destruct the audio device module.
}

// We always ask for a default audio layer when the ADM is constructed. But the
// ADM will then internally select the most suitable combination of audio
// layers for input and output, based on whether low-latency output and/or
// input audio in combination with OpenSL ES is supported or not. This test
// ensures that the correct selection is done.
TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
  const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
  bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
  bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
  bool aaudio = audio_manager()->IsAAudioSupported();
  AudioDeviceModule::AudioLayer expected_audio_layer;
  if (aaudio) {
    expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
  } else if (low_latency_output && low_latency_input) {
    expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
  } else if (low_latency_output && !low_latency_input) {
    expected_audio_layer =
        AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
  } else {
    expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
  }
  EXPECT_EQ(expected_audio_layer, audio_layer);
}
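
// Selection precedence verified above: AAudio (if supported) is preferred,
// then OpenSL ES in both directions, then OpenSL ES output combined with Java
// input, and finally Java in both directions.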

// Verify that it is possible to explicitly create the two types of supported
// ADMs. These tests override the default selection of native audio layer by
// ignoring whether the device supports low-latency output or not.
TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
  AudioDeviceModule::AudioLayer active_layer = TestActiveAudioLayer(
      expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidJavaAudio;
  AudioDeviceModule::AudioLayer active_layer = TestActiveAudioLayer(
      expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidOpenSLESAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

// TODO(bugs.webrtc.org/8914)
#if !defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
  DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
#else
#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
  CorrectAudioLayerIsUsedForAAudioInBothDirections
#endif
TEST_F(AudioDeviceTest,
       MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidAAudioAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

// TODO(bugs.webrtc.org/8914)
#if !defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
  DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
#else
#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
  CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
#endif
TEST_F(AudioDeviceTest,
       MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

// The Android ADM supports two different delay reporting modes. One for the
// low-latency output path (in combination with OpenSL ES), and one for the
// high-latency output path (Java backends in both directions). These two tests
// verify that the audio manager reports the correct delay estimate given the
// selected audio layer. Note that this delay estimate will only be utilized
// if the HW AEC is disabled.
TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
  EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
            TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
}

TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
  EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
            TestDelayOnAudioLayer(
                AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
}

// Ensure that the ADM internal audio device buffer is configured to use the
// correct set of parameters.
TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
  EXPECT_EQ(playout_parameters_.sample_rate(),
            audio_device_buffer()->PlayoutSampleRate());
  EXPECT_EQ(record_parameters_.sample_rate(),
            audio_device_buffer()->RecordingSampleRate());
  EXPECT_EQ(playout_parameters_.channels(),
            audio_device_buffer()->PlayoutChannels());
  EXPECT_EQ(record_parameters_.channels(),
            audio_device_buffer()->RecordingChannels());
}

TEST_F(AudioDeviceTest, InitTerminate) {
  // Initialization is part of the test fixture.
  EXPECT_TRUE(audio_device()->Initialized());
  EXPECT_EQ(0, audio_device()->Terminate());
  EXPECT_FALSE(audio_device()->Initialized());
}

TEST_F(AudioDeviceTest, Devices) {
  // Device enumeration is not supported. Verify fixed values only.
  EXPECT_EQ(1, audio_device()->PlayoutDevices());
  EXPECT_EQ(1, audio_device()->RecordingDevices());
}

TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  bool available;
  EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
  EXPECT_TRUE(available);
}

TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  StartPlayout();
  EXPECT_GT(GetMaxSpeakerVolume(), 0);
  StopPlayout();
}

TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  EXPECT_EQ(GetMinSpeakerVolume(), 0);
}

TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  const int default_volume = GetSpeakerVolume();
  EXPECT_GE(default_volume, GetMinSpeakerVolume());
  EXPECT_LE(default_volume, GetMaxSpeakerVolume());
}

TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  const int default_volume = GetSpeakerVolume();
  const int max_volume = GetMaxSpeakerVolume();
  EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
  int new_volume = GetSpeakerVolume();
  EXPECT_EQ(new_volume, max_volume);
  EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
}

// Tests that playout can be initiated, started and stopped. No audio callback
// is registered in this test.
TEST_F(AudioDeviceTest, StartStopPlayout) {
  StartPlayout();
  StopPlayout();
  StartPlayout();
  StopPlayout();
}

// Tests that recording can be initiated, started and stopped. No audio callback
// is registered in this test.
TEST_F(AudioDeviceTest, StartStopRecording) {
  StartRecording();
  StopRecording();
  StartRecording();
  StopRecording();
}

// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit an
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitPlayout());
  EXPECT_EQ(0, audio_device()->StartPlayout());
  EXPECT_EQ(0, audio_device()->StopPlayout());
  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}

// Verify that calling StopRecording() will leave us in an uninitialized state
// which will require a new call to InitRecording(). This test does not call
// StartRecording() while being uninitialized since doing so will hit an
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitRecording());
  EXPECT_EQ(0, audio_device()->StartRecording());
  EXPECT_EQ(0, audio_device()->StopRecording());
  EXPECT_FALSE(audio_device()->RecordingIsInitialized());
}

// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout);
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample,
                                     playout_channels(),
                                     playout_sample_rate(),
                                     NotNull(),
                                     _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}

// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
// TODO(henrika): investigate if it is possible to perform a sanity check of
// delay estimates as well (argument #6).
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kRecording);
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));

  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
}

// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout | kRecording);
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample,
                                     playout_channels(),
                                     playout_sample_rate(),
                                     NotNull(),
                                     _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
  StopPlayout();
}

// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
  // TODO(henrika): extend test when mono output is supported.
  EXPECT_EQ(1u, playout_channels());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout);
  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
  std::string file_name = GetFileName(playout_sample_rate());
  std::unique_ptr<FileAudioStream> file_audio_stream(
      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
  mock.HandleCallbacks(test_is_done_.get(),
                       file_audio_stream.get(),
                       num_callbacks);
  // SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}

// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
// means 'packet played'. Under such conditions, the FIFO would only contain
// one packet on average. However, under more realistic conditions, the size
// of the FIFO will vary more due to an unbalance between the two sides.
// This test tries to verify that the device maintains a balanced callback-
// sequence by running in loopback for |kFullDuplexTimeInSec| seconds while
// measuring the size (max and average) of the FIFO. The size of the FIFO is
// increased by the recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
// Disabling this test on bots since it is difficult to come up with a robust
// test condition that works as intended on all of them. The main issue is
// that, when swarming is used, an initial latency can be built up when the
// two sides start at different times. Hence, the test can fail even if audio
// works as intended. Keeping the test so it can be enabled manually.
// http://bugs.webrtc.org/7744
TEST_F(AudioDeviceTest, DISABLED_RunPlayoutAndRecordingInFullDuplex) {
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<FifoAudioStream> fifo_audio_stream(
      new FifoAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(test_is_done_.get(),
                       fifo_audio_stream.get(),
                       kFullDuplexTimeInSec * kNumCallbacksPerSecond);
  SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  StartPlayout();
  test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,
                               1000 * kFullDuplexTimeInSec));
  StopPlayout();
  StopRecording();

  // These thresholds are set rather high to accommodate differences in
  // hardware across devices, so this test can be used in swarming.
  // See http://bugs.webrtc.org/6464
  EXPECT_LE(fifo_audio_stream->average_size(), 60u);
  EXPECT_LE(fifo_audio_stream->largest_size(), 70u);
}
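
// Note: with 10 ms buffers, the FIFO thresholds used in the test above
// (60 and 70 elements) correspond to roughly 600 ms and 700 ms of buffered
// audio, respectively.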

// Measures loopback latency and reports the min, max and average values for
// a full duplex audio session.
// The latency is measured like so:
// - Insert impulses periodically on the output side.
// - Detect the impulses on the input side.
// - Measure the time difference between the transmit time and receive time.
// - Store time differences in a vector and calculate min, max and average.
// This test requires special hardware called an Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(test_is_done_.get(),
                       latency_audio_stream.get(),
                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  SetMaxPlayoutVolume();
  DisableBuiltInAECIfAvailable();
  StartRecording();
  StartPlayout();
  test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,
                               1000 * kMeasureLatencyTimeInSec));
  StopPlayout();
  StopRecording();
  // Verify that the correct number of transmitted impulses is detected.
  EXPECT_EQ(latency_audio_stream->num_latency_values(),
            static_cast<size_t>(
                kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
  latency_audio_stream->PrintResults();
}
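
// With the default constants, a successful run is expected to yield
// kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1 = 1 * 11 - 1 = 10
// latency measurements.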

}  // namespace webrtc