/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <algorithm>
#include <limits>
#include <list>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "modules/audio_device/android/audio_common.h"
#include "modules/audio_device/android/audio_manager.h"
#include "modules/audio_device/android/build_info.h"
#include "modules/audio_device/android/ensure_initialized.h"
#include "modules/audio_device/audio_device_impl.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/mock_audio_transport.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/criticalsection.h"
#include "rtc_base/format_macros.h"
#include "rtc_base/scoped_ref_ptr.h"
#include "rtc_base/timeutils.h"
#include "system_wrappers/include/event_wrapper.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/testsupport/fileutils.h"

using std::cout;
using std::endl;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::Gt;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;

// #define ENABLE_DEBUG_PRINTF
#ifdef ENABLE_DEBUG_PRINTF
#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINTD(...) ((void)0)
#endif
#define PRINT(...) fprintf(stderr, __VA_ARGS__);

namespace webrtc {

// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const size_t kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Average number of audio callbacks per second assuming 10ms packet size.
static const size_t kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 5;
static const size_t kBitsPerSample = 16;
static const size_t kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
static const int kFullDuplexTimeInSec = 5;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const size_t kNumIgnoreFirstCallbacks = 50;
// Sets the number of impulses per second in the latency test.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. The number of transmitted
// impulses is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1, i.e.
// 10 impulses with the current constants.
static const int kMeasureLatencyTimeInSec = 11;
// Utilized in round-trip latency measurements to avoid capturing noise samples.
static const int kImpulseThreshold = 1000;
static const char kTag[] = "[..........] ";

enum TransportType {
  kPlayout = 0x1,
  kRecording = 0x2,
};

// Interface for processing the audio stream. Real implementations can e.g.
// run audio in loopback, read audio from a file or perform latency
// measurements.
class AudioStreamInterface {
 public:
  virtual void Write(const void* source, size_t num_frames) = 0;
  virtual void Read(void* destination, size_t num_frames) = 0;

 protected:
  virtual ~AudioStreamInterface() {}
};

// Reads audio samples from a PCM file where the file is stored in memory at
// construction.
class FileAudioStream : public AudioStreamInterface {
 public:
  FileAudioStream(size_t num_callbacks,
                  const std::string& file_name,
                  int sample_rate)
      : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
    file_size_in_bytes_ = test::GetFileSize(file_name);
    sample_rate_ = sample_rate;
    EXPECT_GE(file_size_in_callbacks(), num_callbacks)
        << "Size of test file is not large enough to last during the test.";
    const size_t num_16bit_samples =
        test::GetFileSize(file_name) / kBytesPerSample;
    file_.reset(new int16_t[num_16bit_samples]);
    FILE* audio_file = fopen(file_name.c_str(), "rb");
    EXPECT_NE(audio_file, nullptr);
    size_t num_samples_read =
        fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
    EXPECT_EQ(num_samples_read, num_16bit_samples);
    fclose(audio_file);
  }

  // AudioStreamInterface::Write() is not implemented.
  void Write(const void* source, size_t num_frames) override {}

  // Read samples from file stored in memory (at construction) and copy
  // |num_frames| (<=> 10ms) to the |destination| byte buffer.
  void Read(void* destination, size_t num_frames) override {
    memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
           num_frames * sizeof(int16_t));
    file_pos_ += num_frames;
  }

  int file_size_in_seconds() const {
    return static_cast<int>(file_size_in_bytes_ /
                            (kBytesPerSample * sample_rate_));
  }
  size_t file_size_in_callbacks() const {
    return file_size_in_seconds() * kNumCallbacksPerSecond;
  }

 private:
  size_t file_size_in_bytes_;
  int sample_rate_;
  std::unique_ptr<int16_t[]> file_;
  size_t file_pos_;
};

// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
// buffers of fixed size and allows Write and Read operations. The idea is to
// store recorded audio buffers (using Write) and then read (using Read) these
// stored buffers with as short a delay as possible when the audio layer needs
// data to play out. The number of buffers in the FIFO will stabilize under
// normal conditions since there will be a balance between Write and Read calls.
// The container is a std::list container and access is protected with a lock
// since both sides (playout and recording) are driven by their own threads.
class FifoAudioStream : public AudioStreamInterface {
 public:
  explicit FifoAudioStream(size_t frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        fifo_(new AudioBufferList),
        largest_size_(0),
        total_written_elements_(0),
        write_count_(0) {
    EXPECT_NE(fifo_.get(), nullptr);
  }

  ~FifoAudioStream() { Flush(); }

  // Allocate new memory, copy |num_frames| samples from |source| into memory
  // and add a pointer to the memory location to the end of the list.
  // Increases the size of the FIFO by one element.
  void Write(const void* source, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("+");
    if (write_count_++ < kNumIgnoreFirstCallbacks) {
      return;
    }
    int16_t* memory = new int16_t[frames_per_buffer_];
    memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
    rtc::CritScope lock(&lock_);
    fifo_->push_back(memory);
    const size_t size = fifo_->size();
    if (size > largest_size_) {
      largest_size_ = size;
      PRINTD("(%" PRIuS ")", largest_size_);
    }
    total_written_elements_ += size;
  }

  // Read a pointer to a data buffer from the front of the list, copy
  // |num_frames| of stored data into |destination| and delete the utilized
  // memory allocation. Decreases the size of the FIFO by one element.
  void Read(void* destination, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("-");
    rtc::CritScope lock(&lock_);
    if (fifo_->empty()) {
      memset(destination, 0, bytes_per_buffer_);
    } else {
      int16_t* memory = fifo_->front();
      fifo_->pop_front();
      memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
      delete[] memory;
    }
  }

  size_t size() const { return fifo_->size(); }

  size_t largest_size() const { return largest_size_; }

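  // Returns the average number of buffers in the FIFO, rounded to the nearest
  // integer. The average is taken over all Write() calls that follow the
  // initial |kNumIgnoreFirstCallbacks| ignored callbacks.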
  size_t average_size() const {
    return (total_written_elements_ == 0)
               ? 0.0
               : 0.5 + static_cast<float>(total_written_elements_) /
                           (write_count_ - kNumIgnoreFirstCallbacks);
  }

 private:
  void Flush() {
    for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
      delete[] *it;
    }
    fifo_->clear();
  }

  using AudioBufferList = std::list<int16_t*>;
  rtc::CriticalSection lock_;
  const size_t frames_per_buffer_;
  const size_t bytes_per_buffer_;
  std::unique_ptr<AudioBufferList> fifo_;
  size_t largest_size_;
  size_t total_written_elements_;
  size_t write_count_;
};

// Inserts periodic impulses and measures the latency between the time of
// transmission and the time of receiving the same impulse.
// Usage requires special hardware called an Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
class LatencyMeasuringAudioStream : public AudioStreamInterface {
 public:
  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        play_count_(0),
        rec_count_(0),
        pulse_time_(0) {}

  // Insert periodic impulses in the first two samples of |destination|.
  void Read(void* destination, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    if (play_count_ == 0) {
      PRINT("[");
    }
    play_count_++;
    memset(destination, 0, bytes_per_buffer_);
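    // With the current constants (kNumCallbacksPerSecond = 100 and
    // kImpulseFrequencyInHz = 1), an impulse is written into every 100th
    // playout buffer, i.e. once per second.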
    if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
      if (pulse_time_ == 0) {
        pulse_time_ = rtc::TimeMillis();
      }
      PRINT(".");
      const int16_t impulse = std::numeric_limits<int16_t>::max();
      int16_t* ptr16 = static_cast<int16_t*>(destination);
      for (size_t i = 0; i < 2; ++i) {
        ptr16[i] = impulse;
      }
    }
  }

  // Detect received impulses in |source|, derive the time between transmission
  // and detection and add the calculated delay to the list of latencies.
  void Write(const void* source, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    rec_count_++;
    if (pulse_time_ == 0) {
      // Avoid detection of a new impulse response until a new impulse has
      // been transmitted (sets |pulse_time_| to a value larger than zero).
      return;
    }
    const int16_t* ptr16 = static_cast<const int16_t*>(source);
    std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
    // Find the max value in the audio buffer.
    int max = *std::max_element(vec.begin(), vec.end());
    // Find the index (element position in the vector) of the max element.
    int index_of_max =
        std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
    if (max > kImpulseThreshold) {
      PRINTD("(%d,%d)", max, index_of_max);
      int64_t now_time = rtc::TimeMillis();
      int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
      PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
      PRINTD("[%d]", extra_delay);
      // Total latency is the difference between transmit time and detection
      // time plus the extra delay within the buffer in which we detected the
      // received impulse. The impulse is transmitted at sample 0 but can be
      // received at sample N where N > 0. The term |extra_delay| accounts for
      // N and is a value between 0 and 10ms.
      latencies_.push_back(now_time - pulse_time_ + extra_delay);
      pulse_time_ = 0;
    } else {
      PRINTD("-");
    }
  }

  size_t num_latency_values() const { return latencies_.size(); }

  int min_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::min_element(latencies_.begin(), latencies_.end());
  }

  int max_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::max_element(latencies_.begin(), latencies_.end());
  }

  int average_latency() const {
    if (latencies_.empty())
      return 0;
    return 0.5 + static_cast<double>(
                     std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
                     latencies_.size();
  }

  void PrintResults() const {
    PRINT("] ");
    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
      PRINT("%d ", *it);
    }
    PRINT("\n");
    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
          max_latency(), average_latency());
  }

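  // Each buffer corresponds to 10 ms of audio, so a sample index within the
  // buffer maps linearly to an extra delay between 0 and 10 ms (rounded to
  // the nearest integer millisecond).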
  int IndexToMilliseconds(double index) const {
    return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
  }

 private:
  const size_t frames_per_buffer_;
  const size_t bytes_per_buffer_;
  size_t play_count_;
  size_t rec_count_;
  int64_t pulse_time_;
  std::vector<int> latencies_;
};

// Mocks the AudioTransport object and proxies actions for the two callbacks
// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
// of AudioStreamInterface.
class MockAudioTransportAndroid : public test::MockAudioTransport {
 public:
  explicit MockAudioTransportAndroid(int type)
      : num_callbacks_(0),
        type_(type),
        play_count_(0),
        rec_count_(0),
        audio_stream_(nullptr) {}

  virtual ~MockAudioTransportAndroid() {}

  // Set default actions of the mock object. We are delegating to fake
  // implementations (of AudioStreamInterface) here.
  void HandleCallbacks(EventWrapper* test_is_done,
                       AudioStreamInterface* audio_stream,
                       int num_callbacks) {
    test_is_done_ = test_is_done;
    audio_stream_ = audio_stream;
    num_callbacks_ = num_callbacks;
    if (play_mode()) {
      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
          .WillByDefault(
              Invoke(this, &MockAudioTransportAndroid::RealNeedMorePlayData));
    }
    if (rec_mode()) {
      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
          .WillByDefault(Invoke(
              this, &MockAudioTransportAndroid::RealRecordedDataIsAvailable));
    }
  }

  int32_t RealRecordedDataIsAvailable(const void* audioSamples,
                                      const size_t nSamples,
                                      const size_t nBytesPerSample,
                                      const size_t nChannels,
                                      const uint32_t samplesPerSec,
                                      const uint32_t totalDelayMS,
                                      const int32_t clockDrift,
                                      const uint32_t currentMicLevel,
                                      const bool keyPressed,
                                      uint32_t& newMicLevel) {  // NOLINT
    EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
    rec_count_++;
    // Process the recorded audio stream if an AudioStreamInterface
    // implementation exists.
    if (audio_stream_) {
      audio_stream_->Write(audioSamples, nSamples);
    }
    if (ReceivedEnoughCallbacks()) {
      test_is_done_->Set();
    }
    return 0;
  }

  int32_t RealNeedMorePlayData(const size_t nSamples,
                               const size_t nBytesPerSample,
                               const size_t nChannels,
                               const uint32_t samplesPerSec,
                               void* audioSamples,
                               size_t& nSamplesOut,  // NOLINT
                               int64_t* elapsed_time_ms,
                               int64_t* ntp_time_ms) {
    EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
    play_count_++;
    nSamplesOut = nSamples;
    // Read (possibly processed) audio stream samples to be played out if an
    // AudioStreamInterface implementation exists.
    if (audio_stream_) {
      audio_stream_->Read(audioSamples, nSamples);
    }
    if (ReceivedEnoughCallbacks()) {
      test_is_done_->Set();
    }
    return 0;
  }

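  // Returns true once each enabled direction (playout and/or recording) has
  // received at least |num_callbacks_| callbacks.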
  bool ReceivedEnoughCallbacks() {
    bool recording_done = false;
    if (rec_mode())
      recording_done = rec_count_ >= num_callbacks_;
    else
      recording_done = true;

    bool playout_done = false;
    if (play_mode())
      playout_done = play_count_ >= num_callbacks_;
    else
      playout_done = true;

    return recording_done && playout_done;
  }

  bool play_mode() const { return type_ & kPlayout; }
  bool rec_mode() const { return type_ & kRecording; }

 private:
  EventWrapper* test_is_done_;
  size_t num_callbacks_;
  int type_;
  size_t play_count_;
  size_t rec_count_;
  AudioStreamInterface* audio_stream_;
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};

// AudioDeviceTest test fixture.
class AudioDeviceTest : public ::testing::Test {
 protected:
  AudioDeviceTest() : test_is_done_(EventWrapper::Create()) {
    // One-time initialization of JVM and application context. Ensures that we
    // can do calls between C++ and Java. Initializes both Java and OpenSL ES
    // implementations.
    webrtc::audiodevicemodule::EnsureInitialized();
    // Creates an audio device using a default audio layer.
    audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
    EXPECT_NE(audio_device_.get(), nullptr);
    EXPECT_EQ(0, audio_device_->Init());
    playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
    record_parameters_ = audio_manager()->GetRecordAudioParameters();
    build_info_.reset(new BuildInfo());
  }
  virtual ~AudioDeviceTest() { EXPECT_EQ(0, audio_device_->Terminate()); }

  int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
  int record_sample_rate() const { return record_parameters_.sample_rate(); }
  size_t playout_channels() const { return playout_parameters_.channels(); }
  size_t record_channels() const { return record_parameters_.channels(); }
  size_t playout_frames_per_10ms_buffer() const {
    return playout_parameters_.frames_per_10ms_buffer();
  }
  size_t record_frames_per_10ms_buffer() const {
    return record_parameters_.frames_per_10ms_buffer();
  }

  int total_delay_ms() const {
    return audio_manager()->GetDelayEstimateInMilliseconds();
  }

  rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
    return audio_device_;
  }

  AudioDeviceModuleImpl* audio_device_impl() const {
    return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
  }

  AudioManager* audio_manager() const {
    return audio_device_impl()->GetAndroidAudioManagerForTest();
  }

  AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
    return static_cast<AudioDeviceModuleImpl*>(adm)
        ->GetAndroidAudioManagerForTest();
  }

  AudioDeviceBuffer* audio_device_buffer() const {
    return audio_device_impl()->GetAudioDeviceBuffer();
  }

  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
      AudioDeviceModule::AudioLayer audio_layer) {
    rtc::scoped_refptr<AudioDeviceModule> module(
        AudioDeviceModule::Create(0, audio_layer));
    return module;
  }

  // Returns file name relative to the resource root given a sample rate.
  std::string GetFileName(int sample_rate) {
    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
    char fname[64];
    snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
             sample_rate / 1000);
    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
    EXPECT_TRUE(test::FileExists(file_name));
#ifdef ENABLE_PRINTF
    PRINT("file name: %s\n", file_name.c_str());
    const size_t bytes = test::GetFileSize(file_name);
    PRINT("file size: %" PRIuS " [bytes]\n", bytes);
    PRINT("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
    const int seconds =
        static_cast<int>(bytes / (sample_rate * kBytesPerSample));
    PRINT("file size: %d [secs]\n", seconds);
    PRINT("file size: %" PRIuS " [callbacks]\n",
          seconds * kNumCallbacksPerSecond);
#endif
    return file_name;
  }

  AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
    AudioDeviceModule::AudioLayer audio_layer;
    EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
    return audio_layer;
  }

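  // Creates an ADM for |layer_to_test| and returns the delay estimate (in
  // milliseconds) reported by its audio manager.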
  int TestDelayOnAudioLayer(
      const AudioDeviceModule::AudioLayer& layer_to_test) {
    rtc::scoped_refptr<AudioDeviceModule> audio_device;
    audio_device = CreateAudioDevice(layer_to_test);
    EXPECT_NE(audio_device.get(), nullptr);
    AudioManager* audio_manager = GetAudioManager(audio_device.get());
    EXPECT_NE(audio_manager, nullptr);
    return audio_manager->GetDelayEstimateInMilliseconds();
  }

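  // Creates an ADM for |layer_to_test| and returns the audio layer that the
  // ADM actually activates.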
  AudioDeviceModule::AudioLayer TestActiveAudioLayer(
      const AudioDeviceModule::AudioLayer& layer_to_test) {
    rtc::scoped_refptr<AudioDeviceModule> audio_device;
    audio_device = CreateAudioDevice(layer_to_test);
    EXPECT_NE(audio_device.get(), nullptr);
    AudioDeviceModule::AudioLayer active;
    EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
    return active;
  }

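  // Returns true if the device model under test matches |model|; used by
  // tests that must be skipped on specific devices.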
  bool DisableTestForThisDevice(const std::string& model) {
    return (build_info_->GetDeviceModel() == model);
  }

  // Volume control is currently only supported for the Java output audio
  // layer. For OpenSL ES, the internal stream volume is always at the maximum
  // level and there is no need for this test to set it to max.
  bool AudioLayerSupportsVolumeControl() const {
    return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
  }

  void SetMaxPlayoutVolume() {
    if (!AudioLayerSupportsVolumeControl())
      return;
    uint32_t max_volume;
    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
    EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
  }

  void DisableBuiltInAECIfAvailable() {
    if (audio_device()->BuiltInAECIsAvailable()) {
      EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
    }
  }

  void StartPlayout() {
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_EQ(0, audio_device()->InitPlayout());
    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
    EXPECT_EQ(0, audio_device()->StartPlayout());
    EXPECT_TRUE(audio_device()->Playing());
  }

  void StopPlayout() {
    EXPECT_EQ(0, audio_device()->StopPlayout());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
  }

  void StartRecording() {
    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
    EXPECT_FALSE(audio_device()->Recording());
    EXPECT_EQ(0, audio_device()->InitRecording());
    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
    EXPECT_EQ(0, audio_device()->StartRecording());
    EXPECT_TRUE(audio_device()->Recording());
  }

  void StopRecording() {
    EXPECT_EQ(0, audio_device()->StopRecording());
    EXPECT_FALSE(audio_device()->Recording());
  }

  int GetMaxSpeakerVolume() const {
    uint32_t max_volume(0);
    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
    return max_volume;
  }

  int GetMinSpeakerVolume() const {
    uint32_t min_volume(0);
    EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
    return min_volume;
  }

  int GetSpeakerVolume() const {
    uint32_t volume(0);
    EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
    return volume;
  }

  std::unique_ptr<EventWrapper> test_is_done_;
  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
  AudioParameters playout_parameters_;
  AudioParameters record_parameters_;
  std::unique_ptr<BuildInfo> build_info_;
};

TEST_F(AudioDeviceTest, ConstructDestruct) {
  // Using the test fixture to create and destruct the audio device module.
}

// We always ask for a default audio layer when the ADM is constructed. But the
// ADM will then internally set the most suitable combination of audio layers
// for input and output based on whether low-latency output and/or input audio
// in combination with OpenSL ES is supported or not. This test ensures that
// the correct selection is made.
TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
  const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
  bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
  bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
  bool aaudio = audio_manager()->IsAAudioSupported();
  AudioDeviceModule::AudioLayer expected_audio_layer;
  if (aaudio) {
    expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
  } else if (low_latency_output && low_latency_input) {
    expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
  } else if (low_latency_output && !low_latency_input) {
    expected_audio_layer =
        AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
  } else {
    expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
  }
  EXPECT_EQ(expected_audio_layer, audio_layer);
}

// Verify that it is possible to explicitly create the two types of supported
// ADMs. These two tests override the default selection of the native audio
// layer by ignoring whether the device supports low-latency output or not.
679TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
680 AudioDeviceModule::AudioLayer expected_layer =
681 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
Yves Gerey665174f2018-06-19 15:03:05 +0200682 AudioDeviceModule::AudioLayer active_layer =
683 TestActiveAudioLayer(expected_layer);
henrikab2619892015-05-18 16:49:16 +0200684 EXPECT_EQ(expected_layer, active_layer);
685}
686
687TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
688 AudioDeviceModule::AudioLayer expected_layer =
689 AudioDeviceModule::kAndroidJavaAudio;
Yves Gerey665174f2018-06-19 15:03:05 +0200690 AudioDeviceModule::AudioLayer active_layer =
691 TestActiveAudioLayer(expected_layer);
henrikab2619892015-05-18 16:49:16 +0200692 EXPECT_EQ(expected_layer, active_layer);
693}
694
henrika918b5542016-09-19 15:44:09 +0200695TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
696 AudioDeviceModule::AudioLayer expected_layer =
697 AudioDeviceModule::kAndroidOpenSLESAudio;
698 AudioDeviceModule::AudioLayer active_layer =
699 TestActiveAudioLayer(expected_layer);
700 EXPECT_EQ(expected_layer, active_layer);
701}
702
henrika883d00f2018-03-16 10:09:49 +0100703// TODO(bugs.webrtc.org/8914)
704#if !defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
705#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
706 DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
707#else
708#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
709 CorrectAudioLayerIsUsedForAAudioInBothDirections
710#endif
711TEST_F(AudioDeviceTest,
712 MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
713 AudioDeviceModule::AudioLayer expected_layer =
714 AudioDeviceModule::kAndroidAAudioAudio;
715 AudioDeviceModule::AudioLayer active_layer =
716 TestActiveAudioLayer(expected_layer);
717 EXPECT_EQ(expected_layer, active_layer);
718}
719
720// TODO(bugs.webrtc.org/8914)
721#if !defined(AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
722#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
723 DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
724#else
725#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
726 CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
727#endif
728TEST_F(AudioDeviceTest,
729 MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
730 AudioDeviceModule::AudioLayer expected_layer =
731 AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
732 AudioDeviceModule::AudioLayer active_layer =
733 TestActiveAudioLayer(expected_layer);
734 EXPECT_EQ(expected_layer, active_layer);
735}
736
// The Android ADM supports two different delay reporting modes. One for the
// low-latency output path (in combination with OpenSL ES), and one for the
// high-latency output path (Java backends in both directions). These two tests
// verify that the audio manager reports the correct delay estimate given the
// selected audio layer. Note that this delay estimate will only be utilized
// if the HW AEC is disabled.
743TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
744 EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
745 TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
746}
747
748TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
749 EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
750 TestDelayOnAudioLayer(
Yves Gerey665174f2018-06-19 15:03:05 +0200751 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
henrikab2619892015-05-18 16:49:16 +0200752}
753
754// Ensure that the ADM internal audio device buffer is configured to use the
755// correct set of parameters.
756TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
757 EXPECT_EQ(playout_parameters_.sample_rate(),
henrikacfbd26d2018-09-05 11:36:22 +0200758 static_cast<int>(audio_device_buffer()->PlayoutSampleRate()));
henrikab2619892015-05-18 16:49:16 +0200759 EXPECT_EQ(record_parameters_.sample_rate(),
henrikacfbd26d2018-09-05 11:36:22 +0200760 static_cast<int>(audio_device_buffer()->RecordingSampleRate()));
henrikab2619892015-05-18 16:49:16 +0200761 EXPECT_EQ(playout_parameters_.channels(),
762 audio_device_buffer()->PlayoutChannels());
763 EXPECT_EQ(record_parameters_.channels(),
764 audio_device_buffer()->RecordingChannels());
765}
766
henrikab2619892015-05-18 16:49:16 +0200767TEST_F(AudioDeviceTest, InitTerminate) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000768 // Initialization is part of the test fixture.
769 EXPECT_TRUE(audio_device()->Initialized());
770 EXPECT_EQ(0, audio_device()->Terminate());
771 EXPECT_FALSE(audio_device()->Initialized());
772}
773
henrikab2619892015-05-18 16:49:16 +0200774TEST_F(AudioDeviceTest, Devices) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000775 // Device enumeration is not supported. Verify fixed values only.
776 EXPECT_EQ(1, audio_device()->PlayoutDevices());
777 EXPECT_EQ(1, audio_device()->RecordingDevices());
778}
779
henrikab2619892015-05-18 16:49:16 +0200780TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
781 // The OpenSL ES output audio path does not support volume control.
782 if (!AudioLayerSupportsVolumeControl())
783 return;
henrika8324b522015-03-27 10:56:23 +0100784 bool available;
785 EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
786 EXPECT_TRUE(available);
787}
788
henrikab2619892015-05-18 16:49:16 +0200789TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
790 // The OpenSL ES output audio path does not support volume control.
791 if (!AudioLayerSupportsVolumeControl())
792 return;
793 StartPlayout();
henrika8324b522015-03-27 10:56:23 +0100794 EXPECT_GT(GetMaxSpeakerVolume(), 0);
henrikab2619892015-05-18 16:49:16 +0200795 StopPlayout();
henrika8324b522015-03-27 10:56:23 +0100796}
797
henrikab2619892015-05-18 16:49:16 +0200798TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
799 // The OpenSL ES output audio path does not support volume control.
800 if (!AudioLayerSupportsVolumeControl())
801 return;
henrika8324b522015-03-27 10:56:23 +0100802 EXPECT_EQ(GetMinSpeakerVolume(), 0);
803}
804
henrikab2619892015-05-18 16:49:16 +0200805TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
806 // The OpenSL ES output audio path does not support volume control.
807 if (!AudioLayerSupportsVolumeControl())
808 return;
henrika8324b522015-03-27 10:56:23 +0100809 const int default_volume = GetSpeakerVolume();
810 EXPECT_GE(default_volume, GetMinSpeakerVolume());
811 EXPECT_LE(default_volume, GetMaxSpeakerVolume());
812}
813
henrikab2619892015-05-18 16:49:16 +0200814TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
815 // The OpenSL ES output audio path does not support volume control.
816 if (!AudioLayerSupportsVolumeControl())
817 return;
henrika8324b522015-03-27 10:56:23 +0100818 const int default_volume = GetSpeakerVolume();
819 const int max_volume = GetMaxSpeakerVolume();
820 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
821 int new_volume = GetSpeakerVolume();
822 EXPECT_EQ(new_volume, max_volume);
823 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
824}
825
// Tests that playout can be initiated, started and stopped. No audio callback
// is registered in this test.
TEST_F(AudioDeviceTest, StartStopPlayout) {
  StartPlayout();
  StopPlayout();
  StartPlayout();
  StopPlayout();
}

// Tests that recording can be initiated, started and stopped. No audio callback
// is registered in this test.
TEST_F(AudioDeviceTest, StartStopRecording) {
  StartRecording();
  StopRecording();
  StartRecording();
  StopRecording();
}

// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitPlayout());
  EXPECT_EQ(0, audio_device()->StartPlayout());
  EXPECT_EQ(0, audio_device()->StopPlayout());
  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}

// Verify that calling StopRecording() will leave us in an uninitialized state
// which will require a new call to InitRecording(). This test does not call
// StartRecording() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitRecording());
  EXPECT_EQ(0, audio_device()->StartRecording());
  EXPECT_EQ(0, audio_device()->StopRecording());
  EXPECT_FALSE(audio_device()->RecordingIsInitialized());
}

// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout);
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}

// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
// TODO(henrika): investigate if it is possible to perform a sanity check of
// delay estimates as well (argument #6).
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kRecording);
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));

  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
}

// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout | kRecording);
  mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  StartRecording();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopRecording();
  StopPlayout();
}

// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but the
// test does not explicitly verify the audio quality.
TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
  // TODO(henrika): extend test when mono output is supported.
  EXPECT_EQ(1u, playout_channels());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout);
  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
  std::string file_name = GetFileName(playout_sample_rate());
  std::unique_ptr<FileAudioStream> file_audio_stream(
      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
  mock.HandleCallbacks(test_is_done_.get(), file_audio_stream.get(),
                       num_callbacks);
  // SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_->Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}

// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
// means 'packet played'. Under such conditions, the FIFO would only contain
// one packet on average. However, under more realistic conditions, the size
// of the FIFO will vary more due to an imbalance between the two sides.
// This test tries to verify that the device maintains a balanced callback
// sequence by running in loopback for |kFullDuplexTimeInSec| seconds while
// measuring the size (max and average) of the FIFO. The size of the FIFO is
// increased by the recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
// Disabling this test on bots since it is difficult to come up with a robust
// test condition that works as intended on all of them. The main issue is
// that, when swarming is used, an initial latency can build up when the two
// sides start at different times. Hence, the test can fail even if audio works
// as intended. Keeping the test so it can be enabled manually.
// http://bugs.webrtc.org/7744
TEST_F(AudioDeviceTest, DISABLED_RunPlayoutAndRecordingInFullDuplex) {
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<FifoAudioStream> fifo_audio_stream(
      new FifoAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(test_is_done_.get(), fifo_audio_stream.get(),
                       kFullDuplexTimeInSec * kNumCallbacksPerSecond);
  SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  StartPlayout();
  test_is_done_->Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
  StopPlayout();
  StopRecording();

  // These thresholds are set rather high to accommodate differences in
  // hardware across several devices, so this test can be used in swarming.
  // See http://bugs.webrtc.org/6464
  EXPECT_LE(fifo_audio_stream->average_size(), 60u);
  EXPECT_LE(fifo_audio_stream->largest_size(), 70u);
}

// Measures loopback latency and reports the min, max and average values for
// a full duplex audio session.
// The latency is measured like so:
// - Insert impulses periodically on the output side.
// - Detect the impulses on the input side.
// - Measure the time difference between the transmit time and receive time.
// - Store time differences in a vector and calculate min, max and average.
// This test requires special hardware called an Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(test_is_done_.get(), latency_audio_stream.get(),
                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  SetMaxPlayoutVolume();
  DisableBuiltInAECIfAvailable();
  StartRecording();
  StartPlayout();
  test_is_done_->Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
  StopPlayout();
  StopRecording();
  // Verify that the correct number of transmitted impulses are detected.
  EXPECT_EQ(latency_audio_stream->num_latency_values(),
            static_cast<size_t>(
                kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
  latency_audio_stream->PrintResults();
}

}  // namespace webrtc