blob: e9daf1c8e01b2e0e688faa9d491cd75698bde96e [file] [log] [blame]
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +00001/*
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
henrikaee369e42015-05-25 10:11:27 +020011#include <algorithm>
12#include <limits>
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000013#include <list>
kwibergf01633e2016-02-24 05:00:36 -080014#include <memory>
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000015#include <numeric>
henrikaee369e42015-05-25 10:11:27 +020016#include <string>
17#include <vector>
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000018
Mirko Bonadeid9708072019-01-25 20:26:48 +010019#include "api/scoped_refptr.h"
Danil Chapovalov1c41be62019-04-01 09:16:12 +020020#include "api/task_queue/default_task_queue_factory.h"
21#include "api/task_queue/task_queue_factory.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020022#include "modules/audio_device/android/audio_common.h"
23#include "modules/audio_device/android/audio_manager.h"
24#include "modules/audio_device/android/build_info.h"
25#include "modules/audio_device/android/ensure_initialized.h"
26#include "modules/audio_device/audio_device_impl.h"
27#include "modules/audio_device/include/audio_device.h"
28#include "modules/audio_device/include/mock_audio_transport.h"
29#include "rtc_base/arraysize.h"
Steve Anton10542f22019-01-11 09:11:00 -080030#include "rtc_base/critical_section.h"
Niels Möller140b1d92018-11-08 14:52:19 +010031#include "rtc_base/event.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020032#include "rtc_base/format_macros.h"
Steve Anton10542f22019-01-11 09:11:00 -080033#include "rtc_base/time_utils.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020034#include "test/gmock.h"
35#include "test/gtest.h"
Steve Anton10542f22019-01-11 09:11:00 -080036#include "test/testsupport/file_utils.h"
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000037
38using std::cout;
39using std::endl;
40using ::testing::_;
41using ::testing::AtLeast;
42using ::testing::Gt;
43using ::testing::Invoke;
44using ::testing::NiceMock;
45using ::testing::NotNull;
46using ::testing::Return;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000047
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000048// #define ENABLE_DEBUG_PRINTF
49#ifdef ENABLE_DEBUG_PRINTF
50#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000051#else
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000052#define PRINTD(...) ((void)0)
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000053#endif
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000054#define PRINT(...) fprintf(stderr, __VA_ARGS__);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000055
56namespace webrtc {
57
// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const size_t kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Average number of audio callbacks per second assuming 10ms packet size.
static const size_t kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 5;
// The test audio is 16-bit PCM, i.e. two bytes per sample.
static const size_t kBitsPerSample = 16;
static const size_t kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
// Note that first |kNumIgnoreFirstCallbacks| are ignored.
static const int kFullDuplexTimeInSec = 5;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const size_t kNumIgnoreFirstCallbacks = 50;
// Sets the number of impulses per second in the latency test.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. Number of transmitted impulses
// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
static const int kMeasureLatencyTimeInSec = 11;
// Utilized in round-trip latency measurements to avoid capturing noise samples.
static const int kImpulseThreshold = 1000;
// Tag prefix used when printing test results.
static const char kTag[] = "[..........] ";

// Bit mask selecting which audio direction(s) a test exercises.
enum TransportType {
  kPlayout = 0x1,
  kRecording = 0x2,
};
89
// Interface for processing the audio stream. Real implementations can e.g.
// run audio in loopback, read audio from a file or perform latency
// measurements.
class AudioStreamInterface {
 public:
  // Invoked when |num_frames| recorded 16-bit frames are available in
  // |source| (capture side).
  virtual void Write(const void* source, size_t num_frames) = 0;

  // Invoked when the audio layer needs |num_frames| 16-bit frames of playout
  // data; the implementation fills |destination| (render side).
  virtual void Read(void* destination, size_t num_frames) = 0;

 protected:
  // Protected non-public destructor: instances are never deleted through
  // this interface.
  virtual ~AudioStreamInterface() = default;
};
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000101
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000102// Reads audio samples from a PCM file where the file is stored in memory at
103// construction.
104class FileAudioStream : public AudioStreamInterface {
105 public:
Yves Gerey665174f2018-06-19 15:03:05 +0200106 FileAudioStream(size_t num_callbacks,
107 const std::string& file_name,
108 int sample_rate)
109 : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000110 file_size_in_bytes_ = test::GetFileSize(file_name);
111 sample_rate_ = sample_rate;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000112 EXPECT_GE(file_size_in_callbacks(), num_callbacks)
henrika@webrtc.org74d47922015-03-10 11:59:03 +0000113 << "Size of test file is not large enough to last during the test.";
Peter Kastingdce40cf2015-08-24 14:52:23 -0700114 const size_t num_16bit_samples =
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000115 test::GetFileSize(file_name) / kBytesPerSample;
116 file_.reset(new int16_t[num_16bit_samples]);
117 FILE* audio_file = fopen(file_name.c_str(), "rb");
118 EXPECT_NE(audio_file, nullptr);
Yves Gerey665174f2018-06-19 15:03:05 +0200119 size_t num_samples_read =
120 fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000121 EXPECT_EQ(num_samples_read, num_16bit_samples);
122 fclose(audio_file);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000123 }
124
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000125 // AudioStreamInterface::Write() is not implemented.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700126 void Write(const void* source, size_t num_frames) override {}
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000127
128 // Read samples from file stored in memory (at construction) and copy
129 // |num_frames| (<=> 10ms) to the |destination| byte buffer.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700130 void Read(void* destination, size_t num_frames) override {
Yves Gerey665174f2018-06-19 15:03:05 +0200131 memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000132 num_frames * sizeof(int16_t));
133 file_pos_ += num_frames;
134 }
135
136 int file_size_in_seconds() const {
Yves Gerey665174f2018-06-19 15:03:05 +0200137 return static_cast<int>(file_size_in_bytes_ /
138 (kBytesPerSample * sample_rate_));
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000139 }
Peter Kastingdce40cf2015-08-24 14:52:23 -0700140 size_t file_size_in_callbacks() const {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000141 return file_size_in_seconds() * kNumCallbacksPerSecond;
142 }
143
144 private:
Peter Kastingdce40cf2015-08-24 14:52:23 -0700145 size_t file_size_in_bytes_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000146 int sample_rate_;
kwibergf01633e2016-02-24 05:00:36 -0800147 std::unique_ptr<int16_t[]> file_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700148 size_t file_pos_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000149};
150
151// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
152// buffers of fixed size and allows Write and Read operations. The idea is to
153// store recorded audio buffers (using Write) and then read (using Read) these
154// stored buffers with as short delay as possible when the audio layer needs
155// data to play out. The number of buffers in the FIFO will stabilize under
156// normal conditions since there will be a balance between Write and Read calls.
157// The container is a std::list container and access is protected with a lock
158// since both sides (playout and recording) are driven by its own thread.
159class FifoAudioStream : public AudioStreamInterface {
160 public:
Peter Kastingdce40cf2015-08-24 14:52:23 -0700161 explicit FifoAudioStream(size_t frames_per_buffer)
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000162 : frames_per_buffer_(frames_per_buffer),
163 bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
164 fifo_(new AudioBufferList),
165 largest_size_(0),
166 total_written_elements_(0),
167 write_count_(0) {
168 EXPECT_NE(fifo_.get(), nullptr);
169 }
170
Yves Gerey665174f2018-06-19 15:03:05 +0200171 ~FifoAudioStream() { Flush(); }
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000172
173 // Allocate new memory, copy |num_frames| samples from |source| into memory
174 // and add pointer to the memory location to end of the list.
175 // Increases the size of the FIFO by one element.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700176 void Write(const void* source, size_t num_frames) override {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000177 ASSERT_EQ(num_frames, frames_per_buffer_);
178 PRINTD("+");
179 if (write_count_++ < kNumIgnoreFirstCallbacks) {
180 return;
181 }
182 int16_t* memory = new int16_t[frames_per_buffer_];
Yves Gerey665174f2018-06-19 15:03:05 +0200183 memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000184 rtc::CritScope lock(&lock_);
185 fifo_->push_back(memory);
Peter Kastingdce40cf2015-08-24 14:52:23 -0700186 const size_t size = fifo_->size();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000187 if (size > largest_size_) {
188 largest_size_ = size;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700189 PRINTD("(%" PRIuS ")", largest_size_);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000190 }
191 total_written_elements_ += size;
192 }
193
194 // Read pointer to data buffer from front of list, copy |num_frames| of stored
195 // data into |destination| and delete the utilized memory allocation.
196 // Decreases the size of the FIFO by one element.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700197 void Read(void* destination, size_t num_frames) override {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000198 ASSERT_EQ(num_frames, frames_per_buffer_);
199 PRINTD("-");
200 rtc::CritScope lock(&lock_);
201 if (fifo_->empty()) {
202 memset(destination, 0, bytes_per_buffer_);
203 } else {
204 int16_t* memory = fifo_->front();
205 fifo_->pop_front();
Yves Gerey665174f2018-06-19 15:03:05 +0200206 memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000207 delete memory;
208 }
209 }
210
Yves Gerey665174f2018-06-19 15:03:05 +0200211 size_t size() const { return fifo_->size(); }
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000212
Yves Gerey665174f2018-06-19 15:03:05 +0200213 size_t largest_size() const { return largest_size_; }
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000214
Peter Kastingdce40cf2015-08-24 14:52:23 -0700215 size_t average_size() const {
Yves Gerey665174f2018-06-19 15:03:05 +0200216 return (total_written_elements_ == 0)
217 ? 0.0
218 : 0.5 + static_cast<float>(total_written_elements_) /
219 (write_count_ - kNumIgnoreFirstCallbacks);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000220 }
221
222 private:
223 void Flush() {
224 for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
225 delete *it;
226 }
227 fifo_->clear();
228 }
229
230 using AudioBufferList = std::list<int16_t*>;
231 rtc::CriticalSection lock_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700232 const size_t frames_per_buffer_;
233 const size_t bytes_per_buffer_;
kwibergf01633e2016-02-24 05:00:36 -0800234 std::unique_ptr<AudioBufferList> fifo_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700235 size_t largest_size_;
236 size_t total_written_elements_;
237 size_t write_count_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000238};
239
240// Inserts periodic impulses and measures the latency between the time of
241// transmission and time of receiving the same impulse.
242// Usage requires a special hardware called Audio Loopback Dongle.
243// See http://source.android.com/devices/audio/loopback.html for details.
244class LatencyMeasuringAudioStream : public AudioStreamInterface {
245 public:
Peter Kastingdce40cf2015-08-24 14:52:23 -0700246 explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
henrika92fd8e62016-11-15 05:37:58 -0800247 : frames_per_buffer_(frames_per_buffer),
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000248 bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
249 play_count_(0),
250 rec_count_(0),
Yves Gerey665174f2018-06-19 15:03:05 +0200251 pulse_time_(0) {}
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000252
253 // Insert periodic impulses in first two samples of |destination|.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700254 void Read(void* destination, size_t num_frames) override {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000255 ASSERT_EQ(num_frames, frames_per_buffer_);
256 if (play_count_ == 0) {
257 PRINT("[");
258 }
259 play_count_++;
260 memset(destination, 0, bytes_per_buffer_);
261 if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
262 if (pulse_time_ == 0) {
henrika92fd8e62016-11-15 05:37:58 -0800263 pulse_time_ = rtc::TimeMillis();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000264 }
265 PRINT(".");
266 const int16_t impulse = std::numeric_limits<int16_t>::max();
Yves Gerey665174f2018-06-19 15:03:05 +0200267 int16_t* ptr16 = static_cast<int16_t*>(destination);
Peter Kastingdce40cf2015-08-24 14:52:23 -0700268 for (size_t i = 0; i < 2; ++i) {
269 ptr16[i] = impulse;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000270 }
271 }
272 }
273
274 // Detect received impulses in |source|, derive time between transmission and
275 // detection and add the calculated delay to list of latencies.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700276 void Write(const void* source, size_t num_frames) override {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000277 ASSERT_EQ(num_frames, frames_per_buffer_);
278 rec_count_++;
279 if (pulse_time_ == 0) {
280 // Avoid detection of new impulse response until a new impulse has
281 // been transmitted (sets |pulse_time_| to value larger than zero).
282 return;
283 }
Yves Gerey665174f2018-06-19 15:03:05 +0200284 const int16_t* ptr16 = static_cast<const int16_t*>(source);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000285 std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
286 // Find max value in the audio buffer.
287 int max = *std::max_element(vec.begin(), vec.end());
288 // Find index (element position in vector) of the max element.
Yves Gerey665174f2018-06-19 15:03:05 +0200289 int index_of_max =
290 std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000291 if (max > kImpulseThreshold) {
292 PRINTD("(%d,%d)", max, index_of_max);
henrika92fd8e62016-11-15 05:37:58 -0800293 int64_t now_time = rtc::TimeMillis();
Yves Gerey665174f2018-06-19 15:03:05 +0200294 int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
295 PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000296 PRINTD("[%d]", extra_delay);
297 // Total latency is the difference between transmit time and detection
298 // tome plus the extra delay within the buffer in which we detected the
299 // received impulse. It is transmitted at sample 0 but can be received
300 // at sample N where N > 0. The term |extra_delay| accounts for N and it
301 // is a value between 0 and 10ms.
302 latencies_.push_back(now_time - pulse_time_ + extra_delay);
303 pulse_time_ = 0;
304 } else {
305 PRINTD("-");
306 }
307 }
308
Yves Gerey665174f2018-06-19 15:03:05 +0200309 size_t num_latency_values() const { return latencies_.size(); }
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000310
311 int min_latency() const {
312 if (latencies_.empty())
313 return 0;
314 return *std::min_element(latencies_.begin(), latencies_.end());
315 }
316
317 int max_latency() const {
318 if (latencies_.empty())
319 return 0;
320 return *std::max_element(latencies_.begin(), latencies_.end());
321 }
322
323 int average_latency() const {
324 if (latencies_.empty())
325 return 0;
Yves Gerey665174f2018-06-19 15:03:05 +0200326 return 0.5 + static_cast<double>(
327 std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
328 latencies_.size();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000329 }
330
331 void PrintResults() const {
332 PRINT("] ");
333 for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
334 PRINT("%d ", *it);
335 }
336 PRINT("\n");
Yves Gerey665174f2018-06-19 15:03:05 +0200337 PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
338 max_latency(), average_latency());
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000339 }
340
341 int IndexToMilliseconds(double index) const {
pkastingb297c5a2015-07-22 15:17:22 -0700342 return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000343 }
344
345 private:
Peter Kastingdce40cf2015-08-24 14:52:23 -0700346 const size_t frames_per_buffer_;
347 const size_t bytes_per_buffer_;
348 size_t play_count_;
349 size_t rec_count_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000350 int64_t pulse_time_;
351 std::vector<int> latencies_;
352};
353
354// Mocks the AudioTransport object and proxies actions for the two callbacks
355// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
356// of AudioStreamInterface.
aleloi5de52fd2016-11-10 01:05:34 -0800357class MockAudioTransportAndroid : public test::MockAudioTransport {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000358 public:
aleloi5de52fd2016-11-10 01:05:34 -0800359 explicit MockAudioTransportAndroid(int type)
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000360 : num_callbacks_(0),
361 type_(type),
362 play_count_(0),
363 rec_count_(0),
364 audio_stream_(nullptr) {}
365
aleloi5de52fd2016-11-10 01:05:34 -0800366 virtual ~MockAudioTransportAndroid() {}
maxmorin1aee0b52016-08-15 11:46:19 -0700367
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000368 // Set default actions of the mock object. We are delegating to fake
369 // implementations (of AudioStreamInterface) here.
Niels Möller140b1d92018-11-08 14:52:19 +0100370 void HandleCallbacks(rtc::Event* test_is_done,
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000371 AudioStreamInterface* audio_stream,
372 int num_callbacks) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000373 test_is_done_ = test_is_done;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000374 audio_stream_ = audio_stream;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000375 num_callbacks_ = num_callbacks;
376 if (play_mode()) {
377 ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
378 .WillByDefault(
aleloi5de52fd2016-11-10 01:05:34 -0800379 Invoke(this, &MockAudioTransportAndroid::RealNeedMorePlayData));
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000380 }
381 if (rec_mode()) {
382 ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
aleloi5de52fd2016-11-10 01:05:34 -0800383 .WillByDefault(Invoke(
384 this, &MockAudioTransportAndroid::RealRecordedDataIsAvailable));
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000385 }
386 }
387
388 int32_t RealRecordedDataIsAvailable(const void* audioSamples,
Peter Kastingdce40cf2015-08-24 14:52:23 -0700389 const size_t nSamples,
390 const size_t nBytesPerSample,
Peter Kasting69558702016-01-12 16:26:35 -0800391 const size_t nChannels,
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000392 const uint32_t samplesPerSec,
393 const uint32_t totalDelayMS,
394 const int32_t clockDrift,
395 const uint32_t currentMicLevel,
396 const bool keyPressed,
henrika883d00f2018-03-16 10:09:49 +0100397 uint32_t& newMicLevel) { // NOLINT
henrika@webrtc.org74d47922015-03-10 11:59:03 +0000398 EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000399 rec_count_++;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000400 // Process the recorded audio stream if an AudioStreamInterface
401 // implementation exists.
402 if (audio_stream_) {
403 audio_stream_->Write(audioSamples, nSamples);
404 }
405 if (ReceivedEnoughCallbacks()) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000406 test_is_done_->Set();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000407 }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000408 return 0;
409 }
410
Peter Kastingdce40cf2015-08-24 14:52:23 -0700411 int32_t RealNeedMorePlayData(const size_t nSamples,
412 const size_t nBytesPerSample,
Peter Kasting69558702016-01-12 16:26:35 -0800413 const size_t nChannels,
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000414 const uint32_t samplesPerSec,
415 void* audioSamples,
henrika883d00f2018-03-16 10:09:49 +0100416 size_t& nSamplesOut, // NOLINT
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000417 int64_t* elapsed_time_ms,
418 int64_t* ntp_time_ms) {
henrika@webrtc.org74d47922015-03-10 11:59:03 +0000419 EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000420 play_count_++;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000421 nSamplesOut = nSamples;
422 // Read (possibly processed) audio stream samples to be played out if an
423 // AudioStreamInterface implementation exists.
424 if (audio_stream_) {
425 audio_stream_->Read(audioSamples, nSamples);
426 }
427 if (ReceivedEnoughCallbacks()) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000428 test_is_done_->Set();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000429 }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000430 return 0;
431 }
432
433 bool ReceivedEnoughCallbacks() {
434 bool recording_done = false;
435 if (rec_mode())
436 recording_done = rec_count_ >= num_callbacks_;
437 else
438 recording_done = true;
439
440 bool playout_done = false;
441 if (play_mode())
442 playout_done = play_count_ >= num_callbacks_;
443 else
444 playout_done = true;
445
446 return recording_done && playout_done;
447 }
448
449 bool play_mode() const { return type_ & kPlayout; }
450 bool rec_mode() const { return type_ & kRecording; }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000451
452 private:
Niels Möller140b1d92018-11-08 14:52:19 +0100453 rtc::Event* test_is_done_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700454 size_t num_callbacks_;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000455 int type_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700456 size_t play_count_;
457 size_t rec_count_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000458 AudioStreamInterface* audio_stream_;
kwibergf01633e2016-02-24 05:00:36 -0800459 std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000460};
461
henrikab2619892015-05-18 16:49:16 +0200462// AudioDeviceTest test fixture.
463class AudioDeviceTest : public ::testing::Test {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000464 protected:
Danil Chapovalov1c41be62019-04-01 09:16:12 +0200465 AudioDeviceTest() : task_queue_factory_(CreateDefaultTaskQueueFactory()) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000466 // One-time initialization of JVM and application context. Ensures that we
467 // can do calls between C++ and Java. Initializes both Java and OpenSL ES
468 // implementations.
469 webrtc::audiodevicemodule::EnsureInitialized();
henrikab2619892015-05-18 16:49:16 +0200470 // Creates an audio device using a default audio layer.
471 audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000472 EXPECT_NE(audio_device_.get(), nullptr);
473 EXPECT_EQ(0, audio_device_->Init());
henrikab2619892015-05-18 16:49:16 +0200474 playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
475 record_parameters_ = audio_manager()->GetRecordAudioParameters();
henrika523183b2015-05-21 13:43:08 +0200476 build_info_.reset(new BuildInfo());
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000477 }
Yves Gerey665174f2018-06-19 15:03:05 +0200478 virtual ~AudioDeviceTest() { EXPECT_EQ(0, audio_device_->Terminate()); }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000479
Yves Gerey665174f2018-06-19 15:03:05 +0200480 int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
481 int record_sample_rate() const { return record_parameters_.sample_rate(); }
482 size_t playout_channels() const { return playout_parameters_.channels(); }
483 size_t record_channels() const { return record_parameters_.channels(); }
Peter Kastingdce40cf2015-08-24 14:52:23 -0700484 size_t playout_frames_per_10ms_buffer() const {
henrikab2619892015-05-18 16:49:16 +0200485 return playout_parameters_.frames_per_10ms_buffer();
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000486 }
Peter Kastingdce40cf2015-08-24 14:52:23 -0700487 size_t record_frames_per_10ms_buffer() const {
henrikab2619892015-05-18 16:49:16 +0200488 return record_parameters_.frames_per_10ms_buffer();
489 }
490
491 int total_delay_ms() const {
492 return audio_manager()->GetDelayEstimateInMilliseconds();
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000493 }
494
Peter Boström26b08602015-06-04 15:18:17 +0200495 rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000496 return audio_device_;
497 }
498
henrikab2619892015-05-18 16:49:16 +0200499 AudioDeviceModuleImpl* audio_device_impl() const {
500 return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000501 }
502
henrikab2619892015-05-18 16:49:16 +0200503 AudioManager* audio_manager() const {
504 return audio_device_impl()->GetAndroidAudioManagerForTest();
505 }
506
507 AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
Yves Gerey665174f2018-06-19 15:03:05 +0200508 return static_cast<AudioDeviceModuleImpl*>(adm)
509 ->GetAndroidAudioManagerForTest();
henrikab2619892015-05-18 16:49:16 +0200510 }
511
512 AudioDeviceBuffer* audio_device_buffer() const {
513 return audio_device_impl()->GetAudioDeviceBuffer();
514 }
515
Peter Boström26b08602015-06-04 15:18:17 +0200516 rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
henrikab2619892015-05-18 16:49:16 +0200517 AudioDeviceModule::AudioLayer audio_layer) {
Peter Boström26b08602015-06-04 15:18:17 +0200518 rtc::scoped_refptr<AudioDeviceModule> module(
Danil Chapovalov1c41be62019-04-01 09:16:12 +0200519 AudioDeviceModule::Create(audio_layer, task_queue_factory_.get()));
henrikab2619892015-05-18 16:49:16 +0200520 return module;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000521 }
522
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000523 // Returns file name relative to the resource root given a sample rate.
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000524 std::string GetFileName(int sample_rate) {
525 EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
526 char fname[64];
Yves Gerey665174f2018-06-19 15:03:05 +0200527 snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000528 sample_rate / 1000);
529 std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
530 EXPECT_TRUE(test::FileExists(file_name));
531#ifdef ENABLE_PRINTF
532 PRINT("file name: %s\n", file_name.c_str());
Peter Kastingdce40cf2015-08-24 14:52:23 -0700533 const size_t bytes = test::GetFileSize(file_name);
534 PRINT("file size: %" PRIuS " [bytes]\n", bytes);
535 PRINT("file size: %" PRIuS " [samples]\n", bytes / kBytesPerSample);
536 const int seconds =
537 static_cast<int>(bytes / (sample_rate * kBytesPerSample));
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000538 PRINT("file size: %d [secs]\n", seconds);
Peter Kastingdce40cf2015-08-24 14:52:23 -0700539 PRINT("file size: %" PRIuS " [callbacks]\n",
540 seconds * kNumCallbacksPerSecond);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000541#endif
542 return file_name;
543 }
544
henrikab2619892015-05-18 16:49:16 +0200545 AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
546 AudioDeviceModule::AudioLayer audio_layer;
547 EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
548 return audio_layer;
549 }
550
551 int TestDelayOnAudioLayer(
552 const AudioDeviceModule::AudioLayer& layer_to_test) {
Peter Boström26b08602015-06-04 15:18:17 +0200553 rtc::scoped_refptr<AudioDeviceModule> audio_device;
henrikab2619892015-05-18 16:49:16 +0200554 audio_device = CreateAudioDevice(layer_to_test);
555 EXPECT_NE(audio_device.get(), nullptr);
556 AudioManager* audio_manager = GetAudioManager(audio_device.get());
557 EXPECT_NE(audio_manager, nullptr);
558 return audio_manager->GetDelayEstimateInMilliseconds();
559 }
560
561 AudioDeviceModule::AudioLayer TestActiveAudioLayer(
562 const AudioDeviceModule::AudioLayer& layer_to_test) {
Peter Boström26b08602015-06-04 15:18:17 +0200563 rtc::scoped_refptr<AudioDeviceModule> audio_device;
henrikab2619892015-05-18 16:49:16 +0200564 audio_device = CreateAudioDevice(layer_to_test);
565 EXPECT_NE(audio_device.get(), nullptr);
566 AudioDeviceModule::AudioLayer active;
567 EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
568 return active;
569 }
570
henrika523183b2015-05-21 13:43:08 +0200571 bool DisableTestForThisDevice(const std::string& model) {
572 return (build_info_->GetDeviceModel() == model);
573 }
574
henrikab2619892015-05-18 16:49:16 +0200575 // Volume control is currently only supported for the Java output audio layer.
576 // For OpenSL ES, the internal stream volume is always on max level and there
577 // is no need for this test to set it to max.
578 bool AudioLayerSupportsVolumeControl() const {
579 return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
580 }
581
henrika8324b522015-03-27 10:56:23 +0100582 void SetMaxPlayoutVolume() {
henrikab2619892015-05-18 16:49:16 +0200583 if (!AudioLayerSupportsVolumeControl())
584 return;
henrika8324b522015-03-27 10:56:23 +0100585 uint32_t max_volume;
586 EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
587 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
588 }
589
henrikab2619892015-05-18 16:49:16 +0200590 void DisableBuiltInAECIfAvailable() {
591 if (audio_device()->BuiltInAECIsAvailable()) {
592 EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
593 }
594 }
595
  // Initializes and starts playout, verifying the full state transition:
  // not initialized -> initialized -> playing.
  void StartPlayout() {
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_EQ(0, audio_device()->InitPlayout());
    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
    EXPECT_EQ(0, audio_device()->StartPlayout());
    EXPECT_TRUE(audio_device()->Playing());
  }
604
  // Stops playout and verifies that the ADM is no longer playing and is back
  // in an uninitialized playout state (StopPlayout() also uninitializes).
  void StopPlayout() {
    EXPECT_EQ(0, audio_device()->StopPlayout());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
  }
610
  // Initializes and starts recording, verifying the full state transition:
  // not initialized -> initialized -> recording.
  void StartRecording() {
    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
    EXPECT_FALSE(audio_device()->Recording());
    EXPECT_EQ(0, audio_device()->InitRecording());
    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
    EXPECT_EQ(0, audio_device()->StartRecording());
    EXPECT_TRUE(audio_device()->Recording());
  }
619
  // Stops recording and verifies that the ADM reports that it is no longer
  // recording.
  void StopRecording() {
    EXPECT_EQ(0, audio_device()->StopRecording());
    EXPECT_FALSE(audio_device()->Recording());
  }
624
henrika8324b522015-03-27 10:56:23 +0100625 int GetMaxSpeakerVolume() const {
626 uint32_t max_volume(0);
627 EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
628 return max_volume;
629 }
630
631 int GetMinSpeakerVolume() const {
632 uint32_t min_volume(0);
633 EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
634 return min_volume;
635 }
636
637 int GetSpeakerVolume() const {
638 uint32_t volume(0);
639 EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
640 return volume;
641 }
642
  // Signaled from the audio callback thread (via the mock transport's
  // HandleCallbacks()) once the requested number of callbacks has fired.
  rtc::Event test_is_done_;
  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
  // Native audio parameters for each direction; compared against the values
  // stored in the internal audio device buffer.
  AudioParameters playout_parameters_;
  AudioParameters record_parameters_;
  // Supplies device information such as the device model name.
  std::unique_ptr<BuildInfo> build_info_;
};
650
TEST_F(AudioDeviceTest, ConstructDestruct) {
  // Using the test fixture to create and destruct the audio device module.
  // An intentionally empty body: passing means SetUp/TearDown succeed.
}
654
henrikab2619892015-05-18 16:49:16 +0200655// We always ask for a default audio layer when the ADM is constructed. But the
656// ADM will then internally set the best suitable combination of audio layers,
henrika918b5542016-09-19 15:44:09 +0200657// for input and output based on if low-latency output and/or input audio in
658// combination with OpenSL ES is supported or not. This test ensures that the
659// correct selection is done.
henrikab2619892015-05-18 16:49:16 +0200660TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
661 const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
662 bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
henrika918b5542016-09-19 15:44:09 +0200663 bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
henrika883d00f2018-03-16 10:09:49 +0100664 bool aaudio = audio_manager()->IsAAudioSupported();
henrika918b5542016-09-19 15:44:09 +0200665 AudioDeviceModule::AudioLayer expected_audio_layer;
henrika883d00f2018-03-16 10:09:49 +0100666 if (aaudio) {
667 expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
668 } else if (low_latency_output && low_latency_input) {
henrika918b5542016-09-19 15:44:09 +0200669 expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
670 } else if (low_latency_output && !low_latency_input) {
671 expected_audio_layer =
672 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
673 } else {
674 expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
675 }
henrikab2619892015-05-18 16:49:16 +0200676 EXPECT_EQ(expected_audio_layer, audio_layer);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000677}
678
henrikab2619892015-05-18 16:49:16 +0200679// Verify that it is possible to explicitly create the two types of supported
680// ADMs. These two tests overrides the default selection of native audio layer
681// by ignoring if the device supports low-latency output or not.
682TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
683 AudioDeviceModule::AudioLayer expected_layer =
684 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
Yves Gerey665174f2018-06-19 15:03:05 +0200685 AudioDeviceModule::AudioLayer active_layer =
686 TestActiveAudioLayer(expected_layer);
henrikab2619892015-05-18 16:49:16 +0200687 EXPECT_EQ(expected_layer, active_layer);
688}
689
690TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
691 AudioDeviceModule::AudioLayer expected_layer =
692 AudioDeviceModule::kAndroidJavaAudio;
Yves Gerey665174f2018-06-19 15:03:05 +0200693 AudioDeviceModule::AudioLayer active_layer =
694 TestActiveAudioLayer(expected_layer);
henrikab2619892015-05-18 16:49:16 +0200695 EXPECT_EQ(expected_layer, active_layer);
696}
697
henrika918b5542016-09-19 15:44:09 +0200698TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
699 AudioDeviceModule::AudioLayer expected_layer =
700 AudioDeviceModule::kAndroidOpenSLESAudio;
701 AudioDeviceModule::AudioLayer active_layer =
702 TestActiveAudioLayer(expected_layer);
703 EXPECT_EQ(expected_layer, active_layer);
704}
705
henrika883d00f2018-03-16 10:09:49 +0100706// TODO(bugs.webrtc.org/8914)
Mirko Bonadei185e8022019-03-27 21:11:17 +0100707#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
henrika883d00f2018-03-16 10:09:49 +0100708#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
709 DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
710#else
711#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
712 CorrectAudioLayerIsUsedForAAudioInBothDirections
713#endif
714TEST_F(AudioDeviceTest,
715 MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
716 AudioDeviceModule::AudioLayer expected_layer =
717 AudioDeviceModule::kAndroidAAudioAudio;
718 AudioDeviceModule::AudioLayer active_layer =
719 TestActiveAudioLayer(expected_layer);
720 EXPECT_EQ(expected_layer, active_layer);
721}
722
723// TODO(bugs.webrtc.org/8914)
Mirko Bonadei185e8022019-03-27 21:11:17 +0100724#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
henrika883d00f2018-03-16 10:09:49 +0100725#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
726 DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
727#else
728#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
729 CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
730#endif
731TEST_F(AudioDeviceTest,
732 MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
733 AudioDeviceModule::AudioLayer expected_layer =
734 AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
735 AudioDeviceModule::AudioLayer active_layer =
736 TestActiveAudioLayer(expected_layer);
737 EXPECT_EQ(expected_layer, active_layer);
738}
739
henrikab2619892015-05-18 16:49:16 +0200740// The Android ADM supports two different delay reporting modes. One for the
741// low-latency output path (in combination with OpenSL ES), and one for the
742// high-latency output path (Java backends in both directions). These two tests
743// verifies that the audio manager reports correct delay estimate given the
744// selected audio layer. Note that, this delay estimate will only be utilized
745// if the HW AEC is disabled.
746TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
747 EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
748 TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
749}
750
751TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
752 EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
753 TestDelayOnAudioLayer(
Yves Gerey665174f2018-06-19 15:03:05 +0200754 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
henrikab2619892015-05-18 16:49:16 +0200755}
756
757// Ensure that the ADM internal audio device buffer is configured to use the
758// correct set of parameters.
759TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
760 EXPECT_EQ(playout_parameters_.sample_rate(),
henrikacfbd26d2018-09-05 11:36:22 +0200761 static_cast<int>(audio_device_buffer()->PlayoutSampleRate()));
henrikab2619892015-05-18 16:49:16 +0200762 EXPECT_EQ(record_parameters_.sample_rate(),
henrikacfbd26d2018-09-05 11:36:22 +0200763 static_cast<int>(audio_device_buffer()->RecordingSampleRate()));
henrikab2619892015-05-18 16:49:16 +0200764 EXPECT_EQ(playout_parameters_.channels(),
765 audio_device_buffer()->PlayoutChannels());
766 EXPECT_EQ(record_parameters_.channels(),
767 audio_device_buffer()->RecordingChannels());
768}
769
TEST_F(AudioDeviceTest, InitTerminate) {
  // Initialization is part of the test fixture.
  EXPECT_TRUE(audio_device()->Initialized());
  // Terminate() shall succeed and leave the ADM uninitialized.
  EXPECT_EQ(0, audio_device()->Terminate());
  EXPECT_FALSE(audio_device()->Initialized());
}
776
TEST_F(AudioDeviceTest, Devices) {
  // Device enumeration is not supported. Verify fixed values only.
  // The Android ADM reports exactly one device in each direction.
  EXPECT_EQ(1, audio_device()->PlayoutDevices());
  EXPECT_EQ(1, audio_device()->RecordingDevices());
}
782
henrikab2619892015-05-18 16:49:16 +0200783TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
784 // The OpenSL ES output audio path does not support volume control.
785 if (!AudioLayerSupportsVolumeControl())
786 return;
henrika8324b522015-03-27 10:56:23 +0100787 bool available;
788 EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
789 EXPECT_TRUE(available);
790}
791
TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  // NOTE(review): playout is started before the query — presumably the max
  // volume is only reliable while an output stream is active; confirm.
  StartPlayout();
  EXPECT_GT(GetMaxSpeakerVolume(), 0);
  StopPlayout();
}
800
TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  // The minimum speaker volume is expected to always be zero.
  EXPECT_EQ(GetMinSpeakerVolume(), 0);
}
807
henrikab2619892015-05-18 16:49:16 +0200808TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
809 // The OpenSL ES output audio path does not support volume control.
810 if (!AudioLayerSupportsVolumeControl())
811 return;
henrika8324b522015-03-27 10:56:23 +0100812 const int default_volume = GetSpeakerVolume();
813 EXPECT_GE(default_volume, GetMinSpeakerVolume());
814 EXPECT_LE(default_volume, GetMaxSpeakerVolume());
815}
816
henrikab2619892015-05-18 16:49:16 +0200817TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
818 // The OpenSL ES output audio path does not support volume control.
819 if (!AudioLayerSupportsVolumeControl())
820 return;
henrika8324b522015-03-27 10:56:23 +0100821 const int default_volume = GetSpeakerVolume();
822 const int max_volume = GetMaxSpeakerVolume();
823 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
824 int new_volume = GetSpeakerVolume();
825 EXPECT_EQ(new_volume, max_volume);
826 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
827}
828
henrikab2619892015-05-18 16:49:16 +0200829// Tests that playout can be initiated, started and stopped. No audio callback
830// is registered in this test.
henrika817208b2016-11-23 06:49:44 -0800831TEST_F(AudioDeviceTest, StartStopPlayout) {
henrikab2619892015-05-18 16:49:16 +0200832 StartPlayout();
833 StopPlayout();
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000834 StartPlayout();
835 StopPlayout();
836}
837
henrika82e20552015-09-25 04:26:14 -0700838// Tests that recording can be initiated, started and stopped. No audio callback
839// is registered in this test.
840TEST_F(AudioDeviceTest, StartStopRecording) {
841 StartRecording();
842 StopRecording();
843 StartRecording();
844 StopRecording();
845}
846
// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitPlayout());
  EXPECT_EQ(0, audio_device()->StartPlayout());
  EXPECT_EQ(0, audio_device()->StopPlayout());
  // StopPlayout() must have reset the initialization state.
  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}
857
// Verify that calling StopRecording() will leave us in an uninitialized state
// which will require a new call to InitRecording(). This test does not call
// StartRecording() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitRecording());
  EXPECT_EQ(0, audio_device()->StartRecording());
  EXPECT_EQ(0, audio_device()->StopRecording());
  // StopRecording() must have reset the initialization state.
  EXPECT_FALSE(audio_device()->RecordingIsInitialized());
}
868
// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout);
  // Signal |test_is_done_| after kNumCallbacks playout callbacks.
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  // Each callback must carry the expected buffer size, sample format,
  // channel count and sample rate, with a non-null destination buffer.
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  // Block until the mock has observed enough callbacks (or time out).
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}
883
// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
// TODO(henrika): investigate if it is possible to perform a sanity check of
// delay estimates as well (argument #6).
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kRecording);
  // Signal |test_is_done_| after kNumCallbacks recording callbacks.
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  // Each callback must deliver a non-null buffer with the expected buffer
  // size, sample format, channel count and sample rate.
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));

  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  // Block until the mock has observed enough callbacks (or time out).
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopRecording();
}
902
// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout | kRecording);
  // Signal |test_is_done_| after kNumCallbacks callbacks in each direction.
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  // Playout side: expect NeedMorePlayData with the native playout parameters.
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  // Recording side: expect RecordedDataIsAvailable with the native record
  // parameters.
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  StartRecording();
  // Block until both directions have produced enough callbacks (or time out).
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopRecording();
  StopPlayout();
}
924
// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
  // TODO(henrika): extend test when mono output is supported.
  EXPECT_EQ(1u, playout_channels());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout);
  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
  // Select a file whose sample rate matches the native playout rate.
  std::string file_name = GetFileName(playout_sample_rate());
  std::unique_ptr<FileAudioStream> file_audio_stream(
      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
  mock.HandleCallbacks(&test_is_done_, file_audio_stream.get(), num_callbacks);
  // NOTE(review): intentionally left disabled here (unlike the full-duplex
  // tests) — confirm whether max volume should be set during file playout.
  // SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  // Block until the whole file has been consumed (or time out).
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}
943
// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
// means 'packet played'. Under such conditions, the FIFO would only contain
// one packet on average. However, under more realistic conditions, the size
// of the FIFO will vary more due to an unbalance between the two sides.
// This test tries to verify that the device maintains a balanced callback-
// sequence by running in loopback for ten seconds while measuring the size
// (max and average) of the FIFO. The size of the FIFO is increased by the
// recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
// Disabling this test on bots since it is difficult to come up with a robust
// test condition that all worked as intended. The main issue is that, when
// swarming is used, an initial latency can be built up when the both sides
// starts at different times. Hence, the test can fail even if audio works
// as intended. Keeping the test so it can be enabled manually.
// http://bugs.webrtc.org/7744
TEST_F(AudioDeviceTest, DISABLED_RunPlayoutAndRecordingInFullDuplex) {
  // Loopback requires identical parameters in both directions.
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<FifoAudioStream> fifo_audio_stream(
      new FifoAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(&test_is_done_, fifo_audio_stream.get(),
                       kFullDuplexTimeInSec * kNumCallbacksPerSecond);
  SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  StartPlayout();
  // Allow enough time for the full-duplex session regardless of the generic
  // test timeout.
  test_is_done_.Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
  StopPlayout();
  StopRecording();

  // These thresholds are set rather high to accomodate differences in hardware
  // in several devices, so this test can be used in swarming.
  // See http://bugs.webrtc.org/6464
  EXPECT_LE(fifo_audio_stream->average_size(), 60u);
  EXPECT_LE(fifo_audio_stream->largest_size(), 70u);
}
986
// Measures loopback latency and reports the min, max and average values for
// a full duplex audio session.
// The latency is measured like so:
// - Insert impulses periodically on the output side.
// - Detect the impulses on the input side.
// - Measure the time difference between the transmit time and receive time.
// - Store time differences in a vector and calculate min, max and average.
// This test requires a special hardware called Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
  // Loopback requires identical parameters in both directions.
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(&test_is_done_, latency_audio_stream.get(),
                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  SetMaxPlayoutVolume();
  // The HW AEC must be off; otherwise it could suppress the test impulses.
  DisableBuiltInAECIfAvailable();
  StartRecording();
  StartPlayout();
  // Allow enough time for the measurement session regardless of the generic
  // test timeout.
  test_is_done_.Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
  StopPlayout();
  StopRecording();
  // Verify that the correct number of transmitted impulses are detected.
  EXPECT_EQ(latency_audio_stream->num_latency_values(),
            static_cast<size_t>(
                kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
  latency_audio_stream->PrintResults();
}
1019
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +00001020} // namespace webrtc