blob: e2c6800f38d32714399c06ac9f9e60a1a2c984b2 [file] [log] [blame]
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +00001/*
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
Jonas Olssona4d87372019-07-05 19:08:33 +020011#include "modules/audio_device/include/audio_device.h"
12
henrikaee369e42015-05-25 10:11:27 +020013#include <algorithm>
14#include <limits>
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000015#include <list>
kwibergf01633e2016-02-24 05:00:36 -080016#include <memory>
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000017#include <numeric>
henrikaee369e42015-05-25 10:11:27 +020018#include <string>
19#include <vector>
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000020
Mirko Bonadeid9708072019-01-25 20:26:48 +010021#include "api/scoped_refptr.h"
Danil Chapovalov1c41be62019-04-01 09:16:12 +020022#include "api/task_queue/default_task_queue_factory.h"
23#include "api/task_queue/task_queue_factory.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020024#include "modules/audio_device/android/audio_common.h"
25#include "modules/audio_device/android/audio_manager.h"
26#include "modules/audio_device/android/build_info.h"
27#include "modules/audio_device/android/ensure_initialized.h"
28#include "modules/audio_device/audio_device_impl.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020029#include "modules/audio_device/include/mock_audio_transport.h"
30#include "rtc_base/arraysize.h"
Steve Anton10542f22019-01-11 09:11:00 -080031#include "rtc_base/critical_section.h"
Niels Möller140b1d92018-11-08 14:52:19 +010032#include "rtc_base/event.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020033#include "rtc_base/format_macros.h"
Steve Anton10542f22019-01-11 09:11:00 -080034#include "rtc_base/time_utils.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020035#include "test/gmock.h"
36#include "test/gtest.h"
Steve Anton10542f22019-01-11 09:11:00 -080037#include "test/testsupport/file_utils.h"
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000038
39using std::cout;
40using std::endl;
41using ::testing::_;
42using ::testing::AtLeast;
43using ::testing::Gt;
44using ::testing::Invoke;
45using ::testing::NiceMock;
46using ::testing::NotNull;
47using ::testing::Return;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000048
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000049// #define ENABLE_DEBUG_PRINTF
50#ifdef ENABLE_DEBUG_PRINTF
51#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000052#else
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000053#define PRINTD(...) ((void)0)
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000054#endif
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +000055#define PRINT(...) fprintf(stderr, __VA_ARGS__);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +000056
57namespace webrtc {
58
// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const size_t kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
// Average number of audio callbacks per second assuming 10ms packet size.
static const size_t kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 5;
// PCM sample width used by all test files and buffers (16-bit mono samples).
static const size_t kBitsPerSample = 16;
static const size_t kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
// Note that first |kNumIgnoreFirstCallbacks| are ignored.
static const int kFullDuplexTimeInSec = 5;
// Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const size_t kNumIgnoreFirstCallbacks = 50;
// Sets the number of impulses per second in the latency test.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. Number of transmitted impulses
// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
static const int kMeasureLatencyTimeInSec = 11;
// Utilized in round-trip latency measurements to avoid capturing noise samples.
static const int kImpulseThreshold = 1000;
// Prefix used to align printed test output with the gtest log format.
static const char kTag[] = "[..........] ";

// Bit mask describing which audio direction(s) a test exercises; values can
// be OR:ed together for full-duplex tests.
enum TransportType {
  kPlayout = 0x1,
  kRecording = 0x2,
};
90
// Interface for processing the audio stream. Real implementations can e.g.
// run audio in loopback, read audio from a file or perform latency
// measurements.
class AudioStreamInterface {
 public:
  // Called on the recording side with |num_frames| captured 16-bit frames
  // in |source|.
  virtual void Write(const void* source, size_t num_frames) = 0;
  // Called on the playout side; the implementation fills |destination| with
  // |num_frames| 16-bit frames to be played out.
  virtual void Read(void* destination, size_t num_frames) = 0;

 protected:
  // Protected non-virtual-delete style: instances are never deleted through
  // this interface pointer.
  virtual ~AudioStreamInterface() {}
};
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000102
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000103// Reads audio samples from a PCM file where the file is stored in memory at
104// construction.
105class FileAudioStream : public AudioStreamInterface {
106 public:
Yves Gerey665174f2018-06-19 15:03:05 +0200107 FileAudioStream(size_t num_callbacks,
108 const std::string& file_name,
109 int sample_rate)
110 : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000111 file_size_in_bytes_ = test::GetFileSize(file_name);
112 sample_rate_ = sample_rate;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000113 EXPECT_GE(file_size_in_callbacks(), num_callbacks)
henrika@webrtc.org74d47922015-03-10 11:59:03 +0000114 << "Size of test file is not large enough to last during the test.";
Peter Kastingdce40cf2015-08-24 14:52:23 -0700115 const size_t num_16bit_samples =
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000116 test::GetFileSize(file_name) / kBytesPerSample;
117 file_.reset(new int16_t[num_16bit_samples]);
118 FILE* audio_file = fopen(file_name.c_str(), "rb");
119 EXPECT_NE(audio_file, nullptr);
Yves Gerey665174f2018-06-19 15:03:05 +0200120 size_t num_samples_read =
121 fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000122 EXPECT_EQ(num_samples_read, num_16bit_samples);
123 fclose(audio_file);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000124 }
125
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000126 // AudioStreamInterface::Write() is not implemented.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700127 void Write(const void* source, size_t num_frames) override {}
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000128
129 // Read samples from file stored in memory (at construction) and copy
130 // |num_frames| (<=> 10ms) to the |destination| byte buffer.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700131 void Read(void* destination, size_t num_frames) override {
Yves Gerey665174f2018-06-19 15:03:05 +0200132 memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000133 num_frames * sizeof(int16_t));
134 file_pos_ += num_frames;
135 }
136
137 int file_size_in_seconds() const {
Yves Gerey665174f2018-06-19 15:03:05 +0200138 return static_cast<int>(file_size_in_bytes_ /
139 (kBytesPerSample * sample_rate_));
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000140 }
Peter Kastingdce40cf2015-08-24 14:52:23 -0700141 size_t file_size_in_callbacks() const {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000142 return file_size_in_seconds() * kNumCallbacksPerSecond;
143 }
144
145 private:
Peter Kastingdce40cf2015-08-24 14:52:23 -0700146 size_t file_size_in_bytes_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000147 int sample_rate_;
kwibergf01633e2016-02-24 05:00:36 -0800148 std::unique_ptr<int16_t[]> file_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700149 size_t file_pos_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000150};
151
152// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
153// buffers of fixed size and allows Write and Read operations. The idea is to
154// store recorded audio buffers (using Write) and then read (using Read) these
155// stored buffers with as short delay as possible when the audio layer needs
156// data to play out. The number of buffers in the FIFO will stabilize under
157// normal conditions since there will be a balance between Write and Read calls.
158// The container is a std::list container and access is protected with a lock
159// since both sides (playout and recording) are driven by its own thread.
160class FifoAudioStream : public AudioStreamInterface {
161 public:
Peter Kastingdce40cf2015-08-24 14:52:23 -0700162 explicit FifoAudioStream(size_t frames_per_buffer)
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000163 : frames_per_buffer_(frames_per_buffer),
164 bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
165 fifo_(new AudioBufferList),
166 largest_size_(0),
167 total_written_elements_(0),
168 write_count_(0) {
169 EXPECT_NE(fifo_.get(), nullptr);
170 }
171
Yves Gerey665174f2018-06-19 15:03:05 +0200172 ~FifoAudioStream() { Flush(); }
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000173
174 // Allocate new memory, copy |num_frames| samples from |source| into memory
175 // and add pointer to the memory location to end of the list.
176 // Increases the size of the FIFO by one element.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700177 void Write(const void* source, size_t num_frames) override {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000178 ASSERT_EQ(num_frames, frames_per_buffer_);
179 PRINTD("+");
180 if (write_count_++ < kNumIgnoreFirstCallbacks) {
181 return;
182 }
183 int16_t* memory = new int16_t[frames_per_buffer_];
Yves Gerey665174f2018-06-19 15:03:05 +0200184 memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000185 rtc::CritScope lock(&lock_);
186 fifo_->push_back(memory);
Peter Kastingdce40cf2015-08-24 14:52:23 -0700187 const size_t size = fifo_->size();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000188 if (size > largest_size_) {
189 largest_size_ = size;
Oleh Prypinb1686782019-08-02 09:36:47 +0200190 PRINTD("(%" RTC_PRIuS ")", largest_size_);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000191 }
192 total_written_elements_ += size;
193 }
194
195 // Read pointer to data buffer from front of list, copy |num_frames| of stored
196 // data into |destination| and delete the utilized memory allocation.
197 // Decreases the size of the FIFO by one element.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700198 void Read(void* destination, size_t num_frames) override {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000199 ASSERT_EQ(num_frames, frames_per_buffer_);
200 PRINTD("-");
201 rtc::CritScope lock(&lock_);
202 if (fifo_->empty()) {
203 memset(destination, 0, bytes_per_buffer_);
204 } else {
205 int16_t* memory = fifo_->front();
206 fifo_->pop_front();
Yves Gerey665174f2018-06-19 15:03:05 +0200207 memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000208 delete memory;
209 }
210 }
211
Yves Gerey665174f2018-06-19 15:03:05 +0200212 size_t size() const { return fifo_->size(); }
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000213
Yves Gerey665174f2018-06-19 15:03:05 +0200214 size_t largest_size() const { return largest_size_; }
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000215
Peter Kastingdce40cf2015-08-24 14:52:23 -0700216 size_t average_size() const {
Yves Gerey665174f2018-06-19 15:03:05 +0200217 return (total_written_elements_ == 0)
218 ? 0.0
219 : 0.5 + static_cast<float>(total_written_elements_) /
220 (write_count_ - kNumIgnoreFirstCallbacks);
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000221 }
222
223 private:
224 void Flush() {
225 for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
226 delete *it;
227 }
228 fifo_->clear();
229 }
230
231 using AudioBufferList = std::list<int16_t*>;
232 rtc::CriticalSection lock_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700233 const size_t frames_per_buffer_;
234 const size_t bytes_per_buffer_;
kwibergf01633e2016-02-24 05:00:36 -0800235 std::unique_ptr<AudioBufferList> fifo_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700236 size_t largest_size_;
237 size_t total_written_elements_;
238 size_t write_count_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000239};
240
// Inserts periodic impulses and measures the latency between the time of
// transmission and time of receiving the same impulse.
// Usage requires a special hardware called Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
class LatencyMeasuringAudioStream : public AudioStreamInterface {
 public:
  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        play_count_(0),
        rec_count_(0),
        pulse_time_(0) {}

  // Insert periodic impulses in first two samples of |destination|.
  void Read(void* destination, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    if (play_count_ == 0) {
      PRINT("[");
    }
    play_count_++;
    memset(destination, 0, bytes_per_buffer_);
    if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
      // Only stamp a new transmit time when the previous impulse has been
      // detected (Write() resets |pulse_time_| to zero on detection).
      if (pulse_time_ == 0) {
        pulse_time_ = rtc::TimeMillis();
      }
      PRINT(".");
      const int16_t impulse = std::numeric_limits<int16_t>::max();
      int16_t* ptr16 = static_cast<int16_t*>(destination);
      for (size_t i = 0; i < 2; ++i) {
        ptr16[i] = impulse;
      }
    }
  }

  // Detect received impulses in |source|, derive time between transmission and
  // detection and add the calculated delay to list of latencies.
  void Write(const void* source, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    rec_count_++;
    if (pulse_time_ == 0) {
      // Avoid detection of new impulse response until a new impulse has
      // been transmitted (sets |pulse_time_| to value larger than zero).
      return;
    }
    const int16_t* ptr16 = static_cast<const int16_t*>(source);
    std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
    // Find max value in the audio buffer.
    int max = *std::max_element(vec.begin(), vec.end());
    // Find index (element position in vector) of the max element.
    int index_of_max =
        std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
    if (max > kImpulseThreshold) {
      PRINTD("(%d,%d)", max, index_of_max);
      int64_t now_time = rtc::TimeMillis();
      int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
      PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
      PRINTD("[%d]", extra_delay);
      // Total latency is the difference between transmit time and detection
      // time plus the extra delay within the buffer in which we detected the
      // received impulse. It is transmitted at sample 0 but can be received
      // at sample N where N > 0. The term |extra_delay| accounts for N and it
      // is a value between 0 and 10ms.
      latencies_.push_back(now_time - pulse_time_ + extra_delay);
      pulse_time_ = 0;
    } else {
      PRINTD("-");
    }
  }

  // Number of completed round-trip measurements so far.
  size_t num_latency_values() const { return latencies_.size(); }

  // Smallest measured latency in milliseconds (0 if none recorded).
  int min_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::min_element(latencies_.begin(), latencies_.end());
  }

  // Largest measured latency in milliseconds (0 if none recorded).
  int max_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::max_element(latencies_.begin(), latencies_.end());
  }

  // Mean latency in milliseconds, rounded to nearest integer (0 if none).
  int average_latency() const {
    if (latencies_.empty())
      return 0;
    return 0.5 + static_cast<double>(
                     std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
                     latencies_.size();
  }

  void PrintResults() const {
    PRINT("] ");
    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
      PRINT("%d ", *it);
    }
    PRINT("\n");
    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
          max_latency(), average_latency());
  }

  // Converts a sample index within a 10ms buffer to milliseconds (rounded).
  int IndexToMilliseconds(double index) const {
    return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
  }

 private:
  const size_t frames_per_buffer_;
  const size_t bytes_per_buffer_;
  size_t play_count_;  // Number of playout (Read) callbacks seen.
  size_t rec_count_;   // Number of recording (Write) callbacks seen.
  int64_t pulse_time_;  // Transmit time of pending impulse; 0 if none pending.
  std::vector<int> latencies_;  // Measured round-trip latencies in ms.
};
354
355// Mocks the AudioTransport object and proxies actions for the two callbacks
356// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
357// of AudioStreamInterface.
aleloi5de52fd2016-11-10 01:05:34 -0800358class MockAudioTransportAndroid : public test::MockAudioTransport {
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000359 public:
aleloi5de52fd2016-11-10 01:05:34 -0800360 explicit MockAudioTransportAndroid(int type)
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000361 : num_callbacks_(0),
362 type_(type),
363 play_count_(0),
364 rec_count_(0),
365 audio_stream_(nullptr) {}
366
aleloi5de52fd2016-11-10 01:05:34 -0800367 virtual ~MockAudioTransportAndroid() {}
maxmorin1aee0b52016-08-15 11:46:19 -0700368
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000369 // Set default actions of the mock object. We are delegating to fake
370 // implementations (of AudioStreamInterface) here.
Niels Möller140b1d92018-11-08 14:52:19 +0100371 void HandleCallbacks(rtc::Event* test_is_done,
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000372 AudioStreamInterface* audio_stream,
373 int num_callbacks) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000374 test_is_done_ = test_is_done;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000375 audio_stream_ = audio_stream;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000376 num_callbacks_ = num_callbacks;
377 if (play_mode()) {
378 ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
379 .WillByDefault(
aleloi5de52fd2016-11-10 01:05:34 -0800380 Invoke(this, &MockAudioTransportAndroid::RealNeedMorePlayData));
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000381 }
382 if (rec_mode()) {
383 ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
aleloi5de52fd2016-11-10 01:05:34 -0800384 .WillByDefault(Invoke(
385 this, &MockAudioTransportAndroid::RealRecordedDataIsAvailable));
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000386 }
387 }
388
389 int32_t RealRecordedDataIsAvailable(const void* audioSamples,
Peter Kastingdce40cf2015-08-24 14:52:23 -0700390 const size_t nSamples,
391 const size_t nBytesPerSample,
Peter Kasting69558702016-01-12 16:26:35 -0800392 const size_t nChannels,
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000393 const uint32_t samplesPerSec,
394 const uint32_t totalDelayMS,
395 const int32_t clockDrift,
396 const uint32_t currentMicLevel,
397 const bool keyPressed,
henrika883d00f2018-03-16 10:09:49 +0100398 uint32_t& newMicLevel) { // NOLINT
henrika@webrtc.org74d47922015-03-10 11:59:03 +0000399 EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000400 rec_count_++;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000401 // Process the recorded audio stream if an AudioStreamInterface
402 // implementation exists.
403 if (audio_stream_) {
404 audio_stream_->Write(audioSamples, nSamples);
405 }
406 if (ReceivedEnoughCallbacks()) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000407 test_is_done_->Set();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000408 }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000409 return 0;
410 }
411
Peter Kastingdce40cf2015-08-24 14:52:23 -0700412 int32_t RealNeedMorePlayData(const size_t nSamples,
413 const size_t nBytesPerSample,
Peter Kasting69558702016-01-12 16:26:35 -0800414 const size_t nChannels,
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000415 const uint32_t samplesPerSec,
416 void* audioSamples,
henrika883d00f2018-03-16 10:09:49 +0100417 size_t& nSamplesOut, // NOLINT
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000418 int64_t* elapsed_time_ms,
419 int64_t* ntp_time_ms) {
henrika@webrtc.org74d47922015-03-10 11:59:03 +0000420 EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000421 play_count_++;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000422 nSamplesOut = nSamples;
423 // Read (possibly processed) audio stream samples to be played out if an
424 // AudioStreamInterface implementation exists.
425 if (audio_stream_) {
426 audio_stream_->Read(audioSamples, nSamples);
427 }
428 if (ReceivedEnoughCallbacks()) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000429 test_is_done_->Set();
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000430 }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000431 return 0;
432 }
433
434 bool ReceivedEnoughCallbacks() {
435 bool recording_done = false;
436 if (rec_mode())
437 recording_done = rec_count_ >= num_callbacks_;
438 else
439 recording_done = true;
440
441 bool playout_done = false;
442 if (play_mode())
443 playout_done = play_count_ >= num_callbacks_;
444 else
445 playout_done = true;
446
447 return recording_done && playout_done;
448 }
449
450 bool play_mode() const { return type_ & kPlayout; }
451 bool rec_mode() const { return type_ & kRecording; }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000452
453 private:
Niels Möller140b1d92018-11-08 14:52:19 +0100454 rtc::Event* test_is_done_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700455 size_t num_callbacks_;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000456 int type_;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700457 size_t play_count_;
458 size_t rec_count_;
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000459 AudioStreamInterface* audio_stream_;
kwibergf01633e2016-02-24 05:00:36 -0800460 std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000461};
462
henrikab2619892015-05-18 16:49:16 +0200463// AudioDeviceTest test fixture.
464class AudioDeviceTest : public ::testing::Test {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000465 protected:
Danil Chapovalov1c41be62019-04-01 09:16:12 +0200466 AudioDeviceTest() : task_queue_factory_(CreateDefaultTaskQueueFactory()) {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000467 // One-time initialization of JVM and application context. Ensures that we
468 // can do calls between C++ and Java. Initializes both Java and OpenSL ES
469 // implementations.
470 webrtc::audiodevicemodule::EnsureInitialized();
henrikab2619892015-05-18 16:49:16 +0200471 // Creates an audio device using a default audio layer.
472 audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000473 EXPECT_NE(audio_device_.get(), nullptr);
474 EXPECT_EQ(0, audio_device_->Init());
henrikab2619892015-05-18 16:49:16 +0200475 playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
476 record_parameters_ = audio_manager()->GetRecordAudioParameters();
henrika523183b2015-05-21 13:43:08 +0200477 build_info_.reset(new BuildInfo());
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000478 }
Yves Gerey665174f2018-06-19 15:03:05 +0200479 virtual ~AudioDeviceTest() { EXPECT_EQ(0, audio_device_->Terminate()); }
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000480
Yves Gerey665174f2018-06-19 15:03:05 +0200481 int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
482 int record_sample_rate() const { return record_parameters_.sample_rate(); }
483 size_t playout_channels() const { return playout_parameters_.channels(); }
484 size_t record_channels() const { return record_parameters_.channels(); }
Peter Kastingdce40cf2015-08-24 14:52:23 -0700485 size_t playout_frames_per_10ms_buffer() const {
henrikab2619892015-05-18 16:49:16 +0200486 return playout_parameters_.frames_per_10ms_buffer();
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000487 }
Peter Kastingdce40cf2015-08-24 14:52:23 -0700488 size_t record_frames_per_10ms_buffer() const {
henrikab2619892015-05-18 16:49:16 +0200489 return record_parameters_.frames_per_10ms_buffer();
490 }
491
492 int total_delay_ms() const {
493 return audio_manager()->GetDelayEstimateInMilliseconds();
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000494 }
495
Peter Boström26b08602015-06-04 15:18:17 +0200496 rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000497 return audio_device_;
498 }
499
henrikab2619892015-05-18 16:49:16 +0200500 AudioDeviceModuleImpl* audio_device_impl() const {
501 return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000502 }
503
henrikab2619892015-05-18 16:49:16 +0200504 AudioManager* audio_manager() const {
505 return audio_device_impl()->GetAndroidAudioManagerForTest();
506 }
507
508 AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
Yves Gerey665174f2018-06-19 15:03:05 +0200509 return static_cast<AudioDeviceModuleImpl*>(adm)
510 ->GetAndroidAudioManagerForTest();
henrikab2619892015-05-18 16:49:16 +0200511 }
512
513 AudioDeviceBuffer* audio_device_buffer() const {
514 return audio_device_impl()->GetAudioDeviceBuffer();
515 }
516
Peter Boström26b08602015-06-04 15:18:17 +0200517 rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
henrikab2619892015-05-18 16:49:16 +0200518 AudioDeviceModule::AudioLayer audio_layer) {
Peter Boström26b08602015-06-04 15:18:17 +0200519 rtc::scoped_refptr<AudioDeviceModule> module(
Danil Chapovalov1c41be62019-04-01 09:16:12 +0200520 AudioDeviceModule::Create(audio_layer, task_queue_factory_.get()));
henrikab2619892015-05-18 16:49:16 +0200521 return module;
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000522 }
523
henrika@webrtc.org80d9aee2015-03-19 15:28:16 +0000524 // Returns file name relative to the resource root given a sample rate.
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000525 std::string GetFileName(int sample_rate) {
526 EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
527 char fname[64];
Yves Gerey665174f2018-06-19 15:03:05 +0200528 snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000529 sample_rate / 1000);
530 std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
531 EXPECT_TRUE(test::FileExists(file_name));
532#ifdef ENABLE_PRINTF
533 PRINT("file name: %s\n", file_name.c_str());
Peter Kastingdce40cf2015-08-24 14:52:23 -0700534 const size_t bytes = test::GetFileSize(file_name);
Oleh Prypinb1686782019-08-02 09:36:47 +0200535 PRINT("file size: %" RTC_PRIuS " [bytes]\n", bytes);
536 PRINT("file size: %" RTC_PRIuS " [samples]\n", bytes / kBytesPerSample);
Peter Kastingdce40cf2015-08-24 14:52:23 -0700537 const int seconds =
538 static_cast<int>(bytes / (sample_rate * kBytesPerSample));
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000539 PRINT("file size: %d [secs]\n", seconds);
Oleh Prypinb1686782019-08-02 09:36:47 +0200540 PRINT("file size: %" RTC_PRIuS " [callbacks]\n",
Peter Kastingdce40cf2015-08-24 14:52:23 -0700541 seconds * kNumCallbacksPerSecond);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000542#endif
543 return file_name;
544 }
545
henrikab2619892015-05-18 16:49:16 +0200546 AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
547 AudioDeviceModule::AudioLayer audio_layer;
548 EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
549 return audio_layer;
550 }
551
552 int TestDelayOnAudioLayer(
553 const AudioDeviceModule::AudioLayer& layer_to_test) {
Peter Boström26b08602015-06-04 15:18:17 +0200554 rtc::scoped_refptr<AudioDeviceModule> audio_device;
henrikab2619892015-05-18 16:49:16 +0200555 audio_device = CreateAudioDevice(layer_to_test);
556 EXPECT_NE(audio_device.get(), nullptr);
557 AudioManager* audio_manager = GetAudioManager(audio_device.get());
558 EXPECT_NE(audio_manager, nullptr);
559 return audio_manager->GetDelayEstimateInMilliseconds();
560 }
561
562 AudioDeviceModule::AudioLayer TestActiveAudioLayer(
563 const AudioDeviceModule::AudioLayer& layer_to_test) {
Peter Boström26b08602015-06-04 15:18:17 +0200564 rtc::scoped_refptr<AudioDeviceModule> audio_device;
henrikab2619892015-05-18 16:49:16 +0200565 audio_device = CreateAudioDevice(layer_to_test);
566 EXPECT_NE(audio_device.get(), nullptr);
567 AudioDeviceModule::AudioLayer active;
568 EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
569 return active;
570 }
571
henrika523183b2015-05-21 13:43:08 +0200572 bool DisableTestForThisDevice(const std::string& model) {
573 return (build_info_->GetDeviceModel() == model);
574 }
575
henrikab2619892015-05-18 16:49:16 +0200576 // Volume control is currently only supported for the Java output audio layer.
577 // For OpenSL ES, the internal stream volume is always on max level and there
578 // is no need for this test to set it to max.
579 bool AudioLayerSupportsVolumeControl() const {
580 return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
581 }
582
henrika8324b522015-03-27 10:56:23 +0100583 void SetMaxPlayoutVolume() {
henrikab2619892015-05-18 16:49:16 +0200584 if (!AudioLayerSupportsVolumeControl())
585 return;
henrika8324b522015-03-27 10:56:23 +0100586 uint32_t max_volume;
587 EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
588 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
589 }
590
henrikab2619892015-05-18 16:49:16 +0200591 void DisableBuiltInAECIfAvailable() {
592 if (audio_device()->BuiltInAECIsAvailable()) {
593 EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
594 }
595 }
596
  // Initializes and starts playout while verifying the expected state
  // transitions: uninitialized -> initialized -> playing.
  void StartPlayout() {
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_EQ(0, audio_device()->InitPlayout());
    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
    EXPECT_EQ(0, audio_device()->StartPlayout());
    EXPECT_TRUE(audio_device()->Playing());
  }
605
  // Stops playout and verifies that the ADM also leaves the initialized
  // state, i.e. InitPlayout() is required again before the next start.
  void StopPlayout() {
    EXPECT_EQ(0, audio_device()->StopPlayout());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
  }
611
  // Initializes and starts recording while verifying the expected state
  // transitions: uninitialized -> initialized -> recording.
  void StartRecording() {
    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
    EXPECT_FALSE(audio_device()->Recording());
    EXPECT_EQ(0, audio_device()->InitRecording());
    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
    EXPECT_EQ(0, audio_device()->StartRecording());
    EXPECT_TRUE(audio_device()->Recording());
  }
620
  // Stops recording and verifies that the ADM reports it is no longer
  // recording.
  void StopRecording() {
    EXPECT_EQ(0, audio_device()->StopRecording());
    EXPECT_FALSE(audio_device()->Recording());
  }
625
henrika8324b522015-03-27 10:56:23 +0100626 int GetMaxSpeakerVolume() const {
627 uint32_t max_volume(0);
628 EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
629 return max_volume;
630 }
631
632 int GetMinSpeakerVolume() const {
633 uint32_t min_volume(0);
634 EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
635 return min_volume;
636 }
637
638 int GetSpeakerVolume() const {
639 uint32_t volume(0);
640 EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
641 return volume;
642 }
643
  // Signaled by the mock audio transport once the requested number of audio
  // callbacks has been observed; test bodies block on this event.
  rtc::Event test_is_done_;
  // Factory used to create task queues for the ADM.
  std::unique_ptr<TaskQueueFactory> task_queue_factory_;
  // The audio device module under test.
  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
  // Native audio parameters for the playout (output) direction.
  AudioParameters playout_parameters_;
  // Native audio parameters for the recording (input) direction.
  AudioParameters record_parameters_;
  // Provides device information, e.g. the model name used by
  // DisableTestForThisDevice().
  std::unique_ptr<BuildInfo> build_info_;
};
651
// Smoke test: verifies that the ADM can be created and destroyed without
// crashing; all the work happens in the fixture itself.
TEST_F(AudioDeviceTest, ConstructDestruct) {
  // Using the test fixture to create and destruct the audio device module.
}
655
henrikab2619892015-05-18 16:49:16 +0200656// We always ask for a default audio layer when the ADM is constructed. But the
657// ADM will then internally set the best suitable combination of audio layers,
henrika918b5542016-09-19 15:44:09 +0200658// for input and output based on if low-latency output and/or input audio in
659// combination with OpenSL ES is supported or not. This test ensures that the
660// correct selection is done.
henrikab2619892015-05-18 16:49:16 +0200661TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
662 const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
663 bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
henrika918b5542016-09-19 15:44:09 +0200664 bool low_latency_input = audio_manager()->IsLowLatencyRecordSupported();
henrika883d00f2018-03-16 10:09:49 +0100665 bool aaudio = audio_manager()->IsAAudioSupported();
henrika918b5542016-09-19 15:44:09 +0200666 AudioDeviceModule::AudioLayer expected_audio_layer;
henrika883d00f2018-03-16 10:09:49 +0100667 if (aaudio) {
668 expected_audio_layer = AudioDeviceModule::kAndroidAAudioAudio;
669 } else if (low_latency_output && low_latency_input) {
henrika918b5542016-09-19 15:44:09 +0200670 expected_audio_layer = AudioDeviceModule::kAndroidOpenSLESAudio;
671 } else if (low_latency_output && !low_latency_input) {
672 expected_audio_layer =
673 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
674 } else {
675 expected_audio_layer = AudioDeviceModule::kAndroidJavaAudio;
676 }
henrikab2619892015-05-18 16:49:16 +0200677 EXPECT_EQ(expected_audio_layer, audio_layer);
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000678}
679
henrikab2619892015-05-18 16:49:16 +0200680// Verify that it is possible to explicitly create the two types of supported
681// ADMs. These two tests overrides the default selection of native audio layer
682// by ignoring if the device supports low-latency output or not.
683TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
684 AudioDeviceModule::AudioLayer expected_layer =
685 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
Yves Gerey665174f2018-06-19 15:03:05 +0200686 AudioDeviceModule::AudioLayer active_layer =
687 TestActiveAudioLayer(expected_layer);
henrikab2619892015-05-18 16:49:16 +0200688 EXPECT_EQ(expected_layer, active_layer);
689}
690
691TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
692 AudioDeviceModule::AudioLayer expected_layer =
693 AudioDeviceModule::kAndroidJavaAudio;
Yves Gerey665174f2018-06-19 15:03:05 +0200694 AudioDeviceModule::AudioLayer active_layer =
695 TestActiveAudioLayer(expected_layer);
henrikab2619892015-05-18 16:49:16 +0200696 EXPECT_EQ(expected_layer, active_layer);
697}
698
henrika918b5542016-09-19 15:44:09 +0200699TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
700 AudioDeviceModule::AudioLayer expected_layer =
701 AudioDeviceModule::kAndroidOpenSLESAudio;
702 AudioDeviceModule::AudioLayer active_layer =
703 TestActiveAudioLayer(expected_layer);
704 EXPECT_EQ(expected_layer, active_layer);
705}
706
// TODO(bugs.webrtc.org/8914)
// The AAudio backend is only compiled in when
// WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO is defined; otherwise the test
// is disabled via gtest's DISABLED_ name prefix.
#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
  DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
#else
#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
  CorrectAudioLayerIsUsedForAAudioInBothDirections
#endif
TEST_F(AudioDeviceTest,
       MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidAAudioAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}
723
// TODO(bugs.webrtc.org/8914)
// Same macro gating as above: only runs when AAudio support is compiled in.
#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
  DISABLED_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
#else
#define MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo \
  CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo
#endif
TEST_F(AudioDeviceTest,
       MAYBE_CorrectAudioLayerIsUsedForCombinedJavaAAudioCombo) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidJavaInputAndAAudioOutputAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}
740
henrikab2619892015-05-18 16:49:16 +0200741// The Android ADM supports two different delay reporting modes. One for the
742// low-latency output path (in combination with OpenSL ES), and one for the
743// high-latency output path (Java backends in both directions). These two tests
744// verifies that the audio manager reports correct delay estimate given the
745// selected audio layer. Note that, this delay estimate will only be utilized
746// if the HW AEC is disabled.
747TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
748 EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
749 TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
750}
751
752TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
753 EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
754 TestDelayOnAudioLayer(
Yves Gerey665174f2018-06-19 15:03:05 +0200755 AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
henrikab2619892015-05-18 16:49:16 +0200756}
757
758// Ensure that the ADM internal audio device buffer is configured to use the
759// correct set of parameters.
760TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
761 EXPECT_EQ(playout_parameters_.sample_rate(),
henrikacfbd26d2018-09-05 11:36:22 +0200762 static_cast<int>(audio_device_buffer()->PlayoutSampleRate()));
henrikab2619892015-05-18 16:49:16 +0200763 EXPECT_EQ(record_parameters_.sample_rate(),
henrikacfbd26d2018-09-05 11:36:22 +0200764 static_cast<int>(audio_device_buffer()->RecordingSampleRate()));
henrikab2619892015-05-18 16:49:16 +0200765 EXPECT_EQ(playout_parameters_.channels(),
766 audio_device_buffer()->PlayoutChannels());
767 EXPECT_EQ(record_parameters_.channels(),
768 audio_device_buffer()->RecordingChannels());
769}
770
// Verifies that Terminate() succeeds and moves the ADM out of the
// initialized state.
TEST_F(AudioDeviceTest, InitTerminate) {
  // Initialization is part of the test fixture.
  EXPECT_TRUE(audio_device()->Initialized());
  EXPECT_EQ(0, audio_device()->Terminate());
  EXPECT_FALSE(audio_device()->Initialized());
}
777
// The Android ADM exposes exactly one playout and one recording device.
TEST_F(AudioDeviceTest, Devices) {
  // Device enumeration is not supported. Verify fixed values only.
  EXPECT_EQ(1, audio_device()->PlayoutDevices());
  EXPECT_EQ(1, audio_device()->RecordingDevices());
}
783
henrikab2619892015-05-18 16:49:16 +0200784TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
785 // The OpenSL ES output audio path does not support volume control.
786 if (!AudioLayerSupportsVolumeControl())
787 return;
henrika8324b522015-03-27 10:56:23 +0100788 bool available;
789 EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
790 EXPECT_TRUE(available);
791}
792
henrikab2619892015-05-18 16:49:16 +0200793TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
794 // The OpenSL ES output audio path does not support volume control.
795 if (!AudioLayerSupportsVolumeControl())
796 return;
797 StartPlayout();
henrika8324b522015-03-27 10:56:23 +0100798 EXPECT_GT(GetMaxSpeakerVolume(), 0);
henrikab2619892015-05-18 16:49:16 +0200799 StopPlayout();
henrika8324b522015-03-27 10:56:23 +0100800}
801
henrikab2619892015-05-18 16:49:16 +0200802TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
803 // The OpenSL ES output audio path does not support volume control.
804 if (!AudioLayerSupportsVolumeControl())
805 return;
henrika8324b522015-03-27 10:56:23 +0100806 EXPECT_EQ(GetMinSpeakerVolume(), 0);
807}
808
henrikab2619892015-05-18 16:49:16 +0200809TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
810 // The OpenSL ES output audio path does not support volume control.
811 if (!AudioLayerSupportsVolumeControl())
812 return;
henrika8324b522015-03-27 10:56:23 +0100813 const int default_volume = GetSpeakerVolume();
814 EXPECT_GE(default_volume, GetMinSpeakerVolume());
815 EXPECT_LE(default_volume, GetMaxSpeakerVolume());
816}
817
henrikab2619892015-05-18 16:49:16 +0200818TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
819 // The OpenSL ES output audio path does not support volume control.
820 if (!AudioLayerSupportsVolumeControl())
821 return;
henrika8324b522015-03-27 10:56:23 +0100822 const int default_volume = GetSpeakerVolume();
823 const int max_volume = GetMaxSpeakerVolume();
824 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
825 int new_volume = GetSpeakerVolume();
826 EXPECT_EQ(new_volume, max_volume);
827 EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
828}
829
henrikab2619892015-05-18 16:49:16 +0200830// Tests that playout can be initiated, started and stopped. No audio callback
831// is registered in this test.
henrika817208b2016-11-23 06:49:44 -0800832TEST_F(AudioDeviceTest, StartStopPlayout) {
henrikab2619892015-05-18 16:49:16 +0200833 StartPlayout();
834 StopPlayout();
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +0000835 StartPlayout();
836 StopPlayout();
837}
838
henrika82e20552015-09-25 04:26:14 -0700839// Tests that recording can be initiated, started and stopped. No audio callback
840// is registered in this test.
841TEST_F(AudioDeviceTest, StartStopRecording) {
842 StartRecording();
843 StopRecording();
844 StartRecording();
845 StopRecording();
846}
847
// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitPlayout());
  EXPECT_EQ(0, audio_device()->StartPlayout());
  EXPECT_EQ(0, audio_device()->StopPlayout());
  // Stopping playout must also reset the initialized state.
  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}
858
// Verify that calling StopRecording() will leave us in an uninitialized state
// which will require a new call to InitRecording(). This test does not call
// StartRecording() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitRecording());
  EXPECT_EQ(0, audio_device()->StartRecording());
  EXPECT_EQ(0, audio_device()->StopRecording());
  // Stopping recording must also reset the initialized state.
  EXPECT_FALSE(audio_device()->RecordingIsInitialized());
}
869
// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout);
  // The mock signals `test_is_done_` after kNumCallbacks callbacks; no audio
  // stream (nullptr) is attached since only the callback count matters here.
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  // Block until the expected number of callbacks has arrived or we time out.
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}
884
// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
// TODO(henrika): investigate if it is possible to perform a sanity check of
// delay estimates as well (argument #6).
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kRecording);
  // The mock signals `test_is_done_` after kNumCallbacks callbacks; no audio
  // stream (nullptr) is attached since only the callback count matters here.
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));

  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  // Block until the expected number of callbacks has arrived or we time out.
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopRecording();
}
903
// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout | kRecording);
  // The mock signals `test_is_done_` after kNumCallbacks callbacks.
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  // Expect callbacks in the output direction...
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  // ...and in the input direction.
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  StartRecording();
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopRecording();
  StopPlayout();
}
925
// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
  // TODO(henrika): extend test when mono output is supported.
  EXPECT_EQ(1u, playout_channels());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout);
  // Play for kFilePlayTimeInSec seconds worth of 10 ms callbacks.
  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
  std::string file_name = GetFileName(playout_sample_rate());
  std::unique_ptr<FileAudioStream> file_audio_stream(
      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
  // The file stream supplies the samples delivered in each playout callback.
  mock.HandleCallbacks(&test_is_done_, file_audio_stream.get(), num_callbacks);
  // SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_.Wait(kTestTimeOutInMilliseconds);
  StopPlayout();
}
944
// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
// means 'packet played'. Under such conditions, the FIFO would only contain
// one packet on average. However, under more realistic conditions, the size
// of the FIFO will vary more due to an unbalance between the two sides.
// This test tries to verify that the device maintains a balanced callback-
// sequence by running in loopback for ten seconds while measuring the size
// (max and average) of the FIFO. The size of the FIFO is increased by the
// recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
// Disabling this test on bots since it is difficult to come up with a robust
// test condition that all worked as intended. The main issue is that, when
// swarming is used, an initial latency can be built up when the both sides
// starts at different times. Hence, the test can fail even if audio works
// as intended. Keeping the test so it can be enabled manually.
// http://bugs.webrtc.org/7744
TEST_F(AudioDeviceTest, DISABLED_RunPlayoutAndRecordingInFullDuplex) {
  // Loopback requires matching formats in both directions.
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  // The FIFO connects the recording side (producer) to the playout side
  // (consumer).
  std::unique_ptr<FifoAudioStream> fifo_audio_stream(
      new FifoAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(&test_is_done_, fifo_audio_stream.get(),
                       kFullDuplexTimeInSec * kNumCallbacksPerSecond);
  SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  StartPlayout();
  // Run for the full duplex duration (but never shorter than the timeout).
  test_is_done_.Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
  StopPlayout();
  StopRecording();

  // These thresholds are set rather high to accommodate differences in
  // hardware in several devices, so this test can be used in swarming.
  // See http://bugs.webrtc.org/6464
  EXPECT_LE(fifo_audio_stream->average_size(), 60u);
  EXPECT_LE(fifo_audio_stream->largest_size(), 70u);
}
987
// Measures loopback latency and reports the min, max and average values for
// a full duplex audio session.
// The latency is measured like so:
// - Insert impulses periodically on the output side.
// - Detect the impulses on the input side.
// - Measure the time difference between the transmit time and receive time.
// - Store time differences in a vector and calculate min, max and average.
// This test requires a special hardware called Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
  // Loopback requires matching formats in both directions.
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(&test_is_done_, latency_audio_stream.get(),
                       kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  SetMaxPlayoutVolume();
  // NOTE(review): presumably the HW AEC is disabled so it cannot alter the
  // loopback impulses used for the measurement — confirm.
  DisableBuiltInAECIfAvailable();
  StartRecording();
  StartPlayout();
  // Run for the full measurement duration (but never shorter than the
  // timeout).
  test_is_done_.Wait(
      std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
  StopPlayout();
  StopRecording();
  // Verify that the correct number of transmitted impulses are detected.
  EXPECT_EQ(latency_audio_stream->num_latency_values(),
            static_cast<size_t>(
                kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1));
  latency_audio_stream->PrintResults();
}
1020
henrika@webrtc.org474d1eb2015-03-09 12:39:53 +00001021} // namespace webrtc