henrika | 883d00f | 2018-03-16 10:09:49 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license |
| 5 | * that can be found in the LICENSE file in the root of the source |
| 6 | * tree. An additional intellectual property rights grant can be found |
| 7 | * in the file PATENTS. All contributing project authors may |
| 8 | * be found in the AUTHORS file in the root of the source tree. |
| 9 | */ |
| 10 | |
| 11 | #include "modules/audio_device/android/aaudio_wrapper.h" |
| 12 | |
| 13 | #include "modules/audio_device/android/audio_manager.h" |
| 14 | #include "rtc_base/logging.h" |
| 15 | #include "rtc_base/strings/string_builder.h" |
Steve Anton | 10542f2 | 2019-01-11 09:11:00 -0800 | [diff] [blame^] | 16 | #include "rtc_base/time_utils.h" |
henrika | 883d00f | 2018-03-16 10:09:49 +0100 | [diff] [blame] | 17 | |
| 18 | #define LOG_ON_ERROR(op) \ |
| 19 | do { \ |
| 20 | aaudio_result_t result = (op); \ |
| 21 | if (result != AAUDIO_OK) { \ |
| 22 | RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \ |
| 23 | } \ |
| 24 | } while (0) |
| 25 | |
| 26 | #define RETURN_ON_ERROR(op, ...) \ |
| 27 | do { \ |
| 28 | aaudio_result_t result = (op); \ |
| 29 | if (result != AAUDIO_OK) { \ |
| 30 | RTC_LOG(LS_ERROR) << #op << ": " << AAudio_convertResultToText(result); \ |
| 31 | return __VA_ARGS__; \ |
| 32 | } \ |
| 33 | } while (0) |
| 34 | |
| 35 | namespace webrtc { |
| 36 | |
| 37 | namespace { |
| 38 | |
| 39 | const char* DirectionToString(aaudio_direction_t direction) { |
| 40 | switch (direction) { |
| 41 | case AAUDIO_DIRECTION_OUTPUT: |
| 42 | return "OUTPUT"; |
| 43 | case AAUDIO_DIRECTION_INPUT: |
| 44 | return "INPUT"; |
| 45 | default: |
| 46 | return "UNKNOWN"; |
| 47 | } |
| 48 | } |
| 49 | |
| 50 | const char* SharingModeToString(aaudio_sharing_mode_t mode) { |
| 51 | switch (mode) { |
| 52 | case AAUDIO_SHARING_MODE_EXCLUSIVE: |
| 53 | return "EXCLUSIVE"; |
| 54 | case AAUDIO_SHARING_MODE_SHARED: |
| 55 | return "SHARED"; |
| 56 | default: |
| 57 | return "UNKNOWN"; |
| 58 | } |
| 59 | } |
| 60 | |
| 61 | const char* PerformanceModeToString(aaudio_performance_mode_t mode) { |
| 62 | switch (mode) { |
| 63 | case AAUDIO_PERFORMANCE_MODE_NONE: |
| 64 | return "NONE"; |
| 65 | case AAUDIO_PERFORMANCE_MODE_POWER_SAVING: |
| 66 | return "POWER_SAVING"; |
| 67 | case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY: |
| 68 | return "LOW_LATENCY"; |
| 69 | default: |
| 70 | return "UNKNOWN"; |
| 71 | } |
| 72 | } |
| 73 | |
| 74 | const char* FormatToString(int32_t id) { |
| 75 | switch (id) { |
| 76 | case AAUDIO_FORMAT_INVALID: |
| 77 | return "INVALID"; |
| 78 | case AAUDIO_FORMAT_UNSPECIFIED: |
| 79 | return "UNSPECIFIED"; |
| 80 | case AAUDIO_FORMAT_PCM_I16: |
| 81 | return "PCM_I16"; |
| 82 | case AAUDIO_FORMAT_PCM_FLOAT: |
| 83 | return "FLOAT"; |
| 84 | default: |
| 85 | return "UNKNOWN"; |
| 86 | } |
| 87 | } |
| 88 | |
| 89 | void ErrorCallback(AAudioStream* stream, |
| 90 | void* user_data, |
| 91 | aaudio_result_t error) { |
| 92 | RTC_DCHECK(user_data); |
| 93 | AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data); |
| 94 | RTC_LOG(WARNING) << "ErrorCallback: " |
| 95 | << DirectionToString(aaudio_wrapper->direction()); |
| 96 | RTC_DCHECK(aaudio_wrapper->observer()); |
| 97 | aaudio_wrapper->observer()->OnErrorCallback(error); |
| 98 | } |
| 99 | |
| 100 | aaudio_data_callback_result_t DataCallback(AAudioStream* stream, |
| 101 | void* user_data, |
| 102 | void* audio_data, |
| 103 | int32_t num_frames) { |
| 104 | RTC_DCHECK(user_data); |
| 105 | RTC_DCHECK(audio_data); |
| 106 | AAudioWrapper* aaudio_wrapper = reinterpret_cast<AAudioWrapper*>(user_data); |
| 107 | RTC_DCHECK(aaudio_wrapper->observer()); |
| 108 | return aaudio_wrapper->observer()->OnDataCallback(audio_data, num_frames); |
| 109 | } |
| 110 | |
| 111 | // Wraps the stream builder object to ensure that it is released properly when |
| 112 | // the stream builder goes out of scope. |
| 113 | class ScopedStreamBuilder { |
| 114 | public: |
| 115 | ScopedStreamBuilder() { |
| 116 | LOG_ON_ERROR(AAudio_createStreamBuilder(&builder_)); |
| 117 | RTC_DCHECK(builder_); |
| 118 | } |
| 119 | ~ScopedStreamBuilder() { |
| 120 | if (builder_) { |
| 121 | LOG_ON_ERROR(AAudioStreamBuilder_delete(builder_)); |
| 122 | } |
| 123 | } |
| 124 | |
| 125 | AAudioStreamBuilder* get() const { return builder_; } |
| 126 | |
| 127 | private: |
| 128 | AAudioStreamBuilder* builder_ = nullptr; |
| 129 | }; |
| 130 | |
| 131 | } // namespace |
| 132 | |
| 133 | AAudioWrapper::AAudioWrapper(AudioManager* audio_manager, |
| 134 | aaudio_direction_t direction, |
| 135 | AAudioObserverInterface* observer) |
| 136 | : direction_(direction), observer_(observer) { |
| 137 | RTC_LOG(INFO) << "ctor"; |
| 138 | RTC_DCHECK(observer_); |
| 139 | direction_ == AAUDIO_DIRECTION_OUTPUT |
| 140 | ? audio_parameters_ = audio_manager->GetPlayoutAudioParameters() |
| 141 | : audio_parameters_ = audio_manager->GetRecordAudioParameters(); |
| 142 | aaudio_thread_checker_.DetachFromThread(); |
| 143 | RTC_LOG(INFO) << audio_parameters_.ToString(); |
| 144 | } |
| 145 | |
// Destructor. The stream must already have been closed (via Stop()); the
// DCHECK below enforces that no stream is leaked.
AAudioWrapper::~AAudioWrapper() {
  RTC_LOG(INFO) << "dtor";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(!stream_);
}
| 151 | |
| 152 | bool AAudioWrapper::Init() { |
| 153 | RTC_LOG(INFO) << "Init"; |
| 154 | RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 155 | // Creates a stream builder which can be used to open an audio stream. |
| 156 | ScopedStreamBuilder builder; |
| 157 | // Configures the stream builder using audio parameters given at construction. |
| 158 | SetStreamConfiguration(builder.get()); |
| 159 | // Opens a stream based on options in the stream builder. |
| 160 | if (!OpenStream(builder.get())) { |
| 161 | return false; |
| 162 | } |
| 163 | // Ensures that the opened stream could activate the requested settings. |
| 164 | if (!VerifyStreamConfiguration()) { |
| 165 | return false; |
| 166 | } |
| 167 | // Optimizes the buffer scheme for lowest possible latency and creates |
| 168 | // additional buffer logic to match the 10ms buffer size used in WebRTC. |
| 169 | if (!OptimizeBuffers()) { |
| 170 | return false; |
| 171 | } |
| 172 | LogStreamState(); |
| 173 | return true; |
| 174 | } |
| 175 | |
| 176 | bool AAudioWrapper::Start() { |
| 177 | RTC_LOG(INFO) << "Start"; |
| 178 | RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 179 | // TODO(henrika): this state check might not be needed. |
| 180 | aaudio_stream_state_t current_state = AAudioStream_getState(stream_); |
| 181 | if (current_state != AAUDIO_STREAM_STATE_OPEN) { |
| 182 | RTC_LOG(LS_ERROR) << "Invalid state: " |
| 183 | << AAudio_convertStreamStateToText(current_state); |
| 184 | return false; |
| 185 | } |
| 186 | // Asynchronous request for the stream to start. |
| 187 | RETURN_ON_ERROR(AAudioStream_requestStart(stream_), false); |
| 188 | LogStreamState(); |
| 189 | return true; |
| 190 | } |
| 191 | |
| 192 | bool AAudioWrapper::Stop() { |
| 193 | RTC_LOG(INFO) << "Stop: " << DirectionToString(direction()); |
| 194 | RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 195 | // Asynchronous request for the stream to stop. |
| 196 | RETURN_ON_ERROR(AAudioStream_requestStop(stream_), false); |
| 197 | CloseStream(); |
| 198 | aaudio_thread_checker_.DetachFromThread(); |
| 199 | return true; |
| 200 | } |
| 201 | |
| 202 | double AAudioWrapper::EstimateLatencyMillis() const { |
| 203 | RTC_DCHECK(stream_); |
| 204 | double latency_millis = 0.0; |
| 205 | if (direction() == AAUDIO_DIRECTION_INPUT) { |
| 206 | // For input streams. Best guess we can do is to use the current burst size |
| 207 | // as delay estimate. |
| 208 | latency_millis = static_cast<double>(frames_per_burst()) / sample_rate() * |
| 209 | rtc::kNumMillisecsPerSec; |
| 210 | } else { |
| 211 | int64_t existing_frame_index; |
| 212 | int64_t existing_frame_presentation_time; |
| 213 | // Get the time at which a particular frame was presented to audio hardware. |
| 214 | aaudio_result_t result = AAudioStream_getTimestamp( |
| 215 | stream_, CLOCK_MONOTONIC, &existing_frame_index, |
| 216 | &existing_frame_presentation_time); |
| 217 | // Results are only valid when the stream is in AAUDIO_STREAM_STATE_STARTED. |
| 218 | if (result == AAUDIO_OK) { |
| 219 | // Get write index for next audio frame. |
| 220 | int64_t next_frame_index = frames_written(); |
| 221 | // Number of frames between next frame and the existing frame. |
| 222 | int64_t frame_index_delta = next_frame_index - existing_frame_index; |
| 223 | // Assume the next frame will be written now. |
| 224 | int64_t next_frame_write_time = rtc::TimeNanos(); |
| 225 | // Calculate time when next frame will be presented to the hardware taking |
| 226 | // sample rate into account. |
| 227 | int64_t frame_time_delta = |
| 228 | (frame_index_delta * rtc::kNumNanosecsPerSec) / sample_rate(); |
| 229 | int64_t next_frame_presentation_time = |
| 230 | existing_frame_presentation_time + frame_time_delta; |
| 231 | // Derive a latency estimate given results above. |
| 232 | latency_millis = static_cast<double>(next_frame_presentation_time - |
| 233 | next_frame_write_time) / |
| 234 | rtc::kNumNanosecsPerMillisec; |
| 235 | } |
| 236 | } |
| 237 | return latency_millis; |
| 238 | } |
| 239 | |
// Increases the size of the output buffer by one burst to reduce the risk of
// underruns. Returns true if the buffer size was changed, false if the new
// size would exceed the buffer capacity or if AAudio rejected the change.
// (Note: despite the AAudio API returning the new size or a negative error,
// this wrapper reports success/failure as a bool.)
bool AAudioWrapper::IncreaseOutputBufferSize() {
  RTC_LOG(INFO) << "IncreaseBufferSize";
  RTC_DCHECK(stream_);
  RTC_DCHECK(aaudio_thread_checker_.CalledOnValidThread());
  RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_OUTPUT);
  aaudio_result_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
  // Try to increase size of buffer with one burst to reduce risk of underrun.
  buffer_size += frames_per_burst();
  // Verify that the new buffer size is not larger than max capacity.
  // TODO(henrika): keep track of case when we reach the capacity limit.
  const int32_t max_buffer_size = buffer_capacity_in_frames();
  if (buffer_size > max_buffer_size) {
    RTC_LOG(LS_ERROR) << "Required buffer size (" << buffer_size
                      << ") is higher than max: " << max_buffer_size;
    return false;
  }
  RTC_LOG(INFO) << "Updating buffer size to: " << buffer_size
                << " (max=" << max_buffer_size << ")";
  // AAudioStream_setBufferSizeInFrames() returns the actual size used, or a
  // negative error value.
  buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
  if (buffer_size < 0) {
    RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
                      << AAudio_convertResultToText(buffer_size);
    return false;
  }
  RTC_LOG(INFO) << "Buffer size changed to: " << buffer_size;
  return true;
}
| 269 | |
| 270 | void AAudioWrapper::ClearInputStream(void* audio_data, int32_t num_frames) { |
| 271 | RTC_LOG(INFO) << "ClearInputStream"; |
| 272 | RTC_DCHECK(stream_); |
| 273 | RTC_DCHECK(aaudio_thread_checker_.CalledOnValidThread()); |
| 274 | RTC_DCHECK_EQ(direction(), AAUDIO_DIRECTION_INPUT); |
| 275 | aaudio_result_t cleared_frames = 0; |
| 276 | do { |
| 277 | cleared_frames = AAudioStream_read(stream_, audio_data, num_frames, 0); |
| 278 | } while (cleared_frames > 0); |
| 279 | } |
| 280 | |
// The observer that receives data/error callbacks (set at construction).
AAudioObserverInterface* AAudioWrapper::observer() const {
  return observer_;
}

// The audio parameters selected at construction (playout or record).
AudioParameters AAudioWrapper::audio_parameters() const {
  return audio_parameters_;
}

// --- Thin forwarders to the AAudio C API; all require an open stream. ---

// Number of samples in one frame (equals the channel count for PCM).
int32_t AAudioWrapper::samples_per_frame() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getSamplesPerFrame(stream_);
}

// Current buffer size in frames (may change during playout).
int32_t AAudioWrapper::buffer_size_in_frames() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getBufferSizeInFrames(stream_);
}

// Maximum buffer capacity in frames.
int32_t AAudioWrapper::buffer_capacity_in_frames() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getBufferCapacityInFrames(stream_);
}

// Id of the device this stream is attached to.
int32_t AAudioWrapper::device_id() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getDeviceId(stream_);
}

// Number of underruns (output) or overruns (input) seen so far.
int32_t AAudioWrapper::xrun_count() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getXRunCount(stream_);
}

// Actual sample format of the stream (expected: AAUDIO_FORMAT_PCM_I16).
int32_t AAudioWrapper::format() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFormat(stream_);
}

// Actual sample rate of the stream.
int32_t AAudioWrapper::sample_rate() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getSampleRate(stream_);
}

// Actual channel count of the stream.
int32_t AAudioWrapper::channel_count() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getChannelCount(stream_);
}

// Frames per data callback, or AAUDIO_UNSPECIFIED if variable.
int32_t AAudioWrapper::frames_per_callback() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFramesPerDataCallback(stream_);
}

// Actual sharing mode (expected: AAUDIO_SHARING_MODE_SHARED).
aaudio_sharing_mode_t AAudioWrapper::sharing_mode() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getSharingMode(stream_);
}

// Actual performance mode (expected: AAUDIO_PERFORMANCE_MODE_LOW_LATENCY).
aaudio_performance_mode_t AAudioWrapper::performance_mode() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getPerformanceMode(stream_);
}

// Current stream state (open/starting/started/...).
aaudio_stream_state_t AAudioWrapper::stream_state() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getState(stream_);
}

// Total number of frames written to the stream so far.
int64_t AAudioWrapper::frames_written() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFramesWritten(stream_);
}

// Total number of frames read from the stream so far.
int64_t AAudioWrapper::frames_read() const {
  RTC_DCHECK(stream_);
  return AAudioStream_getFramesRead(stream_);
}
| 358 | |
// Configures the stream builder with the audio parameters given at
// construction before the stream is opened. The actual stream settings may
// differ and are checked later in VerifyStreamConfiguration().
void AAudioWrapper::SetStreamConfiguration(AAudioStreamBuilder* builder) {
  RTC_LOG(INFO) << "SetStreamConfiguration";
  RTC_DCHECK(builder);
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  // Request usage of default primary output/input device.
  // TODO(henrika): verify that default device follows Java APIs.
  // https://developer.android.com/reference/android/media/AudioDeviceInfo.html.
  AAudioStreamBuilder_setDeviceId(builder, AAUDIO_UNSPECIFIED);
  // Use preferred sample rate given by the audio parameters.
  AAudioStreamBuilder_setSampleRate(builder, audio_parameters().sample_rate());
  // Use preferred channel configuration given by the audio parameters.
  AAudioStreamBuilder_setChannelCount(builder, audio_parameters().channels());
  // Always use 16-bit PCM audio sample format.
  AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
  // TODO(henrika): investigate effect of using AAUDIO_SHARING_MODE_EXCLUSIVE.
  // Ask for exclusive mode since this will give us the lowest possible latency.
  // If exclusive mode isn't available, shared mode will be used instead.
  AAudioStreamBuilder_setSharingMode(builder, AAUDIO_SHARING_MODE_SHARED);
  // Use the direction that was given at construction.
  AAudioStreamBuilder_setDirection(builder, direction_);
  // TODO(henrika): investigate performance using different performance modes.
  AAudioStreamBuilder_setPerformanceMode(builder,
                                         AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
  // Given that WebRTC applications require low latency, our audio stream uses
  // an asynchronous callback function to transfer data to and from the
  // application. AAudio executes the callback in a higher-priority thread that
  // has better performance.
  AAudioStreamBuilder_setDataCallback(builder, DataCallback, this);
  // Request that AAudio calls this function if any error occurs on a callback
  // thread.
  AAudioStreamBuilder_setErrorCallback(builder, ErrorCallback, this);
}
| 391 | |
// Opens a stream based on the options set on |builder| and stores it in
// |stream_|. Returns false (leaving |stream_| untouched) if AAudio fails to
// open the stream.
bool AAudioWrapper::OpenStream(AAudioStreamBuilder* builder) {
  RTC_LOG(INFO) << "OpenStream";
  RTC_DCHECK(builder);
  AAudioStream* stream = nullptr;
  RETURN_ON_ERROR(AAudioStreamBuilder_openStream(builder, &stream), false);
  stream_ = stream;
  LogStreamConfiguration();
  return true;
}
| 401 | |
// Closes and releases the stream. Any failure is logged but not propagated;
// |stream_| is always cleared so the destructor's DCHECK holds.
void AAudioWrapper::CloseStream() {
  RTC_LOG(INFO) << "CloseStream";
  RTC_DCHECK(stream_);
  LOG_ON_ERROR(AAudioStream_close(stream_));
  stream_ = nullptr;
}
| 408 | |
| 409 | void AAudioWrapper::LogStreamConfiguration() { |
| 410 | RTC_DCHECK(stream_); |
| 411 | char ss_buf[1024]; |
| 412 | rtc::SimpleStringBuilder ss(ss_buf); |
| 413 | ss << "Stream Configuration: "; |
| 414 | ss << "sample rate=" << sample_rate() << ", channels=" << channel_count(); |
| 415 | ss << ", samples per frame=" << samples_per_frame(); |
| 416 | ss << ", format=" << FormatToString(format()); |
| 417 | ss << ", sharing mode=" << SharingModeToString(sharing_mode()); |
| 418 | ss << ", performance mode=" << PerformanceModeToString(performance_mode()); |
| 419 | ss << ", direction=" << DirectionToString(direction()); |
| 420 | ss << ", device id=" << AAudioStream_getDeviceId(stream_); |
| 421 | ss << ", frames per callback=" << frames_per_callback(); |
| 422 | RTC_LOG(INFO) << ss.str(); |
| 423 | } |
| 424 | |
// Logs the current AAudio stream state as human-readable text.
void AAudioWrapper::LogStreamState() {
  RTC_LOG(INFO) << "AAudio stream state: "
                << AAudio_convertStreamStateToText(stream_state());
}
| 429 | |
| 430 | bool AAudioWrapper::VerifyStreamConfiguration() { |
| 431 | RTC_LOG(INFO) << "VerifyStreamConfiguration"; |
| 432 | RTC_DCHECK(stream_); |
| 433 | // TODO(henrika): should we verify device ID as well? |
| 434 | if (AAudioStream_getSampleRate(stream_) != audio_parameters().sample_rate()) { |
| 435 | RTC_LOG(LS_ERROR) << "Stream unable to use requested sample rate"; |
| 436 | return false; |
| 437 | } |
| 438 | if (AAudioStream_getChannelCount(stream_) != |
| 439 | static_cast<int32_t>(audio_parameters().channels())) { |
| 440 | RTC_LOG(LS_ERROR) << "Stream unable to use requested channel count"; |
| 441 | return false; |
| 442 | } |
| 443 | if (AAudioStream_getFormat(stream_) != AAUDIO_FORMAT_PCM_I16) { |
| 444 | RTC_LOG(LS_ERROR) << "Stream unable to use requested format"; |
| 445 | return false; |
| 446 | } |
| 447 | if (AAudioStream_getSharingMode(stream_) != AAUDIO_SHARING_MODE_SHARED) { |
| 448 | RTC_LOG(LS_ERROR) << "Stream unable to use requested sharing mode"; |
| 449 | return false; |
| 450 | } |
| 451 | if (AAudioStream_getPerformanceMode(stream_) != |
| 452 | AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) { |
| 453 | RTC_LOG(LS_ERROR) << "Stream unable to use requested performance mode"; |
| 454 | return false; |
| 455 | } |
| 456 | if (AAudioStream_getDirection(stream_) != direction()) { |
| 457 | RTC_LOG(LS_ERROR) << "Stream direction could not be set"; |
| 458 | return false; |
| 459 | } |
| 460 | if (AAudioStream_getSamplesPerFrame(stream_) != |
| 461 | static_cast<int32_t>(audio_parameters().channels())) { |
| 462 | RTC_LOG(LS_ERROR) << "Invalid number of samples per frame"; |
| 463 | return false; |
| 464 | } |
| 465 | return true; |
| 466 | } |
| 467 | |
// Tunes the stream's buffer scheme for the lowest possible latency: caches
// the burst size and, for output streams, shrinks the buffer to exactly one
// burst. Returns false only if the output buffer size could not be set.
bool AAudioWrapper::OptimizeBuffers() {
  RTC_LOG(INFO) << "OptimizeBuffers";
  RTC_DCHECK(stream_);
  // Maximum number of frames that can be filled without blocking.
  RTC_LOG(INFO) << "max buffer capacity in frames: "
                << buffer_capacity_in_frames();
  // Query the number of frames that the application should read or write at
  // one time for optimal performance.
  int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
  RTC_LOG(INFO) << "frames per burst for optimal performance: "
                << frames_per_burst;
  // Cache the burst size; used e.g. by EstimateLatencyMillis() and
  // IncreaseOutputBufferSize().
  frames_per_burst_ = frames_per_burst;
  if (direction() == AAUDIO_DIRECTION_INPUT) {
    // There is no point in calling setBufferSizeInFrames() for input streams
    // since it has no effect on the performance (latency in this case).
    return true;
  }
  // Set buffer size to same as burst size to guarantee lowest possible latency.
  // This size might change for output streams if underruns are detected and
  // automatic buffer adjustment is enabled.
  AAudioStream_setBufferSizeInFrames(stream_, frames_per_burst);
  // Read back the actual size; AAudio may clamp the requested value.
  int32_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
  if (buffer_size != frames_per_burst) {
    RTC_LOG(LS_ERROR) << "Failed to use optimal buffer burst size";
    return false;
  }
  // Maximum number of frames that can be filled without blocking.
  RTC_LOG(INFO) << "buffer burst size in frames: " << buffer_size;
  return true;
}
| 498 | |
| 499 | } // namespace webrtc |