/*
 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_

#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
#include "webrtc/modules/audio_device/android/low_latency_event.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

namespace webrtc {

class AudioDeviceBuffer;
class CriticalSectionWrapper;
class FineAudioBuffer;
class SingleRwFifo;
class ThreadWrapper;
// OpenSL implementation that facilitates playing PCM data to an Android
// device. This class is thread-compatible, i.e. given an instance of this
// class, calls to non-const methods require exclusive access to the object.
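//
// A minimal usage sketch based solely on the declarations below (the exact
// call order used by the audio device module may differ):
//
//   OpenSlesOutput output(0 /* id */);
//   output.AttachAudioBuffer(audio_buffer);  // AudioDeviceBuffer* supplied
//                                            // by the caller.
//   if (output.Init() == 0 && output.InitPlayout() == 0) {
//     output.StartPlayout();  // Audio is pulled from |audio_buffer|.
//     // ...
//     output.StopPlayout();
//   }
//   output.Terminate();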
class OpenSlesOutput : public PlayoutDelayProvider {
 public:
  explicit OpenSlesOutput(const int32_t id);
  virtual ~OpenSlesOutput();

  static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
                                              void* env,
                                              void* context);
  static void ClearAndroidAudioDeviceObjects();
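  // Judging by the parameter names, these are typically handed a JavaVM*, a
  // JNIEnv* and the application context jobject once during JNI startup
  // (e.g. from JNI_OnLoad); a sketch, not a contract documented here:
  //   OpenSlesOutput::SetAndroidAudioDeviceObjects(jvm, env, context);
  //   ...
  //   OpenSlesOutput::ClearAndroidAudioDeviceObjects();  // At shutdown.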

  // Main initialization and termination
  int32_t Init();
  int32_t Terminate();
  bool Initialized() const { return initialized_; }

  // Device enumeration
  int16_t PlayoutDevices() { return 1; }

  int32_t PlayoutDeviceName(uint16_t index,
                            char name[kAdmMaxDeviceNameSize],
                            char guid[kAdmMaxGuidSize]);

  // Device selection
  int32_t SetPlayoutDevice(uint16_t index);
  int32_t SetPlayoutDevice(
      AudioDeviceModule::WindowsDeviceType device) { return 0; }

  // No-op
  int32_t SetPlayoutSampleRate(uint32_t sample_rate_hz) { return 0; }

  // Audio transport initialization
  int32_t PlayoutIsAvailable(bool& available);  // NOLINT
  int32_t InitPlayout();
  bool PlayoutIsInitialized() const { return play_initialized_; }

  // Audio transport control
  int32_t StartPlayout();
  int32_t StopPlayout();
  bool Playing() const { return playing_; }

  // Audio mixer initialization
  int32_t InitSpeaker();
  bool SpeakerIsInitialized() const { return speaker_initialized_; }

  // Speaker volume controls
  int32_t SpeakerVolumeIsAvailable(bool& available);  // NOLINT
  int32_t SetSpeakerVolume(uint32_t volume);
  int32_t SpeakerVolume(uint32_t& volume) const { return 0; }  // NOLINT
  int32_t MaxSpeakerVolume(uint32_t& maxVolume) const;  // NOLINT
  int32_t MinSpeakerVolume(uint32_t& minVolume) const;  // NOLINT
  int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const;  // NOLINT

  // Speaker mute control
  int32_t SpeakerMuteIsAvailable(bool& available);  // NOLINT
  int32_t SetSpeakerMute(bool enable) { return -1; }
  int32_t SpeakerMute(bool& enabled) const { return -1; }  // NOLINT

  // Stereo support
  int32_t StereoPlayoutIsAvailable(bool& available);  // NOLINT
  int32_t SetStereoPlayout(bool enable);
  int32_t StereoPlayout(bool& enabled) const;  // NOLINT

  // Delay information and control
  int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
                           uint16_t sizeMS) { return -1; }
  int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,  // NOLINT
                        uint16_t& sizeMS) const;
  int32_t PlayoutDelay(uint16_t& delayMS) const;  // NOLINT

  // Error and warning information
  bool PlayoutWarning() const { return false; }
  bool PlayoutError() const { return false; }
  void ClearPlayoutWarning() {}
  void ClearPlayoutError() {}

  // Attach audio buffer
  void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);

  // Speaker audio routing
  int32_t SetLoudspeakerStatus(bool enable);
  int32_t GetLoudspeakerStatus(bool& enable) const;  // NOLINT

 protected:
  virtual int PlayoutDelayMs();

 private:
  enum {
    kNumInterfaces = 3,
    // TODO(xians): Reduce the number of buffers to improve the latency.
    //              Currently 30ms worth of buffers are needed due to audio
    //              pipeline processing jitter. Note: kNumOpenSlBuffers must
    //              not be changed.
    // According to the OpenSL ES documentation in the NDK, the lower output
    // latency path is used only if the application requests a buffer count
    // of 2 or more. Use the minimum number of buffers to keep the delay as
    // low as possible.
    kNumOpenSlBuffers = 2,
    // NetEq delivers frames on a 10ms basis. This means that every 10ms
    // there will be a time-consuming task. Keeping 10ms worth of buffers
    // ensures that there is 10ms to perform the time-consuming task without
    // running into underflow.
    // In addition to the 10ms that needs to be stored for NetEq processing
    // there will be jitter in the audio pipeline due to the acquisition of
    // locks.
    // Note: The buffers in the OpenSL queue do not count towards the 10ms
    // of frames needed since OpenSL needs to have them ready for playout.
    // See the illustrative latency arithmetic following this enum.
    kNum10MsToBuffer = 6,
  };
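  // Illustrative latency arithmetic, assuming each enqueued buffer holds
  // 10ms of audio (an assumption; the actual buffer duration is derived
  // from the device sample rate at runtime):
  //   total buffered audio <= (kNumOpenSlBuffers + kNum10MsToBuffer) * 10ms
  //                        =  (2 + 6) * 10ms = 80ms (worst case)
  // Fewer FIFO buffers lower this bound but increase the risk of underrun.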

  bool InitSampleRate();
  bool SetLowLatency();
  void UpdatePlayoutDelay();
  // It might be possible to dynamically add or remove buffers based on how
  // close to depletion the fifo is. Fewer buffers mean lower delay, but too
  // few buffers will cause underrun. Dynamically changing the number of
  // buffers would greatly increase code complexity.
  void CalculateNumFifoBuffersNeeded();
  void AllocateBuffers();
  int TotalBuffersUsed() const;
  bool EnqueueAllBuffers();
  // This function also configures the audio player, e.g. the sample rate to
  // use, so it should be called when starting playout.
  bool CreateAudioPlayer();
  void DestroyAudioPlayer();
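  // A sketch of the standard OpenSL ES player creation pattern this class is
  // built around (|ids| and |req| are hypothetical interface-ID arrays; the
  // real code may configure more than shown here):
  //   SLDataLocator_AndroidSimpleBufferQueue bq_loc = {
  //       SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, kNumOpenSlBuffers};
  //   SLDataFormat_PCM pcm = {SL_DATAFORMAT_PCM, 1 /* mono */,
  //       speaker_sampling_rate_ * 1000 /* milliHz */,
  //       SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
  //       SL_SPEAKER_FRONT_CENTER, SL_BYTEORDER_LITTLEENDIAN};
  //   SLDataSource source = {&bq_loc, &pcm};
  //   SLDataLocator_OutputMix mix_loc = {SL_DATALOCATOR_OUTPUTMIX,
  //                                      sles_output_mixer_};
  //   SLDataSink sink = {&mix_loc, NULL};
  //   (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, &sles_player_,
  //       &source, &sink, kNumInterfaces, ids, req);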

  // When an underrun happens there won't be a new frame ready for playout
  // that can be retrieved yet. Since the OpenSL thread must return ASAP,
  // there will be one less buffer available to OpenSL. This function handles
  // this case gracefully by restarting the audio, pushing silent frames to
  // OpenSL for playout. This will sound like a click. Underruns are also
  // logged to make it possible to identify these types of audio artifacts.
  // This function returns true if there has been an underrun. Further
  // processing of audio data should be avoided until this function returns
  // false again.
  // The function needs to be protected by |crit_sect_|.
  bool HandleUnderrun(int event_id, int event_msg);
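  // Illustrative call pattern (an assumption about how a caller consults
  // this function, not a documented contract):
  //   CriticalSectionScoped lock(crit_sect_.get());
  //   if (HandleUnderrun(event_id, event_msg))
  //     return;  // Skip pulling audio until the underrun has been handled.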

  static void PlayerSimpleBufferQueueCallback(
      SLAndroidSimpleBufferQueueItf queueItf,
      void* pContext);
  // This function must not take any locks or do any heavy work. It is a
  // requirement for the OpenSL implementation to work as intended. The
  // reason for this is that taking locks exposes the OpenSL thread to the
  // risk of priority inversion.
  void PlayerSimpleBufferQueueCallbackHandler(
      SLAndroidSimpleBufferQueueItf queueItf);
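  // The static callback is the usual OpenSL ES trampoline into the instance
  // handler; a sketch of the conventional pattern (assumed, not quoted from
  // the implementation):
  //   static void PlayerSimpleBufferQueueCallback(
  //       SLAndroidSimpleBufferQueueItf queue_itf, void* p_context) {
  //     reinterpret_cast<OpenSlesOutput*>(p_context)
  //         ->PlayerSimpleBufferQueueCallbackHandler(queue_itf);
  //   }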

  bool StartCbThreads();
  void StopCbThreads();
  static bool CbThread(void* context);
  // This function must be protected against data races with threads calling
  // this class's public functions. It is a requirement for this class to be
  // thread-compatible.
  bool CbThreadImpl();

  // Java API handle
  AudioManagerJni audio_manager_;

  int id_;
  bool initialized_;
  bool speaker_initialized_;
  bool play_initialized_;

  // Members that are read/write accessed concurrently by the process thread
  // and the threads calling this class's public functions.
  scoped_ptr<ThreadWrapper> play_thread_;  // Processing thread
  scoped_ptr<CriticalSectionWrapper> crit_sect_;
  // This member controls the starting and stopping of playing audio to the
  // device.
  bool playing_;

  // Only one thread, T1, may push and only one thread, T2, may pull. T1 may
  // or may not be the same thread as T2. T1 is the process thread and T2 is
  // the OpenSL thread.
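  // A sketch of the intended single-producer/single-consumer traffic,
  // assuming SingleRwFifo exposes a Push/Pop interface (see
  // single_rw_fifo.h for the actual API):
  //   fifo_->Push(buffer);            // T1: process thread.
  //   int8_t* buffer = fifo_->Pop();  // T2: OpenSL thread.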
  scoped_ptr<SingleRwFifo> fifo_;
  int num_fifo_buffers_needed_;
  LowLatencyEvent event_;
  int number_underruns_;

  // OpenSL handles
  SLObjectItf sles_engine_;
  SLEngineItf sles_engine_itf_;
  SLObjectItf sles_player_;
  SLPlayItf sles_player_itf_;
  SLAndroidSimpleBufferQueueItf sles_player_sbq_itf_;
  SLObjectItf sles_output_mixer_;

  // Audio buffers
  AudioDeviceBuffer* audio_buffer_;
  scoped_ptr<FineAudioBuffer> fine_buffer_;
  scoped_ptr<scoped_ptr<int8_t[]>[]> play_buf_;
  // Index in |play_buf_| pointing to the audio buffer that will be ready the
  // next time PlayerSimpleBufferQueueCallbackHandler is invoked.
  // Ready means that the buffer is ready to be played out to the device.
  int active_queue_;

  // Audio settings
  uint32_t speaker_sampling_rate_;
  int buffer_size_samples_;
  int buffer_size_bytes_;

  // Audio status
  uint16_t playout_delay_;
};

}  // namespace webrtc

#endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_