/*
 * libjingle
 * Copyright 2004 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TALK_MEDIA_BASE_MEDIAENGINE_H_
#define TALK_MEDIA_BASE_MEDIAENGINE_H_

#ifdef OSX
#include <CoreAudio/CoreAudio.h>
#endif

#include <climits>
#include <string>
#include <vector>

#include "talk/base/sigslotrepeater.h"
#include "talk/media/base/codec.h"
#include "talk/media/base/mediachannel.h"
#include "talk/media/base/mediacommon.h"
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videocommon.h"
#include "talk/media/base/videoprocessor.h"
#include "talk/media/base/voiceprocessor.h"
#include "talk/media/devices/devicemanager.h"

#if defined(GOOGLE_CHROME_BUILD) || defined(CHROMIUM_BUILD)
#define DISABLE_MEDIA_ENGINE_FACTORY
#endif

namespace cricket {

class VideoCapturer;

// MediaEngineInterface is an abstraction of a media engine which can be
// subclassed to support different media componentry backends.
// It supports voice and video operations in the same class to facilitate
// proper synchronization between both media types.
class MediaEngineInterface {
 public:
  // Default value to be used for SetAudioDelayOffset().
  static const int kDefaultAudioDelayOffset;

  virtual ~MediaEngineInterface() {}

  // Initialization
  // Starts the engine.
  virtual bool Init(talk_base::Thread* worker_thread) = 0;
  // Shuts down the engine.
  virtual void Terminate() = 0;
  // Returns what the engine is capable of, as a set of Capabilities, above.
  virtual int GetCapabilities() = 0;

  // MediaChannel creation
  // Creates a voice media channel. Returns NULL on failure.
  virtual VoiceMediaChannel* CreateChannel() = 0;
  // Creates a video media channel, paired with the specified voice channel.
  // Returns NULL on failure.
  virtual VideoMediaChannel* CreateVideoChannel(
      VoiceMediaChannel* voice_media_channel) = 0;

  // Creates a soundclip object for playing sounds on. Returns NULL on failure.
  virtual SoundclipMedia* CreateSoundclip() = 0;

  // Configuration
  // Gets global audio options.
  virtual AudioOptions GetAudioOptions() const = 0;
  // Sets global audio options. "options" are from AudioOptions, above.
  virtual bool SetAudioOptions(const AudioOptions& options) = 0;
  // Sets global video options. "options" are from VideoOptions, above.
  virtual bool SetVideoOptions(const VideoOptions& options) = 0;
  // Sets the value used by the echo canceller to offset delay values obtained
  // from the OS.
  virtual bool SetAudioDelayOffset(int offset) = 0;
  // Sets the default (maximum) codec/resolution and encoder option to capture
  // and encode video.
  virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config)
      = 0;
  // Gets the default (maximum) codec/resolution and encoder option used to
  // capture and encode video, as set by SetDefaultVideoEncoderConfig or the
  // default from the video engine if not previously set.
  virtual VideoEncoderConfig GetDefaultVideoEncoderConfig() const = 0;

  // Device selection
  // TODO(tschmelcher): Add method for selecting the soundclip device.
  virtual bool SetSoundDevices(const Device* in_device,
                               const Device* out_device) = 0;

  // Device configuration
  // Gets the current speaker volume, as a value between 0 and 255.
  virtual bool GetOutputVolume(int* level) = 0;
  // Sets the current speaker volume, as a value between 0 and 255.
  virtual bool SetOutputVolume(int level) = 0;

  // Local monitoring
  // Gets the current microphone level, as a value between 0 and 10.
  virtual int GetInputLevel() = 0;
  // Starts or stops the local microphone. Useful if local mic info is needed
  // prior to a call being connected; the mic will be started automatically
  // when a VoiceMediaChannel starts sending.
  virtual bool SetLocalMonitor(bool enable) = 0;
  // Installs a callback for raw frames from the local camera.
  virtual bool SetLocalRenderer(VideoRenderer* renderer) = 0;

  virtual const std::vector<AudioCodec>& audio_codecs() = 0;
  virtual const std::vector<RtpHeaderExtension>&
      audio_rtp_header_extensions() = 0;
  virtual const std::vector<VideoCodec>& video_codecs() = 0;
  virtual const std::vector<RtpHeaderExtension>&
      video_rtp_header_extensions() = 0;

  // Logging control
  virtual void SetVoiceLogging(int min_sev, const char* filter) = 0;
  virtual void SetVideoLogging(int min_sev, const char* filter) = 0;

  // Starts AEC dump using an existing file.
  virtual bool StartAecDump(FILE* file) = 0;

  // Voice processors for effects.
  virtual bool RegisterVoiceProcessor(uint32 ssrc,
                                      VoiceProcessor* voice_processor,
                                      MediaProcessorDirection direction) = 0;
  virtual bool UnregisterVoiceProcessor(uint32 ssrc,
                                        VoiceProcessor* voice_processor,
                                        MediaProcessorDirection direction) = 0;

  virtual VideoFormat GetStartCaptureFormat() const = 0;

  virtual sigslot::repeater2<VideoCapturer*, CaptureState>&
      SignalVideoCaptureStateChange() = 0;
};


#if !defined(DISABLE_MEDIA_ENGINE_FACTORY)
class MediaEngineFactory {
 public:
  static MediaEngineInterface* Create();
};
#endif
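// Illustrative sketch (not part of the API): a typical engine lifecycle as
// implied by MediaEngineInterface. The variable names, the cleanup order, and
// the use of talk_base::Thread::Current() are assumptions made for this
// example; a real embedder supplies its own worker thread and error handling,
// and the factory is only available when DISABLE_MEDIA_ENGINE_FACTORY is not
// defined.
//
//   MediaEngineInterface* engine = MediaEngineFactory::Create();
//   if (engine->Init(talk_base::Thread::Current())) {
//     VoiceMediaChannel* voice = engine->CreateChannel();
//     VideoMediaChannel* video = engine->CreateVideoChannel(voice);
//     // ... negotiate codecs from engine->audio_codecs() / video_codecs(),
//     // send and receive media ...
//     delete video;
//     delete voice;
//     engine->Terminate();
//   }
//   delete engine;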

// CompositeMediaEngine constructs a MediaEngine from separate
// voice and video engine classes.
template<class VOICE, class VIDEO>
class CompositeMediaEngine : public MediaEngineInterface {
 public:
  CompositeMediaEngine() {}
  virtual ~CompositeMediaEngine() {}
  virtual bool Init(talk_base::Thread* worker_thread) {
    if (!voice_.Init(worker_thread))
      return false;
    if (!video_.Init(worker_thread)) {
      voice_.Terminate();
      return false;
    }
    SignalVideoCaptureStateChange().repeat(video_.SignalCaptureStateChange);
    return true;
  }
  virtual void Terminate() {
    video_.Terminate();
    voice_.Terminate();
  }

  virtual int GetCapabilities() {
    return (voice_.GetCapabilities() | video_.GetCapabilities());
  }
  virtual VoiceMediaChannel* CreateChannel() {
    return voice_.CreateChannel();
  }
  virtual VideoMediaChannel* CreateVideoChannel(VoiceMediaChannel* channel) {
    return video_.CreateChannel(channel);
  }
  virtual SoundclipMedia* CreateSoundclip() {
    return voice_.CreateSoundclip();
  }

  virtual AudioOptions GetAudioOptions() const {
    return voice_.GetOptions();
  }
  virtual bool SetAudioOptions(const AudioOptions& options) {
    return voice_.SetOptions(options);
  }
  virtual bool SetVideoOptions(const VideoOptions& options) {
    return video_.SetOptions(options);
  }
  virtual bool SetAudioDelayOffset(int offset) {
    return voice_.SetDelayOffset(offset);
  }
  virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
    return video_.SetDefaultEncoderConfig(config);
  }
  virtual VideoEncoderConfig GetDefaultVideoEncoderConfig() const {
    return video_.GetDefaultEncoderConfig();
  }

  virtual bool SetSoundDevices(const Device* in_device,
                               const Device* out_device) {
    return voice_.SetDevices(in_device, out_device);
  }

  virtual bool GetOutputVolume(int* level) {
    return voice_.GetOutputVolume(level);
  }
  virtual bool SetOutputVolume(int level) {
    return voice_.SetOutputVolume(level);
  }

  virtual int GetInputLevel() {
    return voice_.GetInputLevel();
  }
  virtual bool SetLocalMonitor(bool enable) {
    return voice_.SetLocalMonitor(enable);
  }
  virtual bool SetLocalRenderer(VideoRenderer* renderer) {
    return video_.SetLocalRenderer(renderer);
  }

  virtual const std::vector<AudioCodec>& audio_codecs() {
    return voice_.codecs();
  }
  virtual const std::vector<RtpHeaderExtension>& audio_rtp_header_extensions() {
    return voice_.rtp_header_extensions();
  }
  virtual const std::vector<VideoCodec>& video_codecs() {
    return video_.codecs();
  }
  virtual const std::vector<RtpHeaderExtension>& video_rtp_header_extensions() {
    return video_.rtp_header_extensions();
  }

  virtual void SetVoiceLogging(int min_sev, const char* filter) {
    voice_.SetLogging(min_sev, filter);
  }
  virtual void SetVideoLogging(int min_sev, const char* filter) {
    video_.SetLogging(min_sev, filter);
  }

  virtual bool StartAecDump(FILE* file) {
    return voice_.StartAecDump(file);
  }

  virtual bool RegisterVoiceProcessor(uint32 ssrc,
                                      VoiceProcessor* processor,
                                      MediaProcessorDirection direction) {
    return voice_.RegisterProcessor(ssrc, processor, direction);
  }
  virtual bool UnregisterVoiceProcessor(uint32 ssrc,
                                        VoiceProcessor* processor,
                                        MediaProcessorDirection direction) {
    return voice_.UnregisterProcessor(ssrc, processor, direction);
  }
  virtual VideoFormat GetStartCaptureFormat() const {
    return video_.GetStartCaptureFormat();
  }
  virtual sigslot::repeater2<VideoCapturer*, CaptureState>&
      SignalVideoCaptureStateChange() {
    return signal_state_change_;
  }

 protected:
  VOICE voice_;
  VIDEO video_;
  sigslot::repeater2<VideoCapturer*, CaptureState> signal_state_change_;
};
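// Illustrative sketch: a concrete engine is produced by instantiating the
// template with voice and video component classes that provide the members
// used above (Init, Terminate, CreateChannel, SetOptions, codecs, ...). The
// component names below are placeholders, not classes defined in this header;
// see NullMediaEngine further down for a real instantiation.
//
//   typedef CompositeMediaEngine<MyVoiceEngine, MyVideoEngine> MyMediaEngine;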

// NullVoiceEngine can be used with CompositeMediaEngine in the case where only
// a video engine is desired.
class NullVoiceEngine {
 public:
  bool Init(talk_base::Thread* worker_thread) { return true; }
  void Terminate() {}
  int GetCapabilities() { return 0; }
  // If you need this to return an actual channel, use FakeMediaEngine instead.
  VoiceMediaChannel* CreateChannel() {
    return NULL;
  }
  SoundclipMedia* CreateSoundclip() {
    return NULL;
  }
  bool SetDelayOffset(int offset) { return true; }
  AudioOptions GetOptions() const { return AudioOptions(); }
  bool SetOptions(const AudioOptions& options) { return true; }
  bool SetDevices(const Device* in_device, const Device* out_device) {
    return true;
  }
  bool GetOutputVolume(int* level) {
    *level = 0;
    return true;
  }
  bool SetOutputVolume(int level) { return true; }
  int GetInputLevel() { return 0; }
  bool SetLocalMonitor(bool enable) { return true; }
  const std::vector<AudioCodec>& codecs() { return codecs_; }
  const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
    return rtp_header_extensions_;
  }
  void SetLogging(int min_sev, const char* filter) {}
  bool RegisterProcessor(uint32 ssrc,
                         VoiceProcessor* voice_processor,
                         MediaProcessorDirection direction) { return true; }
  bool UnregisterProcessor(uint32 ssrc,
                           VoiceProcessor* voice_processor,
                           MediaProcessorDirection direction) { return true; }

 private:
  std::vector<AudioCodec> codecs_;
  std::vector<RtpHeaderExtension> rtp_header_extensions_;
};

// NullVideoEngine can be used with CompositeMediaEngine in the case where only
// a voice engine is desired.
class NullVideoEngine {
 public:
  bool Init(talk_base::Thread* worker_thread) { return true; }
  void Terminate() {}
  int GetCapabilities() { return 0; }
  // If you need this to return an actual channel, use FakeMediaEngine instead.
  VideoMediaChannel* CreateChannel(
      VoiceMediaChannel* voice_media_channel) {
    return NULL;
  }
  bool SetOptions(const VideoOptions& options) { return true; }
  VideoEncoderConfig GetDefaultEncoderConfig() const {
    return VideoEncoderConfig();
  }
  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
    return true;
  }
  bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
  const std::vector<VideoCodec>& codecs() { return codecs_; }
  const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
    return rtp_header_extensions_;
  }
  void SetLogging(int min_sev, const char* filter) {}
  VideoFormat GetStartCaptureFormat() const { return VideoFormat(); }

  sigslot::signal2<VideoCapturer*, CaptureState> SignalCaptureStateChange;

 private:
  std::vector<VideoCodec> codecs_;
  std::vector<RtpHeaderExtension> rtp_header_extensions_;
};

typedef CompositeMediaEngine<NullVoiceEngine, NullVideoEngine> NullMediaEngine;
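// Illustrative sketch: pairing a real component with one of the null engines
// above yields a single-media engine, e.g. a voice-only engine (the voice
// component name here is a placeholder):
//
//   typedef CompositeMediaEngine<MyVoiceEngine, NullVideoEngine>
//       VoiceOnlyMediaEngine;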

enum DataChannelType {
  DCT_NONE = 0,
  DCT_RTP = 1,
  DCT_SCTP = 2
};

class DataEngineInterface {
 public:
  virtual ~DataEngineInterface() {}
  virtual DataMediaChannel* CreateChannel(DataChannelType type) = 0;
  virtual const std::vector<DataCodec>& data_codecs() = 0;
};
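// Illustrative sketch: an embedder asks its DataEngineInterface implementation
// for a data channel of the desired type; "data_engine" is a placeholder for
// such an implementation.
//
//   DataMediaChannel* data = data_engine->CreateChannel(DCT_SCTP);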

}  // namespace cricket

#endif  // TALK_MEDIA_BASE_MEDIAENGINE_H_