/*
 * libjingle
 * Copyright 2004 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright notice,
 *     this list of conditions and the following disclaimer in the documentation
 *     and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TALK_MEDIA_BASE_MEDIAENGINE_H_
#define TALK_MEDIA_BASE_MEDIAENGINE_H_

#ifdef OSX
#include <CoreAudio/CoreAudio.h>
#endif

#include <climits>
#include <string>
#include <vector>

#include "talk/base/sigslotrepeater.h"
#include "talk/media/base/codec.h"
#include "talk/media/base/mediachannel.h"
#include "talk/media/base/mediacommon.h"
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videocommon.h"
#include "talk/media/base/videoprocessor.h"
#include "talk/media/base/voiceprocessor.h"
#include "talk/media/devices/devicemanager.h"

#if defined(GOOGLE_CHROME_BUILD) || defined(CHROMIUM_BUILD)
#define DISABLE_MEDIA_ENGINE_FACTORY
#endif

namespace cricket {

class VideoCapturer;

// MediaEngineInterface is an abstraction of a media engine which can be
// subclassed to support different media componentry backends.
// It supports voice and video operations in the same class to facilitate
// proper synchronization between both media types.
class MediaEngineInterface {
 public:
  // Default value to be used for SetAudioDelayOffset().
  static const int kDefaultAudioDelayOffset;

  virtual ~MediaEngineInterface() {}

  // Initialization
  // Starts the engine.
  virtual bool Init(talk_base::Thread* worker_thread) = 0;
  // Shuts down the engine.
  virtual void Terminate() = 0;
  // Returns what the engine is capable of, as a set of Capabilities, above.
  virtual int GetCapabilities() = 0;

  // MediaChannel creation
  // Creates a voice media channel. Returns NULL on failure.
  virtual VoiceMediaChannel* CreateChannel() = 0;
  // Creates a video media channel, paired with the specified voice channel.
  // Returns NULL on failure.
  virtual VideoMediaChannel* CreateVideoChannel(
      VoiceMediaChannel* voice_media_channel) = 0;

  // Creates a soundclip object for playing sounds on. Returns NULL on failure.
  virtual SoundclipMedia* CreateSoundclip() = 0;

  // Configuration
  // Gets global audio options.
  virtual AudioOptions GetAudioOptions() const = 0;
  // Sets global audio options. "options" are from AudioOptions, above.
  virtual bool SetAudioOptions(const AudioOptions& options) = 0;
  // Sets global video options. "options" are from VideoOptions, above.
  virtual bool SetVideoOptions(const VideoOptions& options) = 0;
  // Sets the value used by the echo canceller to offset delay values obtained
  // from the OS.
  virtual bool SetAudioDelayOffset(int offset) = 0;
  // Sets the default (maximum) codec/resolution and encoder option to capture
  // and encode video.
  virtual bool SetDefaultVideoEncoderConfig(
      const VideoEncoderConfig& config) = 0;

  // Device selection
  // TODO(tschmelcher): Add method for selecting the soundclip device.
  virtual bool SetSoundDevices(const Device* in_device,
                               const Device* out_device) = 0;

  // Device configuration
  // Gets the current speaker volume, as a value between 0 and 255.
  virtual bool GetOutputVolume(int* level) = 0;
  // Sets the current speaker volume, as a value between 0 and 255.
  virtual bool SetOutputVolume(int level) = 0;

  // Local monitoring
  // Gets the current microphone level, as a value between 0 and 10.
  virtual int GetInputLevel() = 0;
  // Starts or stops the local microphone. Useful if local mic info is needed
  // prior to a call being connected; the mic will be started automatically
  // when a VoiceMediaChannel starts sending.
  virtual bool SetLocalMonitor(bool enable) = 0;
  // Installs a callback for raw frames from the local camera.
  virtual bool SetLocalRenderer(VideoRenderer* renderer) = 0;

  virtual const std::vector<AudioCodec>& audio_codecs() = 0;
  virtual const std::vector<RtpHeaderExtension>&
      audio_rtp_header_extensions() = 0;
  virtual const std::vector<VideoCodec>& video_codecs() = 0;
  virtual const std::vector<RtpHeaderExtension>&
      video_rtp_header_extensions() = 0;

  // Logging control
  virtual void SetVoiceLogging(int min_sev, const char* filter) = 0;
  virtual void SetVideoLogging(int min_sev, const char* filter) = 0;

  // Voice processors for effects.
  virtual bool RegisterVoiceProcessor(uint32 ssrc,
                                      VoiceProcessor* voice_processor,
                                      MediaProcessorDirection direction) = 0;
  virtual bool UnregisterVoiceProcessor(uint32 ssrc,
                                        VoiceProcessor* voice_processor,
                                        MediaProcessorDirection direction) = 0;

  virtual VideoFormat GetStartCaptureFormat() const = 0;

  virtual sigslot::repeater2<VideoCapturer*, CaptureState>&
      SignalVideoCaptureStateChange() = 0;
};
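
// Example of driving a MediaEngineInterface implementation (a minimal sketch;
// the concrete engine type, thread setup, and channel teardown shown here are
// assumptions, not requirements of this interface):
//
//   MediaEngineInterface* engine = new SomeMediaEngine();  // hypothetical type
//   if (engine->Init(worker_thread)) {
//     VoiceMediaChannel* voice = engine->CreateChannel();
//     VideoMediaChannel* video = engine->CreateVideoChannel(voice);
//     // ... attach channels to transports, set codecs, start sending ...
//     delete video;
//     delete voice;
//     engine->Terminate();
//   }
//   delete engine;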

#if !defined(DISABLE_MEDIA_ENGINE_FACTORY)
class MediaEngineFactory {
 public:
  static MediaEngineInterface* Create();
};
#endif

// CompositeMediaEngine constructs a MediaEngine from separate
// voice and video engine classes.
template<class VOICE, class VIDEO>
class CompositeMediaEngine : public MediaEngineInterface {
 public:
  CompositeMediaEngine() {}
  virtual ~CompositeMediaEngine() {}
  virtual bool Init(talk_base::Thread* worker_thread) {
    if (!voice_.Init(worker_thread))
      return false;
    if (!video_.Init(worker_thread)) {
      voice_.Terminate();
      return false;
    }
    SignalVideoCaptureStateChange().repeat(video_.SignalCaptureStateChange);
    return true;
  }
  virtual void Terminate() {
    video_.Terminate();
    voice_.Terminate();
  }

  virtual int GetCapabilities() {
    return (voice_.GetCapabilities() | video_.GetCapabilities());
  }
  virtual VoiceMediaChannel* CreateChannel() {
    return voice_.CreateChannel();
  }
  virtual VideoMediaChannel* CreateVideoChannel(VoiceMediaChannel* channel) {
    return video_.CreateChannel(channel);
  }
  virtual SoundclipMedia* CreateSoundclip() {
    return voice_.CreateSoundclip();
  }

  virtual AudioOptions GetAudioOptions() const {
    return voice_.GetOptions();
  }
  virtual bool SetAudioOptions(const AudioOptions& options) {
    return voice_.SetOptions(options);
  }
  virtual bool SetVideoOptions(const VideoOptions& options) {
    return video_.SetOptions(options);
  }
  virtual bool SetAudioDelayOffset(int offset) {
    return voice_.SetDelayOffset(offset);
  }
  virtual bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config) {
    return video_.SetDefaultEncoderConfig(config);
  }

  virtual bool SetSoundDevices(const Device* in_device,
                               const Device* out_device) {
    return voice_.SetDevices(in_device, out_device);
  }

  virtual bool GetOutputVolume(int* level) {
    return voice_.GetOutputVolume(level);
  }
  virtual bool SetOutputVolume(int level) {
    return voice_.SetOutputVolume(level);
  }

  virtual int GetInputLevel() {
    return voice_.GetInputLevel();
  }
  virtual bool SetLocalMonitor(bool enable) {
    return voice_.SetLocalMonitor(enable);
  }
  virtual bool SetLocalRenderer(VideoRenderer* renderer) {
    return video_.SetLocalRenderer(renderer);
  }

  virtual const std::vector<AudioCodec>& audio_codecs() {
    return voice_.codecs();
  }
  virtual const std::vector<RtpHeaderExtension>& audio_rtp_header_extensions() {
    return voice_.rtp_header_extensions();
  }
  virtual const std::vector<VideoCodec>& video_codecs() {
    return video_.codecs();
  }
  virtual const std::vector<RtpHeaderExtension>& video_rtp_header_extensions() {
    return video_.rtp_header_extensions();
  }

  virtual void SetVoiceLogging(int min_sev, const char* filter) {
    voice_.SetLogging(min_sev, filter);
  }
  virtual void SetVideoLogging(int min_sev, const char* filter) {
    video_.SetLogging(min_sev, filter);
  }

  virtual bool RegisterVoiceProcessor(uint32 ssrc,
                                      VoiceProcessor* processor,
                                      MediaProcessorDirection direction) {
    return voice_.RegisterProcessor(ssrc, processor, direction);
  }
  virtual bool UnregisterVoiceProcessor(uint32 ssrc,
                                        VoiceProcessor* processor,
                                        MediaProcessorDirection direction) {
    return voice_.UnregisterProcessor(ssrc, processor, direction);
  }
  virtual VideoFormat GetStartCaptureFormat() const {
    return video_.GetStartCaptureFormat();
  }
  virtual sigslot::repeater2<VideoCapturer*, CaptureState>&
      SignalVideoCaptureStateChange() {
    return signal_state_change_;
  }

 protected:
  VOICE voice_;
  VIDEO video_;
  sigslot::repeater2<VideoCapturer*, CaptureState> signal_state_change_;
};
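
// Note on CompositeMediaEngine: every MediaEngineInterface call above is a
// thin forward to the voice_ or video_ member. Init() brings up the voice
// engine first and terminates it again if video initialization fails, so a
// failed Init() leaves neither engine running.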

// NullVoiceEngine can be used with CompositeMediaEngine in the case where only
// a video engine is desired.
class NullVoiceEngine {
 public:
  bool Init(talk_base::Thread* worker_thread) { return true; }
  void Terminate() {}
  int GetCapabilities() { return 0; }
  // If you need this to return an actual channel, use FakeMediaEngine instead.
  VoiceMediaChannel* CreateChannel() {
    return NULL;
  }
  SoundclipMedia* CreateSoundclip() {
    return NULL;
  }
  bool SetDelayOffset(int offset) { return true; }
  AudioOptions GetOptions() const { return AudioOptions(); }
  bool SetOptions(const AudioOptions& options) { return true; }
  bool SetDevices(const Device* in_device, const Device* out_device) {
    return true;
  }
  bool GetOutputVolume(int* level) {
    *level = 0;
    return true;
  }
  bool SetOutputVolume(int level) { return true; }
  int GetInputLevel() { return 0; }
  bool SetLocalMonitor(bool enable) { return true; }
  const std::vector<AudioCodec>& codecs() { return codecs_; }
  const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
    return rtp_header_extensions_;
  }
  void SetLogging(int min_sev, const char* filter) {}
  bool RegisterProcessor(uint32 ssrc,
                         VoiceProcessor* voice_processor,
                         MediaProcessorDirection direction) { return true; }
  bool UnregisterProcessor(uint32 ssrc,
                           VoiceProcessor* voice_processor,
                           MediaProcessorDirection direction) { return true; }

 private:
  std::vector<AudioCodec> codecs_;
  std::vector<RtpHeaderExtension> rtp_header_extensions_;
};

// NullVideoEngine can be used with CompositeMediaEngine in the case where only
// a voice engine is desired.
class NullVideoEngine {
 public:
  bool Init(talk_base::Thread* worker_thread) { return true; }
  void Terminate() {}
  int GetCapabilities() { return 0; }
  // If you need this to return an actual channel, use FakeMediaEngine instead.
  VideoMediaChannel* CreateChannel(
      VoiceMediaChannel* voice_media_channel) {
    return NULL;
  }
  bool SetOptions(const VideoOptions& options) { return true; }
  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config) {
    return true;
  }
  bool SetLocalRenderer(VideoRenderer* renderer) { return true; }
  const std::vector<VideoCodec>& codecs() { return codecs_; }
  const std::vector<RtpHeaderExtension>& rtp_header_extensions() {
    return rtp_header_extensions_;
  }
  void SetLogging(int min_sev, const char* filter) {}
  VideoFormat GetStartCaptureFormat() const { return VideoFormat(); }

  sigslot::signal2<VideoCapturer*, CaptureState> SignalCaptureStateChange;

 private:
  std::vector<VideoCodec> codecs_;
  std::vector<RtpHeaderExtension> rtp_header_extensions_;
};

typedef CompositeMediaEngine<NullVoiceEngine, NullVideoEngine> NullMediaEngine;
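
// Video-only or voice-only engines follow the same pattern, pairing a Null
// engine with a concrete one (the concrete engine names here are illustrative
// placeholders, not types provided by this header):
//
//   typedef CompositeMediaEngine<NullVoiceEngine, SomeVideoEngine> VideoOnlyEngine;
//   typedef CompositeMediaEngine<SomeVoiceEngine, NullVideoEngine> VoiceOnlyEngine;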

enum DataChannelType {
  DCT_NONE = 0,
  DCT_RTP = 1,
  DCT_SCTP = 2
};

class DataEngineInterface {
 public:
  virtual ~DataEngineInterface() {}
  virtual DataMediaChannel* CreateChannel(DataChannelType type) = 0;
  virtual const std::vector<DataCodec>& data_codecs() = 0;
};
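
// Sketch of how a data engine is typically used (the data_engine variable and
// ownership handling are assumptions, not part of this interface):
//
//   DataMediaChannel* data_channel = data_engine->CreateChannel(DCT_SCTP);
//   // ... pick codecs from data_engine->data_codecs(), then send data ...
//   delete data_channel;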

}  // namespace cricket

#endif  // TALK_MEDIA_BASE_MEDIAENGINE_H_