1/*
2 * libjingle
3 * Copyright 2004 Google Inc.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
19 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#ifdef HAVE_CONFIG_H
29#include <config.h>
30#endif
31
32#ifdef HAVE_WEBRTC_VOICE
33
34#include "talk/media/webrtc/webrtcvoiceengine.h"
35
36#include <algorithm>
37#include <cstdio>
38#include <string>
39#include <vector>
40
41#include "talk/base/base64.h"
42#include "talk/base/byteorder.h"
43#include "talk/base/common.h"
44#include "talk/base/helpers.h"
45#include "talk/base/logging.h"
46#include "talk/base/stringencode.h"
47#include "talk/base/stringutils.h"
48#include "talk/media/base/audiorenderer.h"
49#include "talk/media/base/constants.h"
50#include "talk/media/base/streamparams.h"
51#include "talk/media/base/voiceprocessor.h"
52#include "talk/media/webrtc/webrtcvoe.h"
53#include "webrtc/common.h"
54#include "webrtc/modules/audio_processing/include/audio_processing.h"
55
56#ifdef WIN32
57#include <objbase.h> // NOLINT
58#endif
59
60namespace cricket {
61
62struct CodecPref {
63 const char* name;
64 int clockrate;
65 int channels;
66 int payload_type;
67 bool is_multi_rate;
68};
69
70static const CodecPref kCodecPrefs[] = {
71 { "OPUS", 48000, 2, 111, true },
72 { "ISAC", 16000, 1, 103, true },
73 { "ISAC", 32000, 1, 104, true },
74 { "CELT", 32000, 1, 109, true },
75 { "CELT", 32000, 2, 110, true },
76 { "G722", 16000, 1, 9, false },
77 { "ILBC", 8000, 1, 102, false },
78 { "PCMU", 8000, 1, 0, false },
79 { "PCMA", 8000, 1, 8, false },
80 { "CN", 48000, 1, 107, false },
81 { "CN", 32000, 1, 106, false },
82 { "CN", 16000, 1, 105, false },
83 { "CN", 8000, 1, 13, false },
84 { "red", 8000, 1, 127, false },
85 { "telephone-event", 8000, 1, 126, false },
86};
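// Each entry above becomes an advertised AudioCodec in ConstructCodecs() below:
// the entry's payload_type is used as the codec id, and earlier entries get a
// higher preference. For example, the first row is advertised as OPUS/48000/2
// with payload type 111 and the highest preference.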
87
88// For Linux/Mac, using the default device is done by specifying index 0 for
89// VoE 4.0 and not -1 (which was the case for VoE 3.5).
90//
91// On Windows Vista and newer, Microsoft introduced the concept of "Default
92// Communications Device". This means that there are two types of default
93// devices (old Wave Audio style default and Default Communications Device).
94//
95// On Windows systems which only support Wave Audio style default, use either
96// -1 or 0 to select the default device.
97//
98// On Windows systems which support both "Default Communication Device" and
99// old Wave Audio style default, use -1 for Default Communications Device and
100// -2 for Wave Audio style default, which is what we want to use for clips.
101// It's not clear yet whether the -2 index is handled properly on other OSes.
102
103#ifdef WIN32
104static const int kDefaultAudioDeviceId = -1;
105static const int kDefaultSoundclipDeviceId = -2;
106#else
107static const int kDefaultAudioDeviceId = 0;
108#endif
109
110static const char kIsacCodecName[] = "ISAC";
111static const char kL16CodecName[] = "L16";
112// Codec parameters for Opus.
113static const int kOpusMonoBitrate = 32000;
114// Parameter used for NACK.
115// This value is equivalent to 5 seconds of audio data at 20 ms per packet.
116static const int kNackMaxPackets = 250;
117static const int kOpusStereoBitrate = 64000;
118// draft-spittka-payload-rtp-opus-03
119// Opus bitrate should be in the range between 6000 and 510000.
120static const int kOpusMinBitrate = 6000;
121static const int kOpusMaxBitrate = 510000;
122// Default audio dscp value.
123// See http://tools.ietf.org/html/rfc2474 for details.
124// See also http://tools.ietf.org/html/draft-jennings-rtcweb-qos-00
125static const talk_base::DiffServCodePoint kAudioDscpValue = talk_base::DSCP_EF;
126
127// Ensure we open the file in a writeable path on ChromeOS and Android. This
128// workaround can be removed when it's possible to specify a filename for audio
129// option based AEC dumps.
130//
131// TODO(grunell): Use a string in the options instead of hardcoding it here
132// and let the embedder choose the filename (crbug.com/264223).
133//
134// NOTE(ajm): Don't use hardcoded paths on platforms not explicitly specified
135// below.
136#if defined(CHROMEOS)
137static const char kAecDumpByAudioOptionFilename[] = "/tmp/audio.aecdump";
138#elif defined(ANDROID)
139static const char kAecDumpByAudioOptionFilename[] = "/sdcard/audio.aecdump";
140#else
141static const char kAecDumpByAudioOptionFilename[] = "audio.aecdump";
142#endif
143
144// Dumps an AudioCodec in RFC 2327-ish format.
145static std::string ToString(const AudioCodec& codec) {
146 std::stringstream ss;
147 ss << codec.name << "/" << codec.clockrate << "/" << codec.channels
148 << " (" << codec.id << ")";
149 return ss.str();
150}
151static std::string ToString(const webrtc::CodecInst& codec) {
152 std::stringstream ss;
153 ss << codec.plname << "/" << codec.plfreq << "/" << codec.channels
154 << " (" << codec.pltype << ")";
155 return ss.str();
156}
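// Example output from either overload for the built-in iSAC wideband entry:
//   "ISAC/16000/1 (103)".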
157
158static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
159 const char* delim = "\r\n";
160 for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
161 LOG_V(sev) << tok;
162 }
163}
164
165// Severity is an integer because it is assumed to come from the command line.
166static int SeverityToFilter(int severity) {
167 int filter = webrtc::kTraceNone;
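  // Note: the cases below fall through intentionally, so each libjingle
  // severity enables its own WebRTC trace bits plus the bits of every
  // higher-severity (less verbose) level; e.g. LS_WARNING also enables the
  // error and critical bits.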
168 switch (severity) {
169 case talk_base::LS_VERBOSE:
170 filter |= webrtc::kTraceAll;
171 case talk_base::LS_INFO:
172 filter |= (webrtc::kTraceStateInfo | webrtc::kTraceInfo);
173 case talk_base::LS_WARNING:
174 filter |= (webrtc::kTraceTerseInfo | webrtc::kTraceWarning);
175 case talk_base::LS_ERROR:
176 filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
177 }
178 return filter;
179}
180
181static bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
182 for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
183 if (_stricmp(kCodecPrefs[i].name, codec.plname) == 0 &&
184 kCodecPrefs[i].clockrate == codec.plfreq) {
185 return kCodecPrefs[i].is_multi_rate;
186 }
187 }
188 return false;
189}
190
191static bool IsTelephoneEventCodec(const std::string& name) {
192 return _stricmp(name.c_str(), "telephone-event") == 0;
193}
194
195static bool IsCNCodec(const std::string& name) {
196 return _stricmp(name.c_str(), "CN") == 0;
197}
198
199static bool IsRedCodec(const std::string& name) {
200 return _stricmp(name.c_str(), "red") == 0;
201}
202
203static bool FindCodec(const std::vector<AudioCodec>& codecs,
204 const AudioCodec& codec,
205 AudioCodec* found_codec) {
206 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
207 it != codecs.end(); ++it) {
208 if (it->Matches(codec)) {
209 if (found_codec != NULL) {
210 *found_codec = *it;
211 }
212 return true;
213 }
214 }
215 return false;
216}
217
218static bool IsNackEnabled(const AudioCodec& codec) {
219 return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamNack,
220 kParamValueEmpty));
221}
222
223// Gets the default set of options applied to the engine. Historically, these
224// were supplied as a combination of flags from the channel manager (ec, agc,
225// ns, and highpass) and the rest hardcoded in InitInternal.
226static AudioOptions GetDefaultEngineOptions() {
227 AudioOptions options;
228 options.echo_cancellation.Set(true);
229 options.auto_gain_control.Set(true);
230 options.noise_suppression.Set(true);
231 options.highpass_filter.Set(true);
232 options.stereo_swapping.Set(false);
233 options.typing_detection.Set(true);
234 options.conference_mode.Set(false);
235 options.adjust_agc_delta.Set(0);
236 options.experimental_agc.Set(false);
237 options.experimental_aec.Set(false);
238 options.experimental_ns.Set(false);
239 options.aec_dump.Set(false);
240 options.opus_fec.Set(false);
241 return options;
242}
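// A minimal sketch of how an embedder might override one of these defaults
// after engine creation (hypothetical usage, not called from this file):
//   AudioOptions opts = GetDefaultEngineOptions();
//   opts.typing_detection.Set(false);
//   engine->SetOptions(opts);  // engine is a WebRtcVoiceEngine*.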
243
244class WebRtcSoundclipMedia : public SoundclipMedia {
245 public:
246 explicit WebRtcSoundclipMedia(WebRtcVoiceEngine *engine)
247 : engine_(engine), webrtc_channel_(-1) {
248 engine_->RegisterSoundclip(this);
249 }
250
251 virtual ~WebRtcSoundclipMedia() {
252 engine_->UnregisterSoundclip(this);
253 if (webrtc_channel_ != -1) {
254 // We shouldn't have to call Disable() here. DeleteChannel() should call
255 // StopPlayout() while deleting the channel. We should fix the bug
256 // inside WebRTC and remove the Disable() call below. This work is
257 // tracked by bug http://b/issue?id=5382855.
258 PlaySound(NULL, 0, 0);
259 Disable();
260 if (engine_->voe_sc()->base()->DeleteChannel(webrtc_channel_)
261 == -1) {
262 LOG_RTCERR1(DeleteChannel, webrtc_channel_);
263 }
264 }
265 }
266
267 bool Init() {
268 if (!engine_->voe_sc()) {
269 return false;
270 }
271 webrtc_channel_ = engine_->CreateSoundclipVoiceChannel();
272 if (webrtc_channel_ == -1) {
273 LOG_RTCERR0(CreateChannel);
274 return false;
275 }
276 return true;
277 }
278
279 bool Enable() {
280 if (engine_->voe_sc()->base()->StartPlayout(webrtc_channel_) == -1) {
281 LOG_RTCERR1(StartPlayout, webrtc_channel_);
282 return false;
283 }
284 return true;
285 }
286
287 bool Disable() {
288 if (engine_->voe_sc()->base()->StopPlayout(webrtc_channel_) == -1) {
289 LOG_RTCERR1(StopPlayout, webrtc_channel_);
290 return false;
291 }
292 return true;
293 }
294
295 virtual bool PlaySound(const char *buf, int len, int flags) {
296 // The voe file api is not available in chrome.
297 if (!engine_->voe_sc()->file()) {
298 return false;
299 }
300 // Must stop playing the current sound (if any), because we are about to
301 // modify the stream.
302 if (engine_->voe_sc()->file()->StopPlayingFileLocally(webrtc_channel_)
303 == -1) {
304 LOG_RTCERR1(StopPlayingFileLocally, webrtc_channel_);
305 return false;
306 }
307
308 if (buf) {
309 stream_.reset(new WebRtcSoundclipStream(buf, len));
310 stream_->set_loop((flags & SF_LOOP) != 0);
311 stream_->Rewind();
312
313 // Play it.
314 if (engine_->voe_sc()->file()->StartPlayingFileLocally(
315 webrtc_channel_, stream_.get()) == -1) {
316 LOG_RTCERR2(StartPlayingFileLocally, webrtc_channel_, stream_.get());
317 LOG(LS_ERROR) << "Unable to start soundclip";
318 return false;
319 }
320 } else {
321 stream_.reset();
322 }
323 return true;
324 }
325
326 int GetLastEngineError() const { return engine_->voe_sc()->error(); }
327
328 private:
329 WebRtcVoiceEngine *engine_;
330 int webrtc_channel_;
331 talk_base::scoped_ptr<WebRtcSoundclipStream> stream_;
332};
333
334WebRtcVoiceEngine::WebRtcVoiceEngine()
335 : voe_wrapper_(new VoEWrapper()),
336 voe_wrapper_sc_(new VoEWrapper()),
337 voe_wrapper_sc_initialized_(false),
338 tracing_(new VoETraceWrapper()),
339 adm_(NULL),
340 adm_sc_(NULL),
341 log_filter_(SeverityToFilter(kDefaultLogSeverity)),
342 is_dumping_aec_(false),
343 desired_local_monitor_enable_(false),
344 tx_processor_ssrc_(0),
345 rx_processor_ssrc_(0) {
346 Construct();
347}
348
349WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
350 VoEWrapper* voe_wrapper_sc,
351 VoETraceWrapper* tracing)
352 : voe_wrapper_(voe_wrapper),
353 voe_wrapper_sc_(voe_wrapper_sc),
354 voe_wrapper_sc_initialized_(false),
355 tracing_(tracing),
356 adm_(NULL),
357 adm_sc_(NULL),
358 log_filter_(SeverityToFilter(kDefaultLogSeverity)),
359 is_dumping_aec_(false),
360 desired_local_monitor_enable_(false),
361 tx_processor_ssrc_(0),
362 rx_processor_ssrc_(0) {
363 Construct();
364}
365
366void WebRtcVoiceEngine::Construct() {
367 SetTraceFilter(log_filter_);
368 initialized_ = false;
369 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
370 SetTraceOptions("");
371 if (tracing_->SetTraceCallback(this) == -1) {
372 LOG_RTCERR0(SetTraceCallback);
373 }
374 if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
375 LOG_RTCERR0(RegisterVoiceEngineObserver);
376 }
377 // Clear the default agc state.
378 memset(&default_agc_config_, 0, sizeof(default_agc_config_));
379
380 // Load our audio codec list.
381 ConstructCodecs();
382
383 // Load our RTP Header extensions.
384 rtp_header_extensions_.push_back(
385 RtpHeaderExtension(kRtpAudioLevelHeaderExtension,
386 kRtpAudioLevelHeaderExtensionDefaultId));
387 rtp_header_extensions_.push_back(
388 RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension,
389 kRtpAbsoluteSenderTimeHeaderExtensionDefaultId));
390 options_ = GetDefaultEngineOptions();
391}
392
393static bool IsOpus(const AudioCodec& codec) {
394 return (_stricmp(codec.name.c_str(), kOpusCodecName) == 0);
395}
396
397static bool IsIsac(const AudioCodec& codec) {
398 return (_stricmp(codec.name.c_str(), kIsacCodecName) == 0);
399}
400
401// True if params["stereo"] == "1"
402static bool IsOpusStereoEnabled(const AudioCodec& codec) {
403 int value;
404 return codec.GetParam(kCodecParamStereo, &value) && value == 1;
405}
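// For example, an Opus codec negotiated with the fmtp parameter "stereo=1"
// makes this return true; "stereo=0" or an absent stereo parameter returns
// false.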
406
407static bool IsValidOpusBitrate(int bitrate) {
408 return (bitrate >= kOpusMinBitrate && bitrate <= kOpusMaxBitrate);
409}
410
411// Returns 0 if params[kCodecParamMaxAverageBitrate] is not defined or invalid.
412// Returns the value of params[kCodecParamMaxAverageBitrate] otherwise.
413static int GetOpusBitrateFromParams(const AudioCodec& codec) {
414 int bitrate = 0;
415 if (!codec.GetParam(kCodecParamMaxAverageBitrate, &bitrate)) {
416 return 0;
417 }
418 if (!IsValidOpusBitrate(bitrate)) {
419 LOG(LS_WARNING) << "Codec parameter \"maxaveragebitrate\" has an "
420 << "invalid value: " << bitrate;
421 return 0;
422 }
423 return bitrate;
424}
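// For example, a remote description carrying "maxaveragebitrate=40000" for
// Opus yields 40000 here, while an out-of-range value such as 1000 is rejected
// with a warning and treated as "not specified" (0).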
425
426// Return true if params[kCodecParamUseInbandFec] == kParamValueTrue, false
427// otherwise.
428static bool IsOpusFecEnabled(const AudioCodec& codec) {
429 int value;
430 return codec.GetParam(kCodecParamUseInbandFec, &value) && value == 1;
431}
432
433// Set params[kCodecParamUseInbandFec]. Caller should make sure codec is Opus.
434static void SetOpusFec(AudioCodec *codec, bool opus_fec) {
435 if (opus_fec) {
436 codec->params[kCodecParamUseInbandFec] = kParamValueTrue;
437 } else {
438 codec->params.erase(kCodecParamUseInbandFec);
439 }
440}
441
442void WebRtcVoiceEngine::ConstructCodecs() {
443 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
444 int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
445 for (int i = 0; i < ncodecs; ++i) {
446 webrtc::CodecInst voe_codec;
447 if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
448 // Skip uncompressed formats.
449 if (_stricmp(voe_codec.plname, kL16CodecName) == 0) {
450 continue;
451 }
452
453 const CodecPref* pref = NULL;
454 for (size_t j = 0; j < ARRAY_SIZE(kCodecPrefs); ++j) {
455 if (_stricmp(kCodecPrefs[j].name, voe_codec.plname) == 0 &&
456 kCodecPrefs[j].clockrate == voe_codec.plfreq &&
457 kCodecPrefs[j].channels == voe_codec.channels) {
458 pref = &kCodecPrefs[j];
459 break;
460 }
461 }
462
463 if (pref) {
464 // Use the payload type that we've configured in our pref table;
465 // use the offset in our pref table to determine the sort order.
466 AudioCodec codec(pref->payload_type, voe_codec.plname, voe_codec.plfreq,
467 voe_codec.rate, voe_codec.channels,
468 ARRAY_SIZE(kCodecPrefs) - (pref - kCodecPrefs));
469 LOG(LS_INFO) << ToString(codec);
470 if (IsIsac(codec)) {
471 // Indicate auto-bandwidth in signaling.
472 codec.bitrate = 0;
473 }
474 if (IsOpus(codec)) {
475 // Only add fmtp parameters that differ from the spec.
476 if (kPreferredMinPTime != kOpusDefaultMinPTime) {
477 codec.params[kCodecParamMinPTime] =
478 talk_base::ToString(kPreferredMinPTime);
479 }
480 if (kPreferredMaxPTime != kOpusDefaultMaxPTime) {
481 codec.params[kCodecParamMaxPTime] =
482 talk_base::ToString(kPreferredMaxPTime);
483 }
484 // TODO(hellner): Add ptime, sprop-stereo, stereo and useinbandfec
485 // when they can be set to values other than the default.
486 SetOpusFec(&codec, false);
487 }
488 codecs_.push_back(codec);
489 } else {
490 LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec);
491 }
492 }
493 }
494 // Make sure they are in local preference order.
495 std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
496}
497
498WebRtcVoiceEngine::~WebRtcVoiceEngine() {
499 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
500 if (voe_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
501 LOG_RTCERR0(DeRegisterVoiceEngineObserver);
502 }
503 if (adm_) {
504 voe_wrapper_.reset();
505 adm_->Release();
506 adm_ = NULL;
507 }
508 if (adm_sc_) {
509 voe_wrapper_sc_.reset();
510 adm_sc_->Release();
511 adm_sc_ = NULL;
512 }
513
514 // Test to see if the media processor was deregistered properly
515 ASSERT(SignalRxMediaFrame.is_empty());
516 ASSERT(SignalTxMediaFrame.is_empty());
517
518 tracing_->SetTraceCallback(NULL);
519}
520
521bool WebRtcVoiceEngine::Init(talk_base::Thread* worker_thread) {
522 LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
523 bool res = InitInternal();
524 if (res) {
525 LOG(LS_INFO) << "WebRtcVoiceEngine::Init Done!";
526 } else {
527 LOG(LS_ERROR) << "WebRtcVoiceEngine::Init failed";
528 Terminate();
529 }
530 return res;
531}
532
533bool WebRtcVoiceEngine::InitInternal() {
534 // Temporarily turn logging level up for the Init call
535 int old_filter = log_filter_;
536 int extended_filter = log_filter_ | SeverityToFilter(talk_base::LS_INFO);
537 SetTraceFilter(extended_filter);
538 SetTraceOptions("");
539
540 // Init WebRtc VoiceEngine.
541 if (voe_wrapper_->base()->Init(adm_) == -1) {
542 LOG_RTCERR0_EX(Init, voe_wrapper_->error());
543 SetTraceFilter(old_filter);
544 return false;
545 }
546
547 SetTraceFilter(old_filter);
548 SetTraceOptions(log_options_);
549
550 // Log the VoiceEngine version info
551 char buffer[1024] = "";
552 voe_wrapper_->base()->GetVersion(buffer);
553 LOG(LS_INFO) << "WebRtc VoiceEngine Version:";
554 LogMultiline(talk_base::LS_INFO, buffer);
555
556 // Save the default AGC configuration settings. This must happen before
557 // calling SetOptions or the default will be overwritten.
558 if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) {
559 LOG_RTCERR0(GetAgcConfig);
560 return false;
561 }
562
563 // Set defaults for options, so that ApplyOptions applies them explicitly
564 // when we clear option (channel) overrides. External clients can still
565 // modify the defaults via SetOptions (on the media engine).
566 if (!SetOptions(GetDefaultEngineOptions())) {
567 return false;
568 }
569
570 // Print our codec list again for the call diagnostic log
571 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
572 for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
573 it != codecs_.end(); ++it) {
574 LOG(LS_INFO) << ToString(*it);
575 }
576
577 // Disable the DTMF playout when a tone is sent.
578 // PlayDtmfTone will be used if local playout is needed.
579 if (voe_wrapper_->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
580 LOG_RTCERR1(SetDtmfFeedbackStatus, false);
581 }
582
583 initialized_ = true;
584 return true;
585}
586
587bool WebRtcVoiceEngine::EnsureSoundclipEngineInit() {
588 if (voe_wrapper_sc_initialized_) {
589 return true;
590 }
591 // Note that, if initialization fails, voe_wrapper_sc_initialized_ will still
592 // be false, so subsequent calls to EnsureSoundclipEngineInit will
593 // probably just fail again. That's acceptable behavior.
594#if defined(LINUX) && !defined(HAVE_LIBPULSE)
595 voe_wrapper_sc_->hw()->SetAudioDeviceLayer(webrtc::kAudioLinuxAlsa);
596#endif
597
598 // Initialize the VoiceEngine instance that we'll use to play out sound clips.
599 if (voe_wrapper_sc_->base()->Init(adm_sc_) == -1) {
600 LOG_RTCERR0_EX(Init, voe_wrapper_sc_->error());
601 return false;
602 }
603
604 // On Windows, tell it to use the default sound (not communication) devices.
605 // First check whether there is a valid sound device for playback.
606 // TODO(juberti): Clean this up when we support setting the soundclip device.
607#ifdef WIN32
608 // The SetPlayoutDevice may not be implemented in the case of external ADM.
609 // TODO(ronghuawu): We should only check the adm_sc_ here, but current
610 // PeerConnection interface never sets the adm_sc_, so we need to check both
611 // in order to determine if the external adm is used.
612 if (!adm_ && !adm_sc_) {
613 int num_of_devices = 0;
614 if (voe_wrapper_sc_->hw()->GetNumOfPlayoutDevices(num_of_devices) != -1 &&
615 num_of_devices > 0) {
616 if (voe_wrapper_sc_->hw()->SetPlayoutDevice(kDefaultSoundclipDeviceId)
617 == -1) {
618 LOG_RTCERR1_EX(SetPlayoutDevice, kDefaultSoundclipDeviceId,
619 voe_wrapper_sc_->error());
620 return false;
621 }
622 } else {
623 LOG(LS_WARNING) << "No valid sound playout device found.";
624 }
625 }
626#endif
627 voe_wrapper_sc_initialized_ = true;
628 LOG(LS_INFO) << "Initialized WebRtc soundclip engine.";
629 return true;
630}
631
632void WebRtcVoiceEngine::Terminate() {
633 LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
634 initialized_ = false;
635
636 StopAecDump();
637
638 if (voe_wrapper_sc_) {
639 voe_wrapper_sc_initialized_ = false;
640 voe_wrapper_sc_->base()->Terminate();
641 }
642 voe_wrapper_->base()->Terminate();
643 desired_local_monitor_enable_ = false;
644}
645
646int WebRtcVoiceEngine::GetCapabilities() {
647 return AUDIO_SEND | AUDIO_RECV;
648}
649
650VoiceMediaChannel *WebRtcVoiceEngine::CreateChannel() {
651 WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this);
652 if (!ch->valid()) {
653 delete ch;
654 ch = NULL;
655 }
656 return ch;
657}
658
659SoundclipMedia *WebRtcVoiceEngine::CreateSoundclip() {
660 if (!EnsureSoundclipEngineInit()) {
661 LOG(LS_ERROR) << "Unable to create soundclip: soundclip engine failed to "
662 << "initialize.";
663 return NULL;
664 }
665 WebRtcSoundclipMedia *soundclip = new WebRtcSoundclipMedia(this);
666 if (!soundclip->Init() || !soundclip->Enable()) {
667 delete soundclip;
668 return NULL;
669 }
670 return soundclip;
671}
672
673bool WebRtcVoiceEngine::SetOptions(const AudioOptions& options) {
674 if (!ApplyOptions(options)) {
675 return false;
676 }
677 options_ = options;
678 return true;
679}
680
681bool WebRtcVoiceEngine::SetOptionOverrides(const AudioOptions& overrides) {
682 LOG(LS_INFO) << "Setting option overrides: " << overrides.ToString();
683 if (!ApplyOptions(overrides)) {
684 return false;
685 }
686 option_overrides_ = overrides;
687 return true;
688}
689
690bool WebRtcVoiceEngine::ClearOptionOverrides() {
691 LOG(LS_INFO) << "Clearing option overrides.";
692 AudioOptions options = options_;
693 // Only call ApplyOptions if |option_overrides_| contains overridden options.
694 // ApplyOptions affects NS, AGC, and other options that are shared between
695 // all WebRtcVoiceEngineChannels.
696 if (option_overrides_ == AudioOptions()) {
697 return true;
698 }
699
700 if (!ApplyOptions(options)) {
701 return false;
702 }
703 option_overrides_ = AudioOptions();
704 return true;
705}
706
707// AudioOptions defaults are set in InitInternal (for options with corresponding
708// MediaEngineInterface flags) and in SetOptions(int) for flagless options.
709bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
710 AudioOptions options = options_in; // The options are modified below.
711 // kEcConference is AEC with high suppression.
712 webrtc::EcModes ec_mode = webrtc::kEcConference;
713 webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone;
714 webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
715 webrtc::NsModes ns_mode = webrtc::kNsHighSuppression;
716 bool aecm_comfort_noise = false;
717 if (options.aecm_generate_comfort_noise.Get(&aecm_comfort_noise)) {
718 LOG(LS_VERBOSE) << "Comfort noise explicitly set to "
719 << aecm_comfort_noise << " (default is false).";
720 }
721
722#if defined(IOS)
723 // On iOS, VPIO provides built-in EC and AGC.
724 options.echo_cancellation.Set(false);
725 options.auto_gain_control.Set(false);
726#elif defined(ANDROID)
727 ec_mode = webrtc::kEcAecm;
728#endif
729
730#if defined(IOS) || defined(ANDROID)
731 // Set the AGC mode for iOS as well despite disabling it above, to avoid
732 // unsupported configuration errors from webrtc.
733 agc_mode = webrtc::kAgcFixedDigital;
734 options.typing_detection.Set(false);
735 options.experimental_agc.Set(false);
736 options.experimental_aec.Set(false);
737 options.experimental_ns.Set(false);
738#endif
739
740 LOG(LS_INFO) << "Applying audio options: " << options.ToString();
741
742 webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
743
744 bool echo_cancellation;
745 if (options.echo_cancellation.Get(&echo_cancellation)) {
746 if (voep->SetEcStatus(echo_cancellation, ec_mode) == -1) {
747 LOG_RTCERR2(SetEcStatus, echo_cancellation, ec_mode);
748 return false;
749 } else {
750 LOG(LS_VERBOSE) << "Echo control set to " << echo_cancellation
751 << " with mode " << ec_mode;
752 }
753#if !defined(ANDROID)
754 // TODO(ajm): Remove the error return on Android from webrtc.
755 if (voep->SetEcMetricsStatus(echo_cancellation) == -1) {
756 LOG_RTCERR1(SetEcMetricsStatus, echo_cancellation);
757 return false;
758 }
759#endif
760 if (ec_mode == webrtc::kEcAecm) {
761 if (voep->SetAecmMode(aecm_mode, aecm_comfort_noise) != 0) {
762 LOG_RTCERR2(SetAecmMode, aecm_mode, aecm_comfort_noise);
763 return false;
764 }
765 }
766 }
767
768 bool auto_gain_control;
769 if (options.auto_gain_control.Get(&auto_gain_control)) {
770 if (voep->SetAgcStatus(auto_gain_control, agc_mode) == -1) {
771 LOG_RTCERR2(SetAgcStatus, auto_gain_control, agc_mode);
772 return false;
773 } else {
774 LOG(LS_VERBOSE) << "Auto gain set to " << auto_gain_control
775 << " with mode " << agc_mode;
776 }
777 }
778
779 if (options.tx_agc_target_dbov.IsSet() ||
780 options.tx_agc_digital_compression_gain.IsSet() ||
781 options.tx_agc_limiter.IsSet()) {
782 // Override default_agc_config_. Generally, an unset option means "leave
783 // the VoE bits alone" in this function, so we want whatever is set to be
784 // stored as the new "default". If we didn't, then setting e.g.
785 // tx_agc_target_dbov would reset digital compression gain and limiter
786 // settings.
787 // Also, if we don't update default_agc_config_, then adjust_agc_delta
788 // would be an offset from the original values, and not whatever was set
789 // explicitly.
790 default_agc_config_.targetLeveldBOv =
791 options.tx_agc_target_dbov.GetWithDefaultIfUnset(
792 default_agc_config_.targetLeveldBOv);
793 default_agc_config_.digitalCompressionGaindB =
794 options.tx_agc_digital_compression_gain.GetWithDefaultIfUnset(
795 default_agc_config_.digitalCompressionGaindB);
796 default_agc_config_.limiterEnable =
797 options.tx_agc_limiter.GetWithDefaultIfUnset(
798 default_agc_config_.limiterEnable);
799 if (voe_wrapper_->processing()->SetAgcConfig(default_agc_config_) == -1) {
800 LOG_RTCERR3(SetAgcConfig,
801 default_agc_config_.targetLeveldBOv,
802 default_agc_config_.digitalCompressionGaindB,
803 default_agc_config_.limiterEnable);
804 return false;
805 }
806 }
807
808 bool noise_suppression;
809 if (options.noise_suppression.Get(&noise_suppression)) {
810 if (voep->SetNsStatus(noise_suppression, ns_mode) == -1) {
811 LOG_RTCERR2(SetNsStatus, noise_suppression, ns_mode);
812 return false;
813 } else {
814 LOG(LS_VERBOSE) << "Noise suppression set to " << noise_suppression
815 << " with mode " << ns_mode;
816 }
817 }
818
819 bool experimental_ns;
820 if (options.experimental_ns.Get(&experimental_ns)) {
821 webrtc::AudioProcessing* audioproc =
822 voe_wrapper_->base()->audio_processing();
823 // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
824 // returns NULL on audio_processing().
825 if (audioproc) {
826 if (audioproc->EnableExperimentalNs(experimental_ns) == -1) {
827 LOG_RTCERR1(EnableExperimentalNs, experimental_ns);
828 return false;
829 }
830 } else {
831 LOG(LS_VERBOSE) << "Experimental noise suppression set to "
832 << experimental_ns;
833 }
834 }
835
836 bool highpass_filter;
837 if (options.highpass_filter.Get(&highpass_filter)) {
838 LOG(LS_INFO) << "High pass filter enabled? " << highpass_filter;
839 if (voep->EnableHighPassFilter(highpass_filter) == -1) {
840 LOG_RTCERR1(SetHighpassFilterStatus, highpass_filter);
841 return false;
842 }
843 }
844
845 bool stereo_swapping;
846 if (options.stereo_swapping.Get(&stereo_swapping)) {
847 LOG(LS_INFO) << "Stereo swapping enabled? " << stereo_swapping;
848 voep->EnableStereoChannelSwapping(stereo_swapping);
849 if (voep->IsStereoChannelSwappingEnabled() != stereo_swapping) {
850 LOG_RTCERR1(EnableStereoChannelSwapping, stereo_swapping);
851 return false;
852 }
853 }
854
855 bool typing_detection;
856 if (options.typing_detection.Get(&typing_detection)) {
857 LOG(LS_INFO) << "Typing detection is enabled? " << typing_detection;
858 if (voep->SetTypingDetectionStatus(typing_detection) == -1) {
859 // In case of error, log the info and continue
860 LOG_RTCERR1(SetTypingDetectionStatus, typing_detection);
861 }
862 }
863
864 int adjust_agc_delta;
865 if (options.adjust_agc_delta.Get(&adjust_agc_delta)) {
866 LOG(LS_INFO) << "Adjust agc delta is " << adjust_agc_delta;
867 if (!AdjustAgcLevel(adjust_agc_delta)) {
868 return false;
869 }
870 }
871
872 bool aec_dump;
873 if (options.aec_dump.Get(&aec_dump)) {
874 LOG(LS_INFO) << "Aec dump is enabled? " << aec_dump;
875 if (aec_dump)
876 StartAecDump(kAecDumpByAudioOptionFilename);
877 else
878 StopAecDump();
879 }
880
881 bool experimental_aec;
882 if (options.experimental_aec.Get(&experimental_aec)) {
883 LOG(LS_INFO) << "Experimental aec is " << experimental_aec;
884 webrtc::AudioProcessing* audioproc =
885 voe_wrapper_->base()->audio_processing();
886 // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
887 // returns NULL on audio_processing().
888 if (audioproc) {
889 webrtc::Config config;
890 config.Set<webrtc::DelayCorrection>(
891 new webrtc::DelayCorrection(experimental_aec));
892 audioproc->SetExtraOptions(config);
893 }
894 }
895
896 uint32 recording_sample_rate;
897 if (options.recording_sample_rate.Get(&recording_sample_rate)) {
898 LOG(LS_INFO) << "Recording sample rate is " << recording_sample_rate;
899 if (voe_wrapper_->hw()->SetRecordingSampleRate(recording_sample_rate)) {
900 LOG_RTCERR1(SetRecordingSampleRate, recording_sample_rate);
901 }
902 }
903
904 uint32 playout_sample_rate;
905 if (options.playout_sample_rate.Get(&playout_sample_rate)) {
906 LOG(LS_INFO) << "Playout sample rate is " << playout_sample_rate;
907 if (voe_wrapper_->hw()->SetPlayoutSampleRate(playout_sample_rate)) {
908 LOG_RTCERR1(SetPlayoutSampleRate, playout_sample_rate);
909 }
910 }
911
912 bool opus_fec = false;
913 if (options.opus_fec.Get(&opus_fec)) {
914 LOG(LS_INFO) << "Opus FEC is enabled? " << opus_fec;
915 for (std::vector<AudioCodec>::iterator it = codecs_.begin();
916 it != codecs_.end(); ++it) {
917 if (IsOpus(*it))
918 SetOpusFec(&(*it), opus_fec);
919 }
920 }
921
922 return true;
923}
924
925bool WebRtcVoiceEngine::SetDelayOffset(int offset) {
926 voe_wrapper_->processing()->SetDelayOffsetMs(offset);
927 if (voe_wrapper_->processing()->DelayOffsetMs() != offset) {
928 LOG_RTCERR1(SetDelayOffsetMs, offset);
929 return false;
930 }
931
932 return true;
933}
934
935struct ResumeEntry {
936 ResumeEntry(WebRtcVoiceMediaChannel *c, bool p, SendFlags s)
937 : channel(c),
938 playout(p),
939 send(s) {
940 }
941
942 WebRtcVoiceMediaChannel *channel;
943 bool playout;
944 SendFlags send;
945};
946
947// TODO(juberti): Refactor this so that the core logic can be used to set the
948// soundclip device. At that time, reinstate the soundclip pause/resume code.
949bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
950 const Device* out_device) {
951#if !defined(IOS)
952 int in_id = in_device ? talk_base::FromString<int>(in_device->id) :
953 kDefaultAudioDeviceId;
954 int out_id = out_device ? talk_base::FromString<int>(out_device->id) :
955 kDefaultAudioDeviceId;
956 // The device manager uses -1 as the default device, which was the case for
957 // VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
958#ifndef WIN32
959 if (-1 == in_id) {
960 in_id = kDefaultAudioDeviceId;
961 }
962 if (-1 == out_id) {
963 out_id = kDefaultAudioDeviceId;
964 }
965#endif
966
967 std::string in_name = (in_id != kDefaultAudioDeviceId) ?
968 in_device->name : "Default device";
969 std::string out_name = (out_id != kDefaultAudioDeviceId) ?
970 out_device->name : "Default device";
971 LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
972 << ") and speaker to (id=" << out_id << ", name=" << out_name
973 << ")";
974
975 // If we're running the local monitor, we need to stop it first.
976 bool ret = true;
977 if (!PauseLocalMonitor()) {
978 LOG(LS_WARNING) << "Failed to pause local monitor";
979 ret = false;
980 }
981
982 // Must also pause all audio playback and capture.
983 for (ChannelList::const_iterator i = channels_.begin();
984 i != channels_.end(); ++i) {
985 WebRtcVoiceMediaChannel *channel = *i;
986 if (!channel->PausePlayout()) {
987 LOG(LS_WARNING) << "Failed to pause playout";
988 ret = false;
989 }
990 if (!channel->PauseSend()) {
991 LOG(LS_WARNING) << "Failed to pause send";
992 ret = false;
993 }
994 }
995
996 // Find the recording device id in VoiceEngine and set recording device.
997 if (!FindWebRtcAudioDeviceId(true, in_name, in_id, &in_id)) {
998 ret = false;
999 }
1000 if (ret) {
1001 if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
1002 LOG_RTCERR2(SetRecordingDevice, in_name, in_id);
1003 ret = false;
1004 }
1005 }
1006
1007 // Find the playout device id in VoiceEngine and set playout device.
1008 if (!FindWebRtcAudioDeviceId(false, out_name, out_id, &out_id)) {
1009 LOG(LS_WARNING) << "Failed to find VoiceEngine device id for " << out_name;
1010 ret = false;
1011 }
1012 if (ret) {
1013 if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
1014 LOG_RTCERR2(SetPlayoutDevice, out_name, out_id);
1015 ret = false;
1016 }
1017 }
1018
1019 // Resume all audio playback and capture.
1020 for (ChannelList::const_iterator i = channels_.begin();
1021 i != channels_.end(); ++i) {
1022 WebRtcVoiceMediaChannel *channel = *i;
1023 if (!channel->ResumePlayout()) {
1024 LOG(LS_WARNING) << "Failed to resume playout";
1025 ret = false;
1026 }
1027 if (!channel->ResumeSend()) {
1028 LOG(LS_WARNING) << "Failed to resume send";
1029 ret = false;
1030 }
1031 }
1032
1033 // Resume local monitor.
1034 if (!ResumeLocalMonitor()) {
1035 LOG(LS_WARNING) << "Failed to resume local monitor";
1036 ret = false;
1037 }
1038
1039 if (ret) {
1040 LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
1041 << ") and speaker to (id="<< out_id << " name=" << out_name
1042 << ")";
1043 }
1044
1045 return ret;
1046#else
1047 return true;
1048#endif // !IOS
1049}
1050
1051bool WebRtcVoiceEngine::FindWebRtcAudioDeviceId(
1052 bool is_input, const std::string& dev_name, int dev_id, int* rtc_id) {
1053 // In Linux, VoiceEngine uses the same device dev_id as the device manager.
1054#if defined(LINUX) || defined(ANDROID)
1055 *rtc_id = dev_id;
1056 return true;
1057#else
1058 // In Windows and Mac, we need to find the VoiceEngine device id by name
1059 // unless the input dev_id is the default device id.
1060 if (kDefaultAudioDeviceId == dev_id) {
1061 *rtc_id = dev_id;
1062 return true;
1063 }
1064
1065 // Get the number of VoiceEngine audio devices.
1066 int count = 0;
1067 if (is_input) {
1068 if (-1 == voe_wrapper_->hw()->GetNumOfRecordingDevices(count)) {
1069 LOG_RTCERR0(GetNumOfRecordingDevices);
1070 return false;
1071 }
1072 } else {
1073 if (-1 == voe_wrapper_->hw()->GetNumOfPlayoutDevices(count)) {
1074 LOG_RTCERR0(GetNumOfPlayoutDevices);
1075 return false;
1076 }
1077 }
1078
1079 for (int i = 0; i < count; ++i) {
1080 char name[128];
1081 char guid[128];
1082 if (is_input) {
1083 voe_wrapper_->hw()->GetRecordingDeviceName(i, name, guid);
1084 LOG(LS_VERBOSE) << "VoiceEngine microphone " << i << ": " << name;
1085 } else {
1086 voe_wrapper_->hw()->GetPlayoutDeviceName(i, name, guid);
1087 LOG(LS_VERBOSE) << "VoiceEngine speaker " << i << ": " << name;
1088 }
1089
1090 std::string webrtc_name(name);
1091 if (dev_name.compare(0, webrtc_name.size(), webrtc_name) == 0) {
1092 *rtc_id = i;
1093 return true;
1094 }
1095 }
1096 LOG(LS_WARNING) << "VoiceEngine cannot find device: " << dev_name;
1097 return false;
1098#endif
1099}
1100
1101bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
1102 unsigned int ulevel;
1103 if (voe_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) {
1104 LOG_RTCERR1(GetSpeakerVolume, level);
1105 return false;
1106 }
1107 *level = ulevel;
1108 return true;
1109}
1110
1111bool WebRtcVoiceEngine::SetOutputVolume(int level) {
1112 ASSERT(level >= 0 && level <= 255);
1113 if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) {
1114 LOG_RTCERR1(SetSpeakerVolume, level);
1115 return false;
1116 }
1117 return true;
1118}
1119
1120int WebRtcVoiceEngine::GetInputLevel() {
1121 unsigned int ulevel;
1122 return (voe_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ?
1123 static_cast<int>(ulevel) : -1;
1124}
1125
1126bool WebRtcVoiceEngine::SetLocalMonitor(bool enable) {
1127 desired_local_monitor_enable_ = enable;
1128 return ChangeLocalMonitor(desired_local_monitor_enable_);
1129}
1130
1131bool WebRtcVoiceEngine::ChangeLocalMonitor(bool enable) {
1132 // The voe file api is not available in chrome.
1133 if (!voe_wrapper_->file()) {
1134 return false;
1135 }
1136 if (enable && !monitor_) {
1137 monitor_.reset(new WebRtcMonitorStream);
1138 if (voe_wrapper_->file()->StartRecordingMicrophone(monitor_.get()) == -1) {
1139 LOG_RTCERR1(StartRecordingMicrophone, monitor_.get());
1140 // Must call Stop() because there are some cases where Start will report
1141 // failure but still change the state, and if we leave VE in the on state
1142 // then it could crash later when trying to invoke methods on our monitor.
1143 voe_wrapper_->file()->StopRecordingMicrophone();
1144 monitor_.reset();
1145 return false;
1146 }
1147 } else if (!enable && monitor_) {
1148 voe_wrapper_->file()->StopRecordingMicrophone();
1149 monitor_.reset();
1150 }
1151 return true;
1152}
1153
1154bool WebRtcVoiceEngine::PauseLocalMonitor() {
1155 return ChangeLocalMonitor(false);
1156}
1157
1158bool WebRtcVoiceEngine::ResumeLocalMonitor() {
1159 return ChangeLocalMonitor(desired_local_monitor_enable_);
1160}
1161
1162const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() {
1163 return codecs_;
1164}
1165
1166bool WebRtcVoiceEngine::FindCodec(const AudioCodec& in) {
1167 return FindWebRtcCodec(in, NULL);
1168}
1169
1170// Get the VoiceEngine codec that matches |in|, with the supplied settings.
1171bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
1172 webrtc::CodecInst* out) {
1173 int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
1174 for (int i = 0; i < ncodecs; ++i) {
1175 webrtc::CodecInst voe_codec;
1176 if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
1177 AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
1178 voe_codec.rate, voe_codec.channels, 0);
1179 bool multi_rate = IsCodecMultiRate(voe_codec);
1180 // Allow arbitrary rates for ISAC to be specified.
1181 if (multi_rate) {
1182 // Set codec.bitrate to 0 so the check for codec.Matches() passes.
1183 codec.bitrate = 0;
1184 }
1185 if (codec.Matches(in)) {
1186 if (out) {
1187 // Fixup the payload type.
1188 voe_codec.pltype = in.id;
1189
1190 // Set bitrate if specified.
1191 if (multi_rate && in.bitrate != 0) {
1192 voe_codec.rate = in.bitrate;
1193 }
1194
1195 // Apply codec-specific settings.
1196 if (IsIsac(codec)) {
1197 // If ISAC and an explicit bitrate is not specified,
1198 // enable auto bandwidth adjustment.
1199 voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
1200 }
1201 *out = voe_codec;
1202 }
1203 return true;
1204 }
1205 }
1206 }
1207 return false;
1208}
1209const std::vector<RtpHeaderExtension>&
1210WebRtcVoiceEngine::rtp_header_extensions() const {
1211 return rtp_header_extensions_;
1212}
1213
1214void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
1215 // if min_sev == -1, we keep the current log level.
1216 if (min_sev >= 0) {
1217 SetTraceFilter(SeverityToFilter(min_sev));
1218 }
1219 log_options_ = filter;
1220 SetTraceOptions(initialized_ ? log_options_ : "");
1221}
1222
1223int WebRtcVoiceEngine::GetLastEngineError() {
1224 return voe_wrapper_->error();
1225}
1226
1227void WebRtcVoiceEngine::SetTraceFilter(int filter) {
1228 log_filter_ = filter;
1229 tracing_->SetTraceFilter(filter);
1230}
1231
1232// We support three different logging settings for VoiceEngine:
1233// 1. Observer callback that goes into talk diagnostic logfile.
1234// Use --logfile and --loglevel
1235//
1236// 2. Encrypted VoiceEngine log for debugging VoiceEngine.
1237// Use --voice_loglevel --voice_logfilter "tracefile file_name"
1238//
1239// 3. EC log and dump for debugging QualityEngine.
1240// Use --voice_loglevel --voice_logfilter "recordEC file_name"
1241//
1242// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
1243// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
1244void WebRtcVoiceEngine::SetTraceOptions(const std::string& options) {
1245 // Set encrypted trace file.
1246 std::vector<std::string> opts;
1247 talk_base::tokenize(options, ' ', '"', '"', &opts);
1248 std::vector<std::string>::iterator tracefile =
1249 std::find(opts.begin(), opts.end(), "tracefile");
1250 if (tracefile != opts.end() && ++tracefile != opts.end()) {
1251 // Write encrypted debug output (at same loglevel) to file
1252 // EncryptedTraceFile no longer supported.
1253 if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
1254 LOG_RTCERR1(SetTraceFile, *tracefile);
1255 }
1256 }
1257
1258 // Allow trace options to override the trace filter. We default
1259 // it to log_filter_ (as a translation of libjingle log levels)
1260 // elsewhere, but this allows clients to explicitly set webrtc
1261 // log levels.
1262 std::vector<std::string>::iterator tracefilter =
1263 std::find(opts.begin(), opts.end(), "tracefilter");
1264 if (tracefilter != opts.end() && ++tracefilter != opts.end()) {
1265 if (!tracing_->SetTraceFilter(talk_base::FromString<int>(*tracefilter))) {
1266 LOG_RTCERR1(SetTraceFilter, *tracefilter);
1267 }
1268 }
1269
1270 // Set AEC dump file
1271 std::vector<std::string>::iterator recordEC =
1272 std::find(opts.begin(), opts.end(), "recordEC");
1273 if (recordEC != opts.end()) {
1274 ++recordEC;
1275 if (recordEC != opts.end())
1276 StartAecDump(recordEC->c_str());
1277 else
1278 StopAecDump();
1279 }
1280}
1281
1282// Ignore spammy trace messages, mostly from the stats API when we haven't
1283// gotten RTCP info yet from the remote side.
1284bool WebRtcVoiceEngine::ShouldIgnoreTrace(const std::string& trace) {
1285 static const char* kTracesToIgnore[] = {
1286 "\tfailed to GetReportBlockInformation",
1287 "GetRecCodec() failed to get received codec",
1288 "GetReceivedRtcpStatistics: Could not get received RTP statistics",
1289 "GetRemoteRTCPData() failed to measure statistics due to lack of received RTP and/or RTCP packets", // NOLINT
1290 "GetRemoteRTCPData() failed to retrieve sender info for remote side",
1291 "GetRTPStatistics() failed to measure RTT since no RTP packets have been received yet", // NOLINT
1292 "GetRTPStatistics() failed to read RTP statistics from the RTP/RTCP module",
1293 "GetRTPStatistics() failed to retrieve RTT from the RTP/RTCP module",
1294 "SenderInfoReceived No received SR",
1295 "StatisticsRTP() no statistics available",
1296 "TransmitMixer::TypingDetection() VE_TYPING_NOISE_WARNING message has been posted", // NOLINT
1297 "TransmitMixer::TypingDetection() pending noise-saturation warning exists", // NOLINT
1298 "GetRecPayloadType() failed to retrieve RX payload type (error=10026)", // NOLINT
1299 "StopPlayingFileAsMicrophone() isnot playing (error=8088)",
1300 NULL
1301 };
1302 for (const char* const* p = kTracesToIgnore; *p; ++p) {
1303 if (trace.find(*p) != std::string::npos) {
1304 return true;
1305 }
1306 }
1307 return false;
1308}
1309
1310void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace,
1311 int length) {
1312 talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
1313 if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
1314 sev = talk_base::LS_ERROR;
1315 else if (level == webrtc::kTraceWarning)
1316 sev = talk_base::LS_WARNING;
1317 else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
1318 sev = talk_base::LS_INFO;
1319 else if (level == webrtc::kTraceTerseInfo)
1320 sev = talk_base::LS_INFO;
1321
1322 // Skip past boilerplate prefix text
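  // The first 71 characters of a WebRTC trace line are assumed to be a
  // fixed-width header (timestamp, module tag, ids); anything shorter is
  // reported as malformed, otherwise the header and the trailing character
  // are stripped before logging.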
1323 if (length < 72) {
1324 std::string msg(trace, length);
1325 LOG(LS_ERROR) << "Malformed webrtc log message: ";
1326 LOG_V(sev) << msg;
1327 } else {
1328 std::string msg(trace + 71, length - 72);
1329 if (!ShouldIgnoreTrace(msg)) {
1330 LOG_V(sev) << "webrtc: " << msg;
1331 }
1332 }
1333}
1334
1335void WebRtcVoiceEngine::CallbackOnError(int channel_num, int err_code) {
1336 talk_base::CritScope lock(&channels_cs_);
1337 WebRtcVoiceMediaChannel* channel = NULL;
1338 uint32 ssrc = 0;
1339 LOG(LS_WARNING) << "VoiceEngine error " << err_code << " reported on channel "
1340 << channel_num << ".";
1341 if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
1342 ASSERT(channel != NULL);
1343 channel->OnError(ssrc, err_code);
1344 } else {
1345 LOG(LS_ERROR) << "VoiceEngine channel " << channel_num
1346 << " could not be found in channel list when error reported.";
1347 }
1348}
1349
1350bool WebRtcVoiceEngine::FindChannelAndSsrc(
1351 int channel_num, WebRtcVoiceMediaChannel** channel, uint32* ssrc) const {
1352 ASSERT(channel != NULL && ssrc != NULL);
1353
1354 *channel = NULL;
1355 *ssrc = 0;
1356 // Find corresponding channel and ssrc
1357 for (ChannelList::const_iterator it = channels_.begin();
1358 it != channels_.end(); ++it) {
1359 ASSERT(*it != NULL);
1360 if ((*it)->FindSsrc(channel_num, ssrc)) {
1361 *channel = *it;
1362 return true;
1363 }
1364 }
1365
1366 return false;
1367}
1368
1369// This method will search through the WebRtcVoiceMediaChannels and
1370// obtain the voice engine's channel number.
1371bool WebRtcVoiceEngine::FindChannelNumFromSsrc(
1372 uint32 ssrc, MediaProcessorDirection direction, int* channel_num) {
1373 ASSERT(channel_num != NULL);
1374 ASSERT(direction == MPD_RX || direction == MPD_TX);
1375
1376 *channel_num = -1;
1377 // Find corresponding channel for ssrc.
1378 for (ChannelList::const_iterator it = channels_.begin();
1379 it != channels_.end(); ++it) {
1380 ASSERT(*it != NULL);
1381 if (direction & MPD_RX) {
1382 *channel_num = (*it)->GetReceiveChannelNum(ssrc);
1383 }
1384 if (*channel_num == -1 && (direction & MPD_TX)) {
1385 *channel_num = (*it)->GetSendChannelNum(ssrc);
1386 }
1387 if (*channel_num != -1) {
1388 return true;
1389 }
1390 }
1391 LOG(LS_WARNING) << "FindChannelFromSsrc. No Channel Found for Ssrc: " << ssrc;
1392 return false;
1393}
1394
1395void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel *channel) {
1396 talk_base::CritScope lock(&channels_cs_);
1397 channels_.push_back(channel);
1398}
1399
1400void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel *channel) {
1401 talk_base::CritScope lock(&channels_cs_);
1402 ChannelList::iterator i = std::find(channels_.begin(),
1403 channels_.end(),
1404 channel);
1405 if (i != channels_.end()) {
1406 channels_.erase(i);
1407 }
1408}
1409
1410void WebRtcVoiceEngine::RegisterSoundclip(WebRtcSoundclipMedia *soundclip) {
1411 soundclips_.push_back(soundclip);
1412}
1413
1414void WebRtcVoiceEngine::UnregisterSoundclip(WebRtcSoundclipMedia *soundclip) {
1415 SoundclipList::iterator i = std::find(soundclips_.begin(),
1416 soundclips_.end(),
1417 soundclip);
1418 if (i != soundclips_.end()) {
1419 soundclips_.erase(i);
1420 }
1421}
1422
1423// Adjusts the default AGC target level by the specified delta.
1424// NB: If we start messing with other config fields, we'll want
1425// to save the current webrtc::AgcConfig as well.
1426bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
1427 webrtc::AgcConfig config = default_agc_config_;
1428 config.targetLeveldBOv -= delta;
1429
1430 LOG(LS_INFO) << "Adjusting AGC level from default -"
1431 << default_agc_config_.targetLeveldBOv << "dB to -"
1432 << config.targetLeveldBOv << "dB";
1433
1434 if (voe_wrapper_->processing()->SetAgcConfig(config) == -1) {
1435 LOG_RTCERR1(SetAgcConfig, config.targetLeveldBOv);
1436 return false;
1437 }
1438 return true;
1439}
1440
1441bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm,
1442 webrtc::AudioDeviceModule* adm_sc) {
1443 if (initialized_) {
1444 LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init.";
1445 return false;
1446 }
1447 if (adm_) {
1448 adm_->Release();
1449 adm_ = NULL;
1450 }
1451 if (adm) {
1452 adm_ = adm;
1453 adm_->AddRef();
1454 }
1455
1456 if (adm_sc_) {
1457 adm_sc_->Release();
1458 adm_sc_ = NULL;
1459 }
1460 if (adm_sc) {
1461 adm_sc_ = adm_sc;
1462 adm_sc_->AddRef();
1463 }
1464 return true;
1465}
1466
1467bool WebRtcVoiceEngine::StartAecDump(talk_base::PlatformFile file) {
1468 FILE* aec_dump_file_stream = talk_base::FdopenPlatformFileForWriting(file);
1469 if (!aec_dump_file_stream) {
1470 LOG(LS_ERROR) << "Could not open AEC dump file stream.";
1471 if (!talk_base::ClosePlatformFile(file))
1472 LOG(LS_WARNING) << "Could not close file.";
1473 return false;
1474 }
1475 StopAecDump();
1476 if (voe_wrapper_->processing()->StartDebugRecording(aec_dump_file_stream) !=
1477 webrtc::AudioProcessing::kNoError) {
1478 LOG_RTCERR0(StartDebugRecording);
1479 fclose(aec_dump_file_stream);
wu@webrtc.orga9890802013-12-13 00:21:03 +00001480 return false;
1481 }
1482 is_dumping_aec_ = true;
1483 return true;
1484}
1485
1486bool WebRtcVoiceEngine::RegisterProcessor(
1487 uint32 ssrc,
1488 VoiceProcessor* voice_processor,
1489 MediaProcessorDirection direction) {
1490 bool register_with_webrtc = false;
1491 int channel_id = -1;
1492 bool success = false;
1493 uint32* processor_ssrc = NULL;
1494 bool found_channel = FindChannelNumFromSsrc(ssrc, direction, &channel_id);
1495 if (voice_processor == NULL || !found_channel) {
1496 LOG(LS_WARNING) << "Media Processing Registration Failed. ssrc: " << ssrc
1497 << " foundChannel: " << found_channel;
1498 return false;
1499 }
1500
1501 webrtc::ProcessingTypes processing_type;
1502 {
1503 talk_base::CritScope cs(&signal_media_critical_);
1504 if (direction == MPD_RX) {
1505 processing_type = webrtc::kPlaybackAllChannelsMixed;
1506 if (SignalRxMediaFrame.is_empty()) {
1507 register_with_webrtc = true;
1508 processor_ssrc = &rx_processor_ssrc_;
1509 }
1510 SignalRxMediaFrame.connect(voice_processor,
1511 &VoiceProcessor::OnFrame);
1512 } else {
1513 processing_type = webrtc::kRecordingPerChannel;
1514 if (SignalTxMediaFrame.is_empty()) {
1515 register_with_webrtc = true;
1516 processor_ssrc = &tx_processor_ssrc_;
1517 }
1518 SignalTxMediaFrame.connect(voice_processor,
1519 &VoiceProcessor::OnFrame);
1520 }
1521 }
1522 if (register_with_webrtc) {
1523 // TODO(janahan): when registering, consider instantiating a
1524 // VoeMediaProcess object and not making the engine extend the interface.
1525 if (voe()->media() && voe()->media()->
1526 RegisterExternalMediaProcessing(channel_id,
1527 processing_type,
1528 *this) != -1) {
1529 LOG(LS_INFO) << "Media Processing Registration Succeeded. channel:"
1530 << channel_id;
1531 *processor_ssrc = ssrc;
1532 success = true;
1533 } else {
1534 LOG_RTCERR2(RegisterExternalMediaProcessing,
1535 channel_id,
1536 processing_type);
1537 success = false;
1538 }
1539 } else {
1540 // If we don't have to register with the engine, we just needed to
1541 // connect a new processor; set success to true.
1542 success = true;
1543 }
1544 return success;
1545}
1546
1547bool WebRtcVoiceEngine::UnregisterProcessorChannel(
1548 MediaProcessorDirection channel_direction,
1549 uint32 ssrc,
1550 VoiceProcessor* voice_processor,
1551 MediaProcessorDirection processor_direction) {
1552 bool success = true;
1553 FrameSignal* signal;
1554 webrtc::ProcessingTypes processing_type;
1555 uint32* processor_ssrc = NULL;
1556 if (channel_direction == MPD_RX) {
1557 signal = &SignalRxMediaFrame;
1558 processing_type = webrtc::kPlaybackAllChannelsMixed;
1559 processor_ssrc = &rx_processor_ssrc_;
1560 } else {
1561 signal = &SignalTxMediaFrame;
1562 processing_type = webrtc::kRecordingPerChannel;
1563 processor_ssrc = &tx_processor_ssrc_;
1564 }
1565
1566 int deregister_id = -1;
1567 {
1568 talk_base::CritScope cs(&signal_media_critical_);
1569 if ((processor_direction & channel_direction) != 0 && !signal->is_empty()) {
1570 signal->disconnect(voice_processor);
1571 int channel_id = -1;
1572 bool found_channel = FindChannelNumFromSsrc(ssrc,
1573 channel_direction,
1574 &channel_id);
1575 if (signal->is_empty() && found_channel) {
1576 deregister_id = channel_id;
1577 }
1578 }
1579 }
1580 if (deregister_id != -1) {
1581 if (voe()->media() &&
1582 voe()->media()->DeRegisterExternalMediaProcessing(deregister_id,
1583 processing_type) != -1) {
1584 *processor_ssrc = 0;
1585 LOG(LS_INFO) << "Media Processing DeRegistration Succeeded. channel:"
1586 << deregister_id;
1587 } else {
1588 LOG_RTCERR2(DeRegisterExternalMediaProcessing,
1589 deregister_id,
1590 processing_type);
1591 success = false;
1592 }
1593 }
1594 return success;
1595}
1596
1597bool WebRtcVoiceEngine::UnregisterProcessor(
1598 uint32 ssrc,
1599 VoiceProcessor* voice_processor,
1600 MediaProcessorDirection direction) {
1601 bool success = true;
1602 if (voice_processor == NULL) {
1603 LOG(LS_WARNING) << "Media Processing Deregistration Failed. ssrc: "
1604 << ssrc;
1605 return false;
1606 }
1607 if (!UnregisterProcessorChannel(MPD_RX, ssrc, voice_processor, direction)) {
1608 success = false;
1609 }
1610 if (!UnregisterProcessorChannel(MPD_TX, ssrc, voice_processor, direction)) {
1611 success = false;
1612 }
1613 return success;
1614}
1615
1616// Implementing method from WebRtc VoEMediaProcess interface
1617// Do not lock mux_channel_cs_ in this callback.
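// Frames delivered here are dispatched to the registered processors: playback
// (all-channels-mixed) frames go to SignalRxMediaFrame and per-channel
// recording frames go to SignalTxMediaFrame.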
1618void WebRtcVoiceEngine::Process(int channel,
1619 webrtc::ProcessingTypes type,
1620 int16_t audio10ms[],
1621 int length,
1622 int sampling_freq,
1623 bool is_stereo) {
1624 talk_base::CritScope cs(&signal_media_critical_);
1625 AudioFrame frame(audio10ms, length, sampling_freq, is_stereo);
1626 if (type == webrtc::kPlaybackAllChannelsMixed) {
1627 SignalRxMediaFrame(rx_processor_ssrc_, MPD_RX, &frame);
1628 } else if (type == webrtc::kRecordingPerChannel) {
1629 SignalTxMediaFrame(tx_processor_ssrc_, MPD_TX, &frame);
1630 } else {
1631 LOG(LS_WARNING) << "Media Processing invoked unexpectedly."
1632 << " channel: " << channel << " type: " << type
1633 << " tx_ssrc: " << tx_processor_ssrc_
1634 << " rx_ssrc: " << rx_processor_ssrc_;
1635 }
1636}
1637
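// Starts an AEC (acoustic echo cancellation) diagnostic dump to |filename|.
// The recording is performed by the audio processing module and continues
// until StopAecDump() is called.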
1638void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
1639 if (!is_dumping_aec_) {
 1640    // Start an AEC dump only if one is not already in progress.
1641 if (voe_wrapper_->processing()->StartDebugRecording(
1642 filename.c_str()) != webrtc::AudioProcessing::kNoError) {
wu@webrtc.orga9890802013-12-13 00:21:03 +00001643 LOG_RTCERR1(StartDebugRecording, filename.c_str());
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001644 } else {
1645 is_dumping_aec_ = true;
1646 }
1647 }
1648}
1649
1650void WebRtcVoiceEngine::StopAecDump() {
1651 if (is_dumping_aec_) {
 1652    // Stop the AEC dump only if one is in progress.
1653 if (voe_wrapper_->processing()->StopDebugRecording() !=
1654 webrtc::AudioProcessing::kNoError) {
1655 LOG_RTCERR0(StopDebugRecording);
1656 }
1657 is_dumping_aec_ = false;
1658 }
1659}
1660
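// Helpers for creating VoE channels: media channels are created on the main
// VoE instance (|voe_wrapper_|), soundclip channels on the separate soundclip
// instance (|voe_wrapper_sc_|); both are created with |voe_config_|.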
sergeyu@chromium.org5bc25c42013-12-05 00:24:06 +00001661int WebRtcVoiceEngine::CreateVoiceChannel(VoEWrapper* voice_engine_wrapper) {
sergeyu@chromium.org5bc25c42013-12-05 00:24:06 +00001662 return voice_engine_wrapper->base()->CreateChannel(voe_config_);
sergeyu@chromium.org5bc25c42013-12-05 00:24:06 +00001663}
1664
1665int WebRtcVoiceEngine::CreateMediaVoiceChannel() {
1666 return CreateVoiceChannel(voe_wrapper_.get());
1667}
1668
1669int WebRtcVoiceEngine::CreateSoundclipVoiceChannel() {
1670 return CreateVoiceChannel(voe_wrapper_sc_.get());
1671}
1672
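// Adapter that connects an AudioRenderer to a VoE channel: audio delivered
// through the AudioRenderer::Sink interface is forwarded to the channel via
// webrtc::AudioTransport, and channel membership is managed in Start()/Stop().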
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001673class WebRtcVoiceMediaChannel::WebRtcVoiceChannelRenderer
1674 : public AudioRenderer::Sink {
1675 public:
1676 WebRtcVoiceChannelRenderer(int ch,
1677 webrtc::AudioTransport* voe_audio_transport)
1678 : channel_(ch),
1679 voe_audio_transport_(voe_audio_transport),
1680 renderer_(NULL) {
1681 }
1682 virtual ~WebRtcVoiceChannelRenderer() {
1683 Stop();
1684 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001685
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001686  // Starts rendering by registering this object as the renderer's sink so that
 1687  // data callbacks are received.
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001688 // This method is called on the libjingle worker thread.
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001689 // TODO(xians): Make sure Start() is called only once.
1690 void Start(AudioRenderer* renderer) {
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001691 talk_base::CritScope lock(&lock_);
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001692 ASSERT(renderer != NULL);
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001693 if (renderer_ != NULL) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001694 ASSERT(renderer_ == renderer);
1695 return;
1696 }
1697
1698 // TODO(xians): Remove AddChannel() call after Chrome turns on APM
1699 // in getUserMedia by default.
1700 renderer->AddChannel(channel_);
1701 renderer->SetSink(this);
1702 renderer_ = renderer;
1703 }
1704
 1705  // Stops rendering by setting the sink of the renderer to NULL. No data
 1706  // callbacks will be received after this method returns.
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001707 // This method is called on the libjingle worker thread.
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001708 void Stop() {
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001709 talk_base::CritScope lock(&lock_);
1710 if (renderer_ == NULL)
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001711 return;
1712
1713 renderer_->RemoveChannel(channel_);
1714 renderer_->SetSink(NULL);
1715 renderer_ = NULL;
1716 }
1717
1718 // AudioRenderer::Sink implementation.
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001719 // This method is called on the audio thread.
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001720 virtual void OnData(const void* audio_data,
1721 int bits_per_sample,
1722 int sample_rate,
1723 int number_of_channels,
1724 int number_of_frames) OVERRIDE {
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001725 voe_audio_transport_->OnData(channel_,
1726 audio_data,
1727 bits_per_sample,
1728 sample_rate,
1729 number_of_channels,
1730 number_of_frames);
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001731 }
1732
1733 // Callback from the |renderer_| when it is going away. In case Start() has
1734 // never been called, this callback won't be triggered.
1735 virtual void OnClose() OVERRIDE {
1736 talk_base::CritScope lock(&lock_);
 1737    // Set |renderer_| to NULL to make sure no more callbacks reach
 1738    // the renderer.
1739 renderer_ = NULL;
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001740 }
1741
1742 // Accessor to the VoE channel ID.
1743 int channel() const { return channel_; }
1744
1745 private:
1746 const int channel_;
1747 webrtc::AudioTransport* const voe_audio_transport_;
1748
1749 // Raw pointer to AudioRenderer owned by LocalAudioTrackHandler.
 1750  // PeerConnection will make sure to invalidate the pointer before the object
1751 // goes away.
1752 AudioRenderer* renderer_;
henrike@webrtc.orga7b98182014-02-21 15:51:43 +00001753
1754 // Protects |renderer_| in Start(), Stop() and OnClose().
1755 talk_base::CriticalSection lock_;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001756};
1757
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001758// WebRtcVoiceMediaChannel
1759WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
1760 : WebRtcMediaChannel<VoiceMediaChannel, WebRtcVoiceEngine>(
1761 engine,
sergeyu@chromium.org5bc25c42013-12-05 00:24:06 +00001762 engine->CreateMediaVoiceChannel()),
wu@webrtc.org1d1ffc92013-10-16 18:12:02 +00001763 send_bw_setting_(false),
wu@webrtc.org1d1ffc92013-10-16 18:12:02 +00001764 send_bw_bps_(0),
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001765 options_(),
1766 dtmf_allowed_(false),
1767 desired_playout_(false),
1768 nack_enabled_(false),
1769 playout_(false),
wu@webrtc.org967bfff2013-09-19 05:49:50 +00001770 typing_noise_detected_(false),
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001771 desired_send_(SEND_NOTHING),
1772 send_(SEND_NOTHING),
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001773 default_receive_ssrc_(0) {
1774 engine->RegisterChannel(this);
1775 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
1776 << voe_channel();
1777
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001778 ConfigureSendChannel(voe_channel());
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001779}
1780
1781WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
1782 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel "
1783 << voe_channel();
1784
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001785 // Remove any remaining send streams, the default channel will be deleted
1786 // later.
1787 while (!send_channels_.empty())
1788 RemoveSendStream(send_channels_.begin()->first);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001789
1790 // Unregister ourselves from the engine.
1791 engine()->UnregisterChannel(this);
1792 // Remove any remaining streams.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001793 while (!receive_channels_.empty()) {
1794 RemoveRecvStream(receive_channels_.begin()->first);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001795 }
1796
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001797 // Delete the default channel.
1798 DeleteChannel(voe_channel());
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001799}
1800
1801bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
1802 LOG(LS_INFO) << "Setting voice channel options: "
1803 << options.ToString();
1804
wu@webrtc.orgde305012013-10-31 15:40:38 +00001805 // Check if DSCP value is changed from previous.
1806 bool dscp_option_changed = (options_.dscp != options.dscp);
1807
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001808 // TODO(xians): Add support to set different options for different send
1809 // streams after we support multiple APMs.
1810
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001811 // We retain all of the existing options, and apply the given ones
1812 // on top. This means there is no way to "clear" options such that
1813 // they go back to the engine default.
1814 options_.SetAll(options);
1815
1816 if (send_ != SEND_NOTHING) {
1817 if (!engine()->SetOptionOverrides(options_)) {
1818 LOG(LS_WARNING) <<
1819 "Failed to engine SetOptionOverrides during channel SetOptions.";
1820 return false;
1821 }
1822 } else {
1823 // Will be interpreted when appropriate.
1824 }
1825
wu@webrtc.org97077a32013-10-25 21:18:33 +00001826 // Receiver-side auto gain control happens per channel, so set it here from
1827 // options. Note that, like conference mode, setting it on the engine won't
1828 // have the desired effect, since voice channels don't inherit options from
1829 // the media engine when those options are applied per-channel.
1830 bool rx_auto_gain_control;
1831 if (options.rx_auto_gain_control.Get(&rx_auto_gain_control)) {
1832 if (engine()->voe()->processing()->SetRxAgcStatus(
1833 voe_channel(), rx_auto_gain_control,
1834 webrtc::kAgcFixedDigital) == -1) {
1835 LOG_RTCERR1(SetRxAgcStatus, rx_auto_gain_control);
1836 return false;
1837 } else {
1838 LOG(LS_VERBOSE) << "Rx auto gain set to " << rx_auto_gain_control
1839 << " with mode " << webrtc::kAgcFixedDigital;
1840 }
1841 }
1842 if (options.rx_agc_target_dbov.IsSet() ||
1843 options.rx_agc_digital_compression_gain.IsSet() ||
1844 options.rx_agc_limiter.IsSet()) {
1845 webrtc::AgcConfig config;
1846 // If only some of the options are being overridden, get the current
1847 // settings for the channel and bail if they aren't available.
1848 if (!options.rx_agc_target_dbov.IsSet() ||
1849 !options.rx_agc_digital_compression_gain.IsSet() ||
1850 !options.rx_agc_limiter.IsSet()) {
1851 if (engine()->voe()->processing()->GetRxAgcConfig(
1852 voe_channel(), config) != 0) {
1853 LOG(LS_ERROR) << "Failed to get default rx agc configuration for "
1854 << "channel " << voe_channel() << ". Since not all rx "
1855 << "agc options are specified, unable to safely set rx "
1856 << "agc options.";
1857 return false;
1858 }
1859 }
1860 config.targetLeveldBOv =
1861 options.rx_agc_target_dbov.GetWithDefaultIfUnset(
1862 config.targetLeveldBOv);
1863 config.digitalCompressionGaindB =
1864 options.rx_agc_digital_compression_gain.GetWithDefaultIfUnset(
1865 config.digitalCompressionGaindB);
1866 config.limiterEnable = options.rx_agc_limiter.GetWithDefaultIfUnset(
1867 config.limiterEnable);
1868 if (engine()->voe()->processing()->SetRxAgcConfig(
1869 voe_channel(), config) == -1) {
1870 LOG_RTCERR4(SetRxAgcConfig, voe_channel(), config.targetLeveldBOv,
1871 config.digitalCompressionGaindB, config.limiterEnable);
1872 return false;
1873 }
1874 }
wu@webrtc.orgde305012013-10-31 15:40:38 +00001875 if (dscp_option_changed) {
1876 talk_base::DiffServCodePoint dscp = talk_base::DSCP_DEFAULT;
henrika@webrtc.orgaebb1ad2014-01-14 10:00:58 +00001877 if (options_.dscp.GetWithDefaultIfUnset(false))
wu@webrtc.orgde305012013-10-31 15:40:38 +00001878 dscp = kAudioDscpValue;
1879 if (MediaChannel::SetDscp(dscp) != 0) {
1880 LOG(LS_WARNING) << "Failed to set DSCP settings for audio channel";
1881 }
1882 }
wu@webrtc.org97077a32013-10-25 21:18:33 +00001883
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001884 LOG(LS_INFO) << "Set voice channel options. Current options: "
1885 << options_.ToString();
1886 return true;
1887}
1888
1889bool WebRtcVoiceMediaChannel::SetRecvCodecs(
1890 const std::vector<AudioCodec>& codecs) {
1891 // Set the payload types to be used for incoming media.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001892 LOG(LS_INFO) << "Setting receive voice codecs:";
1893
1894 std::vector<AudioCodec> new_codecs;
1895 // Find all new codecs. We allow adding new codecs but don't allow changing
 1896  // the payload type of codecs that are already configured since we might
1897 // already be receiving packets with that payload type.
1898 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001899 it != codecs.end(); ++it) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001900 AudioCodec old_codec;
1901 if (FindCodec(recv_codecs_, *it, &old_codec)) {
1902 if (old_codec.id != it->id) {
1903 LOG(LS_ERROR) << it->name << " payload type changed.";
1904 return false;
1905 }
1906 } else {
1907 new_codecs.push_back(*it);
1908 }
1909 }
1910 if (new_codecs.empty()) {
1911 // There are no new codecs to configure. Already configured codecs are
1912 // never removed.
1913 return true;
1914 }
1915
1916 if (playout_) {
 1917    // Receive codecs cannot be changed while playing, so we temporarily
1918 // pause playout.
1919 PausePlayout();
1920 }
1921
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001922 bool ret = true;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001923 for (std::vector<AudioCodec>::const_iterator it = new_codecs.begin();
1924 it != new_codecs.end() && ret; ++it) {
1925 webrtc::CodecInst voe_codec;
1926 if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
1927 LOG(LS_INFO) << ToString(*it);
1928 voe_codec.pltype = it->id;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001929 if (default_receive_ssrc_ == 0) {
1930 // Set the receive codecs on the default channel explicitly if the
 1931        // default channel is not used by |receive_channels_|; this happens in
1932 // conference mode or in non-conference mode when there is no playout
1933 // channel.
1934 // TODO(xians): Figure out how we use the default channel in conference
1935 // mode.
1936 if (engine()->voe()->codec()->SetRecPayloadType(
1937 voe_channel(), voe_codec) == -1) {
1938 LOG_RTCERR2(SetRecPayloadType, voe_channel(), ToString(voe_codec));
1939 ret = false;
1940 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001941 }
1942
1943 // Set the receive codecs on all receiving channels.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001944 for (ChannelMap::iterator it = receive_channels_.begin();
1945 it != receive_channels_.end() && ret; ++it) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001946 if (engine()->voe()->codec()->SetRecPayloadType(
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00001947 it->second->channel(), voe_codec) == -1) {
1948 LOG_RTCERR2(SetRecPayloadType, it->second->channel(),
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001949 ToString(voe_codec));
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001950 ret = false;
1951 }
1952 }
1953 } else {
1954 LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
1955 ret = false;
1956 }
1957 }
1958 if (ret) {
1959 recv_codecs_ = codecs;
1960 }
1961
1962 if (desired_playout_ && !playout_) {
1963 ResumePlayout();
1964 }
1965 return ret;
1966}
1967
1968bool WebRtcVoiceMediaChannel::SetSendCodecs(
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001969 int channel, const std::vector<AudioCodec>& codecs) {
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00001970  // Disable VAD, NACK, FEC, and RED unless we know the other side wants them.
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001971 engine()->voe()->codec()->SetVADStatus(channel, false);
1972 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0);
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00001973#ifdef USE_WEBRTC_DEV_BRANCH
1974 engine()->voe()->rtp()->SetREDStatus(channel, false);
1975 engine()->voe()->codec()->SetFECStatus(channel, false);
1976#else
1977 // TODO(minyue): Remove code under #else case after new WebRTC roll.
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001978 engine()->voe()->rtp()->SetFECStatus(channel, false);
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00001979#endif // USE_WEBRTC_DEV_BRANCH
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001980
1981 // Scan through the list to figure out the codec to use for sending, along
1982 // with the proper configuration for VAD and DTMF.
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00001983 bool found_send_codec = false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001984 webrtc::CodecInst send_codec;
1985 memset(&send_codec, 0, sizeof(send_codec));
1986
wu@webrtc.org05e7b442014-04-01 17:44:24 +00001987 bool nack_enabled = nack_enabled_;
1988
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00001989 // Set send codec (the first non-telephone-event/CN codec)
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001990 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
1991 it != codecs.end(); ++it) {
1992 // Ignore codecs we don't know about. The negotiation step should prevent
1993 // this, but double-check to be sure.
1994 webrtc::CodecInst voe_codec;
1995 if (!engine()->FindWebRtcCodec(*it, &voe_codec)) {
henrika@webrtc.orgaebb1ad2014-01-14 10:00:58 +00001996 LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001997 continue;
1998 }
1999
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002000 if (IsTelephoneEventCodec(it->name) || IsCNCodec(it->name)) {
2001 // Skip telephone-event/CN codec, which will be handled later.
2002 continue;
2003 }
2004
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002005 // If OPUS, change what we send according to the "stereo" codec
2006 // parameter, and not the "channels" parameter. We set
2007 // voe_codec.channels to 2 if "stereo=1" and 1 otherwise. If
2008 // the bitrate is not specified, i.e. is zero, we set it to the
2009 // appropriate default value for mono or stereo Opus.
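    // For example (illustrative only): an Opus codec negotiated with "stereo=1"
    // and no usable bitrate is sent with voe_codec.channels == 2 and
    // voe_codec.rate == kOpusStereoBitrate, unless a valid bitrate supplied via
    // the codec params overrides it below.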
2010 if (IsOpus(*it)) {
2011 if (IsOpusStereoEnabled(*it)) {
2012 voe_codec.channels = 2;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002013 if (!IsValidOpusBitrate(it->bitrate)) {
2014 if (it->bitrate != 0) {
 2015          LOG(LS_WARNING) << "Overriding the invalid supplied bitrate ("
2016 << it->bitrate
2017 << ") with default opus stereo bitrate: "
2018 << kOpusStereoBitrate;
2019 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002020 voe_codec.rate = kOpusStereoBitrate;
2021 }
2022 } else {
2023 voe_codec.channels = 1;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002024 if (!IsValidOpusBitrate(it->bitrate)) {
2025 if (it->bitrate != 0) {
 2026          LOG(LS_WARNING) << "Overriding the invalid supplied bitrate ("
2027 << it->bitrate
2028 << ") with default opus mono bitrate: "
2029 << kOpusMonoBitrate;
2030 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002031 voe_codec.rate = kOpusMonoBitrate;
2032 }
2033 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002034 int bitrate_from_params = GetOpusBitrateFromParams(*it);
2035 if (bitrate_from_params != 0) {
2036 voe_codec.rate = bitrate_from_params;
2037 }
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00002038
buildbot@webrtc.orgd27d9ae2014-06-19 01:56:46 +00002039 // For Opus, we also enable inband FEC if it is requested.
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00002040 if (IsOpusFecEnabled(*it)) {
2041 LOG(LS_INFO) << "Enabling Opus FEC on channel " << channel;
2042#ifdef USE_WEBRTC_DEV_BRANCH
2043 if (engine()->voe()->codec()->SetFECStatus(channel, true) == -1) {
2044 // Enable in-band FEC of the Opus codec. Treat any failure as a fatal
2045 // internal error.
2046 LOG_RTCERR2(SetFECStatus, channel, true);
2047 return false;
2048 }
2049#endif // USE_WEBRTC_DEV_BRANCH
2050 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002051 }
2052
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002053 // We'll use the first codec in the list to actually send audio data.
2054 // Be sure to use the payload type requested by the remote side.
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00002055 // "red", for RED audio, is a special case where the actual codec to be
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002056 // used is specified in params.
2057 if (IsRedCodec(it->name)) {
2058 // Parse out the RED parameters. If we fail, just ignore RED;
2059 // we don't support all possible params/usage scenarios.
2060 if (!GetRedSendCodec(*it, codecs, &send_codec)) {
2061 continue;
2062 }
2063
2064 // Enable redundant encoding of the specified codec. Treat any
2065 // failure as a fatal internal error.
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00002066#ifdef USE_WEBRTC_DEV_BRANCH
2067 LOG(LS_INFO) << "Enabling RED on channel " << channel;
2068 if (engine()->voe()->rtp()->SetREDStatus(channel, true, it->id) == -1) {
2069 LOG_RTCERR3(SetREDStatus, channel, true, it->id);
2070#else
2071 // TODO(minyue): Remove code under #else case after new WebRTC roll.
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002072 LOG(LS_INFO) << "Enabling FEC";
2073 if (engine()->voe()->rtp()->SetFECStatus(channel, true, it->id) == -1) {
2074 LOG_RTCERR3(SetFECStatus, channel, true, it->id);
buildbot@webrtc.orgae740dd2014-06-17 10:56:41 +00002075#endif // USE_WEBRTC_DEV_BRANCH
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002076 return false;
2077 }
2078 } else {
2079 send_codec = voe_codec;
wu@webrtc.org05e7b442014-04-01 17:44:24 +00002080 nack_enabled = IsNackEnabled(*it);
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002081 }
2082 found_send_codec = true;
2083 break;
2084 }
2085
wu@webrtc.org05e7b442014-04-01 17:44:24 +00002086 if (nack_enabled_ != nack_enabled) {
2087 SetNack(channel, nack_enabled);
2088 nack_enabled_ = nack_enabled;
2089 }
2090
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002091 if (!found_send_codec) {
 2092    LOG(LS_WARNING) << "No suitable audio send codec found in the codec list.";
2093 return false;
2094 }
2095
2096 // Set the codec immediately, since SetVADStatus() depends on whether
2097 // the current codec is mono or stereo.
2098 if (!SetSendCodec(channel, send_codec))
2099 return false;
2100
2101 // Always update the |send_codec_| to the currently set send codec.
2102 send_codec_.reset(new webrtc::CodecInst(send_codec));
2103
2104 if (send_bw_setting_) {
2105 SetSendBandwidthInternal(send_bw_bps_);
2106 }
2107
2108 // Loop through the codecs list again to config the telephone-event/CN codec.
2109 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
2110 it != codecs.end(); ++it) {
2111 // Ignore codecs we don't know about. The negotiation step should prevent
2112 // this, but double-check to be sure.
2113 webrtc::CodecInst voe_codec;
2114 if (!engine()->FindWebRtcCodec(*it, &voe_codec)) {
2115 LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
2116 continue;
2117 }
2118
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002119 // Find the DTMF telephone event "codec" and tell VoiceEngine channels
2120 // about it.
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002121 if (IsTelephoneEventCodec(it->name)) {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002122 if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
2123 channel, it->id) == -1) {
2124 LOG_RTCERR2(SetSendTelephoneEventPayloadType, channel, it->id);
2125 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002126 }
henrike@webrtc.org704bf9e2014-02-27 17:52:04 +00002127 } else if (IsCNCodec(it->name)) {
2128 // Turn voice activity detection/comfort noise on if supported.
2129 // Set the wideband CN payload type appropriately.
2130 // (narrowband always uses the static payload type 13).
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002131 webrtc::PayloadFrequencies cn_freq;
2132 switch (it->clockrate) {
2133 case 8000:
2134 cn_freq = webrtc::kFreq8000Hz;
2135 break;
2136 case 16000:
2137 cn_freq = webrtc::kFreq16000Hz;
2138 break;
2139 case 32000:
2140 cn_freq = webrtc::kFreq32000Hz;
2141 break;
2142 default:
2143 LOG(LS_WARNING) << "CN frequency " << it->clockrate
2144 << " not supported.";
2145 continue;
2146 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002147      // Set the CN payload type and the VAD status.
2148 // The CN payload type for 8000 Hz clockrate is fixed at 13.
2149 if (cn_freq != webrtc::kFreq8000Hz) {
2150 if (engine()->voe()->codec()->SetSendCNPayloadType(
2151 channel, it->id, cn_freq) == -1) {
2152 LOG_RTCERR3(SetSendCNPayloadType, channel, it->id, cn_freq);
2153 // TODO(ajm): This failure condition will be removed from VoE.
2154 // Restore the return here when we update to a new enough webrtc.
2155 //
2156 // Not returning false because the SetSendCNPayloadType will fail if
2157 // the channel is already sending.
2158 // This can happen if the remote description is applied twice, for
2159 // example in the case of ROAP on top of JSEP, where both side will
2160 // send the offer.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002161 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002162 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002163 // Only turn on VAD if we have a CN payload type that matches the
2164 // clockrate for the codec we are going to use.
2165 if (it->clockrate == send_codec.plfreq) {
2166 LOG(LS_INFO) << "Enabling VAD";
2167 if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) {
2168 LOG_RTCERR2(SetVADStatus, channel, true);
2169 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002170 }
2171 }
2172 }
wu@webrtc.org1d1ffc92013-10-16 18:12:02 +00002173 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002174 return true;
2175}
2176
2177bool WebRtcVoiceMediaChannel::SetSendCodecs(
2178 const std::vector<AudioCodec>& codecs) {
2179 dtmf_allowed_ = false;
2180 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
2181 it != codecs.end(); ++it) {
2182 // Find the DTMF telephone event "codec".
2183 if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
2184 _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
2185 dtmf_allowed_ = true;
2186 }
2187 }
2188
2189 // Cache the codecs in order to configure the channel created later.
2190 send_codecs_ = codecs;
2191 for (ChannelMap::iterator iter = send_channels_.begin();
2192 iter != send_channels_.end(); ++iter) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002193 if (!SetSendCodecs(iter->second->channel(), codecs)) {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002194 return false;
2195 }
2196 }
2197
wu@webrtc.org05e7b442014-04-01 17:44:24 +00002198 // Set nack status on receive channels and update |nack_enabled_|.
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002199 SetNack(receive_channels_, nack_enabled_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002200 return true;
2201}
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002202
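// Applies the NACK setting to every channel in |channels| (for example, the
// receive channels when the send codecs are updated).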
2203void WebRtcVoiceMediaChannel::SetNack(const ChannelMap& channels,
2204 bool nack_enabled) {
2205 for (ChannelMap::const_iterator it = channels.begin();
2206 it != channels.end(); ++it) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002207 SetNack(it->second->channel(), nack_enabled);
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002208 }
2209}
2210
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002211void WebRtcVoiceMediaChannel::SetNack(int channel, bool nack_enabled) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002212 if (nack_enabled) {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002213 LOG(LS_INFO) << "Enabling NACK for channel " << channel;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002214 engine()->voe()->rtp()->SetNACKStatus(channel, true, kNackMaxPackets);
2215 } else {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002216 LOG(LS_INFO) << "Disabling NACK for channel " << channel;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002217 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0);
2218 }
2219}
2220
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002221bool WebRtcVoiceMediaChannel::SetSendCodec(
2222 const webrtc::CodecInst& send_codec) {
2223 LOG(LS_INFO) << "Selected voice codec " << ToString(send_codec)
2224 << ", bitrate=" << send_codec.rate;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002225 for (ChannelMap::iterator iter = send_channels_.begin();
2226 iter != send_channels_.end(); ++iter) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002227 if (!SetSendCodec(iter->second->channel(), send_codec))
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002228 return false;
2229 }
2230
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002231 return true;
2232}
2233
2234bool WebRtcVoiceMediaChannel::SetSendCodec(
2235 int channel, const webrtc::CodecInst& send_codec) {
2236 LOG(LS_INFO) << "Send channel " << channel << " selected voice codec "
2237 << ToString(send_codec) << ", bitrate=" << send_codec.rate;
2238
wu@webrtc.org05e7b442014-04-01 17:44:24 +00002239 webrtc::CodecInst current_codec;
2240 if (engine()->voe()->codec()->GetSendCodec(channel, current_codec) == 0 &&
2241 (send_codec == current_codec)) {
2242 // Codec is already configured, we can return without setting it again.
2243 return true;
2244 }
2245
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002246 if (engine()->voe()->codec()->SetSendCodec(channel, send_codec) == -1) {
2247 LOG_RTCERR2(SetSendCodec, channel, ToString(send_codec));
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002248 return false;
2249 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002250 return true;
2251}
2252
2253bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
2254 const std::vector<RtpHeaderExtension>& extensions) {
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002255 if (receive_extensions_ == extensions) {
2256 return true;
2257 }
2258
2259 // The default channel may or may not be in |receive_channels_|. Set the rtp
2260 // header extensions for default channel regardless.
2261 if (!SetChannelRecvRtpHeaderExtensions(voe_channel(), extensions)) {
2262 return false;
2263 }
henrike@webrtc.org79047f92014-03-06 23:46:59 +00002264
2265 // Loop through all receive channels and enable/disable the extensions.
2266 for (ChannelMap::const_iterator channel_it = receive_channels_.begin();
2267 channel_it != receive_channels_.end(); ++channel_it) {
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002268 if (!SetChannelRecvRtpHeaderExtensions(channel_it->second->channel(),
2269 extensions)) {
henrike@webrtc.org79047f92014-03-06 23:46:59 +00002270 return false;
2271 }
2272 }
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002273
2274 receive_extensions_ = extensions;
2275 return true;
2276}
2277
2278bool WebRtcVoiceMediaChannel::SetChannelRecvRtpHeaderExtensions(
2279 int channel_id, const std::vector<RtpHeaderExtension>& extensions) {
2280#ifdef USE_WEBRTC_DEV_BRANCH
2281 const RtpHeaderExtension* audio_level_extension =
2282 FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
2283 if (!SetHeaderExtension(
2284 &webrtc::VoERTP_RTCP::SetReceiveAudioLevelIndicationStatus, channel_id,
2285 audio_level_extension)) {
2286 return false;
2287 }
2288#endif // USE_WEBRTC_DEV_BRANCH
2289
2290 const RtpHeaderExtension* send_time_extension =
2291 FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
2292 if (!SetHeaderExtension(
2293 &webrtc::VoERTP_RTCP::SetReceiveAbsoluteSenderTimeStatus, channel_id,
2294 send_time_extension)) {
2295 return false;
2296 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002297 return true;
2298}
2299
2300bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
2301 const std::vector<RtpHeaderExtension>& extensions) {
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002302 if (send_extensions_ == extensions) {
2303 return true;
2304 }
2305
2306 // The default channel may or may not be in |send_channels_|. Set the rtp
2307 // header extensions for default channel regardless.
2308
2309 if (!SetChannelSendRtpHeaderExtensions(voe_channel(), extensions)) {
2310 return false;
2311 }
2312
2313 // Loop through all send channels and enable/disable the extensions.
2314 for (ChannelMap::const_iterator channel_it = send_channels_.begin();
2315 channel_it != send_channels_.end(); ++channel_it) {
2316 if (!SetChannelSendRtpHeaderExtensions(channel_it->second->channel(),
2317 extensions)) {
2318 return false;
2319 }
2320 }
2321
2322 send_extensions_ = extensions;
2323 return true;
2324}
2325
2326bool WebRtcVoiceMediaChannel::SetChannelSendRtpHeaderExtensions(
2327 int channel_id, const std::vector<RtpHeaderExtension>& extensions) {
henrike@webrtc.org79047f92014-03-06 23:46:59 +00002328 const RtpHeaderExtension* audio_level_extension =
2329 FindHeaderExtension(extensions, kRtpAudioLevelHeaderExtension);
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002330
henrike@webrtc.org79047f92014-03-06 23:46:59 +00002331 if (!SetHeaderExtension(
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002332 &webrtc::VoERTP_RTCP::SetSendAudioLevelIndicationStatus, channel_id,
henrike@webrtc.org79047f92014-03-06 23:46:59 +00002333 audio_level_extension)) {
2334 return false;
2335 }
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002336
2337 const RtpHeaderExtension* send_time_extension =
2338 FindHeaderExtension(extensions, kRtpAbsoluteSenderTimeHeaderExtension);
henrike@webrtc.org79047f92014-03-06 23:46:59 +00002339 if (!SetHeaderExtension(
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002340 &webrtc::VoERTP_RTCP::SetSendAbsoluteSenderTimeStatus, channel_id,
henrike@webrtc.org79047f92014-03-06 23:46:59 +00002341 send_time_extension)) {
2342 return false;
2343 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002344
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002345 return true;
2346}
2347
2348bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) {
2349 desired_playout_ = playout;
2350 return ChangePlayout(desired_playout_);
2351}
2352
2353bool WebRtcVoiceMediaChannel::PausePlayout() {
2354 return ChangePlayout(false);
2355}
2356
2357bool WebRtcVoiceMediaChannel::ResumePlayout() {
2358 return ChangePlayout(desired_playout_);
2359}
2360
2361bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
2362 if (playout_ == playout) {
2363 return true;
2364 }
2365
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002366 // Change the playout of all channels to the new state.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002367 bool result = true;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002368 if (receive_channels_.empty()) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002369 // Only toggle the default channel if we don't have any other channels.
2370 result = SetPlayout(voe_channel(), playout);
2371 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002372 for (ChannelMap::iterator it = receive_channels_.begin();
2373 it != receive_channels_.end() && result; ++it) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002374 if (!SetPlayout(it->second->channel(), playout)) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002375 LOG(LS_ERROR) << "SetPlayout " << playout << " on channel "
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002376 << it->second->channel() << " failed";
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002377 result = false;
2378 }
2379 }
2380
2381 if (result) {
2382 playout_ = playout;
2383 }
2384 return result;
2385}
2386
2387bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) {
2388 desired_send_ = send;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002389 if (!send_channels_.empty())
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002390 return ChangeSend(desired_send_);
2391 return true;
2392}
2393
2394bool WebRtcVoiceMediaChannel::PauseSend() {
2395 return ChangeSend(SEND_NOTHING);
2396}
2397
2398bool WebRtcVoiceMediaChannel::ResumeSend() {
2399 return ChangeSend(desired_send_);
2400}
2401
2402bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
2403 if (send_ == send) {
2404 return true;
2405 }
2406
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002407  // Apply the engine option overrides when starting to send from the microphone.
2408 if (send == SEND_MICROPHONE)
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002409 engine()->SetOptionOverrides(options_);
2410
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002411 // Change the settings on each send channel.
2412 for (ChannelMap::iterator iter = send_channels_.begin();
2413 iter != send_channels_.end(); ++iter) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002414 if (!ChangeSend(iter->second->channel(), send))
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002415 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002416 }
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002417
2418 // Clear up the options after stopping sending.
2419 if (send == SEND_NOTHING)
2420 engine()->ClearOptionOverrides();
2421
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002422 send_ = send;
2423 return true;
2424}
2425
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002426bool WebRtcVoiceMediaChannel::ChangeSend(int channel, SendFlags send) {
2427 if (send == SEND_MICROPHONE) {
2428 if (engine()->voe()->base()->StartSend(channel) == -1) {
2429 LOG_RTCERR1(StartSend, channel);
2430 return false;
2431 }
2432 if (engine()->voe()->file() &&
2433 engine()->voe()->file()->StopPlayingFileAsMicrophone(channel) == -1) {
2434 LOG_RTCERR1(StopPlayingFileAsMicrophone, channel);
2435 return false;
2436 }
2437 } else { // SEND_NOTHING
2438 ASSERT(send == SEND_NOTHING);
2439 if (engine()->voe()->base()->StopSend(channel) == -1) {
2440 LOG_RTCERR1(StopSend, channel);
2441 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002442 }
2443 }
2444
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002445 return true;
2446}
2447
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002448// TODO(ronghuawu): Change this method to return bool.
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002449void WebRtcVoiceMediaChannel::ConfigureSendChannel(int channel) {
2450 if (engine()->voe()->network()->RegisterExternalTransport(
2451 channel, *this) == -1) {
2452 LOG_RTCERR2(RegisterExternalTransport, channel, this);
2453 }
2454
2455 // Enable RTCP (for quality stats and feedback messages)
2456 EnableRtcp(channel);
2457
2458 // Reset all recv codecs; they will be enabled via SetRecvCodecs.
2459 ResetRecvCodecs(channel);
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002460
2461 // Set RTP header extension for the new channel.
2462 SetChannelSendRtpHeaderExtensions(channel, send_extensions_);
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002463}
2464
2465bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
2466 if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) {
2467 LOG_RTCERR1(DeRegisterExternalTransport, channel);
2468 }
2469
2470 if (engine()->voe()->base()->DeleteChannel(channel) == -1) {
2471 LOG_RTCERR1(DeleteChannel, channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002472 return false;
2473 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002474
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002475 return true;
2476}
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002477
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002478bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
 2479  // If the default channel is already used for sending, create a new channel;
 2480  // otherwise use the default channel for sending.
2481 int channel = GetSendChannelNum(sp.first_ssrc());
2482 if (channel != -1) {
2483 LOG(LS_ERROR) << "Stream already exists with ssrc " << sp.first_ssrc();
2484 return false;
2485 }
2486
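// The default channel can host this new send stream only if no existing send
// stream is already using it; otherwise a brand-new VoE channel is created
// below.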
2487 bool default_channel_is_available = true;
2488 for (ChannelMap::const_iterator iter = send_channels_.begin();
2489 iter != send_channels_.end(); ++iter) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002490 if (IsDefaultChannel(iter->second->channel())) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002491 default_channel_is_available = false;
2492 break;
2493 }
2494 }
2495 if (default_channel_is_available) {
2496 channel = voe_channel();
2497 } else {
2498 // Create a new channel for sending audio data.
sergeyu@chromium.org5bc25c42013-12-05 00:24:06 +00002499 channel = engine()->CreateMediaVoiceChannel();
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002500 if (channel == -1) {
2501 LOG_RTCERR0(CreateChannel);
2502 return false;
2503 }
2504
2505 ConfigureSendChannel(channel);
2506 }
2507
2508 // Save the channel to send_channels_, so that RemoveSendStream() can still
2509 // delete the channel in case failure happens below.
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002510 webrtc::AudioTransport* audio_transport =
2511 engine()->voe()->base()->audio_transport();
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002512 send_channels_.insert(std::make_pair(
2513 sp.first_ssrc(),
2514 new WebRtcVoiceChannelRenderer(channel, audio_transport)));
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002515
2516 // Set the send (local) SSRC.
2517 // If there are multiple send SSRCs, we can only set the first one here, and
 2518  // the rest of the SSRCs need to be set after SetSendCodec has been called
 2519  // (with a codec that requires multiple SSRCs).
2520 if (engine()->voe()->rtp()->SetLocalSSRC(channel, sp.first_ssrc()) == -1) {
2521 LOG_RTCERR2(SetSendSSRC, channel, sp.first_ssrc());
2522 return false;
2523 }
2524
2525 // At this point the channel's local SSRC has been updated. If the channel is
2526 // the default channel make sure that all the receive channels are updated as
2527 // well. Receive channels have to have the same SSRC as the default channel in
2528 // order to send receiver reports with this SSRC.
2529 if (IsDefaultChannel(channel)) {
2530 for (ChannelMap::const_iterator it = receive_channels_.begin();
2531 it != receive_channels_.end(); ++it) {
2532 // Only update the SSRC for non-default channels.
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002533 if (!IsDefaultChannel(it->second->channel())) {
2534 if (engine()->voe()->rtp()->SetLocalSSRC(it->second->channel(),
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002535 sp.first_ssrc()) != 0) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002536 LOG_RTCERR2(SetLocalSSRC, it->second->channel(), sp.first_ssrc());
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002537 return false;
2538 }
2539 }
2540 }
2541 }
2542
2543 if (engine()->voe()->rtp()->SetRTCP_CNAME(channel, sp.cname.c_str()) == -1) {
2544 LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
2545 return false;
2546 }
2547
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002548 // Set the current codecs to be used for the new channel.
2549 if (!send_codecs_.empty() && !SetSendCodecs(channel, send_codecs_))
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002550 return false;
2551
2552 return ChangeSend(channel, desired_send_);
2553}
2554
2555bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
2556 ChannelMap::iterator it = send_channels_.find(ssrc);
2557 if (it == send_channels_.end()) {
2558 LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
2559 << " which doesn't exist.";
2560 return false;
2561 }
2562
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002563 int channel = it->second->channel();
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002564 ChangeSend(channel, SEND_NOTHING);
2565
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002566 // Delete the WebRtcVoiceChannelRenderer object connected to the channel,
2567 // this will disconnect the audio renderer with the send channel.
2568 delete it->second;
2569 send_channels_.erase(it);
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002570
2571 if (IsDefaultChannel(channel)) {
2572 // Do not delete the default channel since the receive channels depend on
2573 // the default channel, recycle it instead.
2574 ChangeSend(channel, SEND_NOTHING);
2575 } else {
2576 // Clean up and delete the send channel.
2577 LOG(LS_INFO) << "Removing audio send stream " << ssrc
2578 << " with VoiceEngine channel #" << channel << ".";
2579 if (!DeleteChannel(channel))
2580 return false;
2581 }
2582
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002583 if (send_channels_.empty())
2584 ChangeSend(SEND_NOTHING);
2585
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002586 return true;
2587}
2588
2589bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002590 talk_base::CritScope lock(&receive_channels_cs_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002591
2592 if (!VERIFY(sp.ssrcs.size() == 1))
2593 return false;
2594 uint32 ssrc = sp.first_ssrc();
2595
wu@webrtc.org78187522013-10-07 23:32:02 +00002596 if (ssrc == 0) {
2597 LOG(LS_WARNING) << "AddRecvStream with 0 ssrc is not supported.";
2598 return false;
2599 }
2600
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002601 if (receive_channels_.find(ssrc) != receive_channels_.end()) {
2602 LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002603 return false;
2604 }
2605
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002606  // Reuse the default channel for the recv stream in a non-conference call
 2607  // when the default channel is not being used.
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002608 webrtc::AudioTransport* audio_transport =
2609 engine()->voe()->base()->audio_transport();
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002610 if (!InConferenceMode() && default_receive_ssrc_ == 0) {
2611 LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
2612 << " reuse default channel";
2613 default_receive_ssrc_ = sp.first_ssrc();
2614 receive_channels_.insert(std::make_pair(
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002615 default_receive_ssrc_,
2616 new WebRtcVoiceChannelRenderer(voe_channel(), audio_transport)));
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002617 return SetPlayout(voe_channel(), playout_);
2618 }
2619
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002620 // Create a new channel for receiving audio data.
sergeyu@chromium.org5bc25c42013-12-05 00:24:06 +00002621 int channel = engine()->CreateMediaVoiceChannel();
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002622 if (channel == -1) {
2623 LOG_RTCERR0(CreateChannel);
2624 return false;
2625 }
2626
wu@webrtc.org78187522013-10-07 23:32:02 +00002627 if (!ConfigureRecvChannel(channel)) {
2628 DeleteChannel(channel);
2629 return false;
2630 }
2631
2632 receive_channels_.insert(
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002633 std::make_pair(
2634 ssrc, new WebRtcVoiceChannelRenderer(channel, audio_transport)));
wu@webrtc.org78187522013-10-07 23:32:02 +00002635
2636 LOG(LS_INFO) << "New audio stream " << ssrc
2637 << " registered to VoiceEngine channel #"
2638 << channel << ".";
2639 return true;
2640}
2641
2642bool WebRtcVoiceMediaChannel::ConfigureRecvChannel(int channel) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002643 // Configure to use external transport, like our default channel.
2644 if (engine()->voe()->network()->RegisterExternalTransport(
2645 channel, *this) == -1) {
2646 LOG_RTCERR2(SetExternalTransport, channel, this);
2647 return false;
2648 }
2649
2650 // Use the same SSRC as our default channel (so the RTCP reports are correct).
henrika@webrtc.orgaebb1ad2014-01-14 10:00:58 +00002651 unsigned int send_ssrc = 0;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002652 webrtc::VoERTP_RTCP* rtp = engine()->voe()->rtp();
2653 if (rtp->GetLocalSSRC(voe_channel(), send_ssrc) == -1) {
henrika@webrtc.orgaebb1ad2014-01-14 10:00:58 +00002654 LOG_RTCERR1(GetSendSSRC, channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002655 return false;
2656 }
2657 if (rtp->SetLocalSSRC(channel, send_ssrc) == -1) {
henrika@webrtc.orgaebb1ad2014-01-14 10:00:58 +00002658 LOG_RTCERR1(SetSendSSRC, channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002659 return false;
2660 }
2661
2662 // Use the same recv payload types as our default channel.
2663 ResetRecvCodecs(channel);
2664 if (!recv_codecs_.empty()) {
2665 for (std::vector<AudioCodec>::const_iterator it = recv_codecs_.begin();
2666 it != recv_codecs_.end(); ++it) {
2667 webrtc::CodecInst voe_codec;
2668 if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
2669 voe_codec.pltype = it->id;
2670 voe_codec.rate = 0; // Needed to make GetRecPayloadType work for ISAC
2671 if (engine()->voe()->codec()->GetRecPayloadType(
2672 voe_channel(), voe_codec) != -1) {
2673 if (engine()->voe()->codec()->SetRecPayloadType(
2674 channel, voe_codec) == -1) {
2675 LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
2676 return false;
2677 }
2678 }
2679 }
2680 }
2681 }
2682
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002683 if (InConferenceMode()) {
 2684    // To be on par with video, voe_channel() is not used for receiving in
2685 // a conference call.
2686 if (receive_channels_.empty() && default_receive_ssrc_ == 0 && playout_) {
 2687      // This is the first stream in a multi-user meeting. We can now
 2688      // disable playback of the default stream. This is because the default
2689 // stream will probably have received some initial packets before
2690 // the new stream was added. This will mean that the CN state from
2691 // the default channel will be mixed in with the other streams
2692 // throughout the whole meeting, which might be disturbing.
2693 LOG(LS_INFO) << "Disabling playback on the default voice channel";
2694 SetPlayout(voe_channel(), false);
2695 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002696 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002697 SetNack(channel, nack_enabled_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002698
buildbot@webrtc.org150835e2014-05-06 15:54:38 +00002699 // Set RTP header extension for the new channel.
2700 if (!SetChannelRecvRtpHeaderExtensions(channel, receive_extensions_)) {
2701 return false;
2702 }
2703
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002704 return SetPlayout(channel, playout_);
2705}
2706
2707bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002708 talk_base::CritScope lock(&receive_channels_cs_);
2709 ChannelMap::iterator it = receive_channels_.find(ssrc);
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002710 if (it == receive_channels_.end()) {
2711 LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
2712 << " which doesn't exist.";
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002713 return false;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002714 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002715
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002716 // Delete the WebRtcVoiceChannelRenderer object connected to the channel, this
2717 // will disconnect the audio renderer with the receive channel.
2718 // Cache the channel before the deletion.
2719 const int channel = it->second->channel();
2720 delete it->second;
2721 receive_channels_.erase(it);
2722
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002723 if (ssrc == default_receive_ssrc_) {
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002724 ASSERT(IsDefaultChannel(channel));
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002725    // Recycle the default channel that was used for the recv stream.
2726 if (playout_)
2727 SetPlayout(voe_channel(), false);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002728
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002729 default_receive_ssrc_ = 0;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002730 return true;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002731 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002732
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002733 LOG(LS_INFO) << "Removing audio stream " << ssrc
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002734 << " with VoiceEngine channel #" << channel << ".";
2735 if (!DeleteChannel(channel))
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002736 return false;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002737
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002738 bool enable_default_channel_playout = false;
2739 if (receive_channels_.empty()) {
2740 // The last stream was removed. We can now enable the default
2741 // channel for new channels to be played out immediately without
2742 // waiting for AddStream messages.
2743 // We do this for both conference mode and non-conference mode.
 2744    // TODO(oja): Does the default channel still have its CN state?
2745 enable_default_channel_playout = true;
2746 }
2747 if (!InConferenceMode() && receive_channels_.size() == 1 &&
2748 default_receive_ssrc_ != 0) {
2749 // Only the default channel is active, enable the playout on default
2750 // channel.
2751 enable_default_channel_playout = true;
2752 }
2753 if (enable_default_channel_playout && playout_) {
2754 LOG(LS_INFO) << "Enabling playback on the default voice channel";
2755 SetPlayout(voe_channel(), true);
2756 }
2757
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002758 return true;
2759}
2760
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002761bool WebRtcVoiceMediaChannel::SetRemoteRenderer(uint32 ssrc,
2762 AudioRenderer* renderer) {
2763 ChannelMap::iterator it = receive_channels_.find(ssrc);
2764 if (it == receive_channels_.end()) {
2765 if (renderer) {
2766 // Return an error if trying to set a valid renderer with an invalid ssrc.
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002767 LOG(LS_ERROR) << "SetRemoteRenderer failed with ssrc "<< ssrc;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002768 return false;
2769 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002770
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002771 // The channel likely has gone away, do nothing.
2772 return true;
2773 }
2774
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002775 if (renderer)
2776 it->second->Start(renderer);
2777 else
2778 it->second->Stop();
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002779
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002780 return true;
2781}
2782
2783bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32 ssrc,
2784 AudioRenderer* renderer) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002785 ChannelMap::iterator it = send_channels_.find(ssrc);
2786 if (it == send_channels_.end()) {
2787 if (renderer) {
2788 // Return an error if trying to set a valid renderer with an invalid ssrc.
2789 LOG(LS_ERROR) << "SetLocalRenderer failed with ssrc "<< ssrc;
2790 return false;
2791 }
2792
2793 // The channel likely has gone away, do nothing.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002794 return true;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002795 }
2796
mallinath@webrtc.org67ee6b92014-02-03 16:57:16 +00002797 if (renderer)
2798 it->second->Start(renderer);
2799 else
2800 it->second->Stop();
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002801
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002802 return true;
2803}
2804
bool WebRtcVoiceMediaChannel::GetActiveStreams(
    AudioInfo::StreamList* actives) {
  // In conference mode, the default channel should not be in
  // |receive_channels_|.
  actives->clear();
  for (ChannelMap::iterator it = receive_channels_.begin();
       it != receive_channels_.end(); ++it) {
    int level = GetOutputLevel(it->second->channel());
    if (level > 0) {
      actives->push_back(std::make_pair(it->first, level));
    }
  }
  return true;
}

int WebRtcVoiceMediaChannel::GetOutputLevel() {
  // Return the highest output level of all streams.
  int highest = GetOutputLevel(voe_channel());
  for (ChannelMap::iterator it = receive_channels_.begin();
       it != receive_channels_.end(); ++it) {
    int level = GetOutputLevel(it->second->channel());
    highest = talk_base::_max(level, highest);
  }
  return highest;
}

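// Returns the time since typing was last detected, in milliseconds, or -1 if
// the query fails. VoE reports the value in seconds, so it is converted here.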
int WebRtcVoiceMediaChannel::GetTimeSinceLastTyping() {
  int ret;
  if (engine()->voe()->processing()->TimeSinceLastTyping(ret) == -1) {
    // In case of error, log the info and continue.
    LOG_RTCERR0(TimeSinceLastTyping);
    ret = -1;
  } else {
    ret *= 1000;  // We return ms, webrtc returns seconds.
  }
  return ret;
}

void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window,
    int cost_per_typing, int reporting_threshold, int penalty_decay,
    int type_event_delay) {
  if (engine()->voe()->processing()->SetTypingDetectionParameters(
          time_window, cost_per_typing,
          reporting_threshold, penalty_decay, type_event_delay) == -1) {
    // In case of error, log the info and continue.
    LOG_RTCERR5(SetTypingDetectionParameters, time_window,
                cost_per_typing, reporting_threshold, penalty_decay,
                type_event_delay);
  }
}

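// Applies per-channel output gains. The (left, right) gains are decomposed
// into an overall volume scale (the larger of the two) and a per-side pan in
// [0, 1], so that scale * pan reproduces the requested gains.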
bool WebRtcVoiceMediaChannel::SetOutputScaling(
    uint32 ssrc, double left, double right) {
  talk_base::CritScope lock(&receive_channels_cs_);
  // Collect the channels to scale the output volume.
  std::vector<int> channels;
  if (0 == ssrc) {  // Collect all channels, including the default one.
    // Default channel is not in receive_channels_ if it is not being used for
    // playout.
    if (default_receive_ssrc_ == 0)
      channels.push_back(voe_channel());
    for (ChannelMap::const_iterator it = receive_channels_.begin();
         it != receive_channels_.end(); ++it) {
      channels.push_back(it->second->channel());
    }
  } else {  // Collect only the channel of the specified ssrc.
    int channel = GetReceiveChannelNum(ssrc);
    if (-1 == channel) {
      LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
      return false;
    }
    channels.push_back(channel);
  }

  // Scale the output volume for the collected channels. We first normalize to
  // scale the volume and then set the left and right pan.
  float scale = static_cast<float>(talk_base::_max(left, right));
  if (scale > 0.0001f) {
    left /= scale;
    right /= scale;
  }
  for (std::vector<int>::const_iterator it = channels.begin();
       it != channels.end(); ++it) {
    if (-1 == engine()->voe()->volume()->SetChannelOutputVolumeScaling(
            *it, scale)) {
      LOG_RTCERR2(SetChannelOutputVolumeScaling, *it, scale);
      return false;
    }
    if (-1 == engine()->voe()->volume()->SetOutputVolumePan(
            *it, static_cast<float>(left), static_cast<float>(right))) {
      LOG_RTCERR3(SetOutputVolumePan, *it, left, right);
      // Do not return if this fails; SetOutputVolumePan is not available on
      // all platforms.
    }
    LOG(LS_INFO) << "SetOutputScaling to left=" << left * scale
                 << " right=" << right * scale
                 << " for channel " << *it << " and ssrc " << ssrc;
  }
  return true;
}

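// Returns the effective output gains for the channel matching |ssrc| (or the
// default channel when |ssrc| is 0), computed as volume scale times pan.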
bool WebRtcVoiceMediaChannel::GetOutputScaling(
    uint32 ssrc, double* left, double* right) {
  if (!left || !right) return false;

  talk_base::CritScope lock(&receive_channels_cs_);
  // Determine the channel based on ssrc.
  int channel = (0 == ssrc) ? voe_channel() : GetReceiveChannelNum(ssrc);
  if (channel == -1) {
    LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
    return false;
  }

  float scaling;
  if (-1 == engine()->voe()->volume()->GetChannelOutputVolumeScaling(
          channel, scaling)) {
    LOG_RTCERR2(GetChannelOutputVolumeScaling, channel, scaling);
    return false;
  }

  float left_pan;
  float right_pan;
  if (-1 == engine()->voe()->volume()->GetOutputVolumePan(
          channel, left_pan, right_pan)) {
    LOG_RTCERR3(GetOutputVolumePan, channel, left_pan, right_pan);
    // If GetOutputVolumePan fails, we use the default left and right pan.
    left_pan = 1.0f;
    right_pan = 1.0f;
  }

  *left = scaling * left_pan;
  *right = scaling * right_pan;
  return true;
}

bool WebRtcVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
  ringback_tone_.reset(new WebRtcSoundclipStream(buf, len));
  return true;
}

bool WebRtcVoiceMediaChannel::PlayRingbackTone(uint32 ssrc,
                                               bool play, bool loop) {
  if (!ringback_tone_) {
    return false;
  }

  // The VoE file API is not available in Chrome.
  if (!engine()->voe()->file()) {
    return false;
  }

  // Determine which VoiceEngine channel to play on.
  int channel = (ssrc == 0) ? voe_channel() : GetReceiveChannelNum(ssrc);
  if (channel == -1) {
    return false;
  }

  // Make sure the ringtone is cued properly, and play it out.
  if (play) {
    ringback_tone_->set_loop(loop);
    ringback_tone_->Rewind();
    if (engine()->voe()->file()->StartPlayingFileLocally(channel,
        ringback_tone_.get()) == -1) {
      LOG_RTCERR2(StartPlayingFileLocally, channel, ringback_tone_.get());
      LOG(LS_ERROR) << "Unable to start ringback tone";
      return false;
    }
    ringback_channels_.insert(channel);
    LOG(LS_INFO) << "Started ringback on channel " << channel;
  } else {
    if (engine()->voe()->file()->IsPlayingFileLocally(channel) == 1 &&
        engine()->voe()->file()->StopPlayingFileLocally(channel) == -1) {
      LOG_RTCERR1(StopPlayingFileLocally, channel);
      return false;
    }
    LOG(LS_INFO) << "Stopped ringback on channel " << channel;
    ringback_channels_.erase(channel);
  }

  return true;
}

bool WebRtcVoiceMediaChannel::CanInsertDtmf() {
  return dtmf_allowed_;
}

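// Sends and/or locally plays a DTMF event, depending on |flags|. When |ssrc|
// is 0, the event is sent on the default channel if it is in use, otherwise
// on the first send channel.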
bool WebRtcVoiceMediaChannel::InsertDtmf(uint32 ssrc, int event,
                                         int duration, int flags) {
  if (!dtmf_allowed_) {
    return false;
  }

  // Send the event.
  if (flags & cricket::DF_SEND) {
    int channel = -1;
    if (ssrc == 0) {
      bool default_channel_is_inuse = false;
      for (ChannelMap::const_iterator iter = send_channels_.begin();
           iter != send_channels_.end(); ++iter) {
        if (IsDefaultChannel(iter->second->channel())) {
          default_channel_is_inuse = true;
          break;
        }
      }
      if (default_channel_is_inuse) {
        channel = voe_channel();
      } else if (!send_channels_.empty()) {
        channel = send_channels_.begin()->second->channel();
      }
    } else {
      channel = GetSendChannelNum(ssrc);
    }
    if (channel == -1) {
      LOG(LS_WARNING) << "InsertDtmf - The specified ssrc "
                      << ssrc << " is not in use.";
      return false;
    }
    // Send the DTMF event out-of-band ("true" as the 3rd argument).
    if (engine()->voe()->dtmf()->SendTelephoneEvent(
            channel, event, true, duration) == -1) {
      LOG_RTCERR4(SendTelephoneEvent, channel, event, true, duration);
      return false;
    }
  }

  // Play the event.
  if (flags & cricket::DF_PLAY) {
    // Play DTMF tone locally.
    if (engine()->voe()->dtmf()->PlayDtmfTone(event, duration) == -1) {
      LOG_RTCERR2(PlayDtmfTone, event, duration);
      return false;
    }
  }

  return true;
}

void WebRtcVoiceMediaChannel::OnPacketReceived(
    talk_base::Buffer* packet, const talk_base::PacketTime& packet_time) {
  // Pick which channel to send this packet to. If this packet doesn't match
  // any multiplexed streams, just send it to the default channel. Otherwise,
  // send it to the specific decoder instance for that stream.
  int which_channel = GetReceiveChannelNum(
      ParseSsrc(packet->data(), packet->length(), false));
  if (which_channel == -1) {
    which_channel = voe_channel();
  }

  // Stop any ringback that might be playing on the channel.
  // It's possible the ringback has already stopped, in which case we'll just
  // use the opportunity to remove the channel from ringback_channels_.
  if (engine()->voe()->file()) {
    const std::set<int>::iterator it = ringback_channels_.find(which_channel);
    if (it != ringback_channels_.end()) {
      if (engine()->voe()->file()->IsPlayingFileLocally(
          which_channel) == 1) {
        engine()->voe()->file()->StopPlayingFileLocally(which_channel);
        LOG(LS_INFO) << "Stopped ringback on channel " << which_channel
                     << " due to incoming media";
      }
      ringback_channels_.erase(which_channel);
    }
  }

  // Pass it off to the decoder.
  engine()->voe()->network()->ReceivedRTPPacket(
      which_channel,
      packet->data(),
      static_cast<unsigned int>(packet->length()));
}

void WebRtcVoiceMediaChannel::OnRtcpReceived(
    talk_base::Buffer* packet, const talk_base::PacketTime& packet_time) {
  // Sending channels need all RTCP packets with feedback information.
  // Even sender reports can contain attached report blocks.
  // Receiving channels need sender reports in order to create
  // correct receiver reports.
  int type = 0;
  if (!GetRtcpType(packet->data(), packet->length(), &type)) {
    LOG(LS_WARNING) << "Failed to parse type from received RTCP packet";
    return;
  }

  // If it is a sender report, find the channel that is listening.
  bool has_sent_to_default_channel = false;
  if (type == kRtcpTypeSR) {
    int which_channel = GetReceiveChannelNum(
        ParseSsrc(packet->data(), packet->length(), true));
    if (which_channel != -1) {
      engine()->voe()->network()->ReceivedRTCPPacket(
          which_channel,
          packet->data(),
          static_cast<unsigned int>(packet->length()));

      if (IsDefaultChannel(which_channel))
        has_sent_to_default_channel = true;
    }
  }

  // An SR may contain an RR, and any RR entry may correspond to any one of
  // the send channels, so all RTCP packets must be forwarded to all send
  // channels. VoE will filter out the RR internally.
  for (ChannelMap::iterator iter = send_channels_.begin();
       iter != send_channels_.end(); ++iter) {
    // Make sure we don't send the same packet to the default channel more
    // than once.
    if (IsDefaultChannel(iter->second->channel()) &&
        has_sent_to_default_channel)
      continue;

    engine()->voe()->network()->ReceivedRTCPPacket(
        iter->second->channel(),
        packet->data(),
        static_cast<unsigned int>(packet->length()));
  }
}

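// Mutes or unmutes microphone input on the send channel identified by |ssrc|
// (0 selects the default channel).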
bool WebRtcVoiceMediaChannel::MuteStream(uint32 ssrc, bool muted) {
  int channel = (ssrc == 0) ? voe_channel() : GetSendChannelNum(ssrc);
  if (channel == -1) {
    LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
    return false;
  }
  if (engine()->voe()->volume()->SetInputMute(channel, muted) == -1) {
    LOG_RTCERR2(SetInputMute, channel, muted);
    return false;
  }
  return true;
}

bool WebRtcVoiceMediaChannel::SetStartSendBandwidth(int bps) {
  // TODO(andresp): Add support for setting an independent start bandwidth when
  // bandwidth estimation is enabled for voice engine.
  return false;
}

bool WebRtcVoiceMediaChannel::SetMaxSendBandwidth(int bps) {
  LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetMaxSendBandwidth.";

  return SetSendBandwidthInternal(bps);
}

bool WebRtcVoiceMediaChannel::SetSendBandwidthInternal(int bps) {
  LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendBandwidthInternal.";

  send_bw_setting_ = true;
  send_bw_bps_ = bps;

  if (!send_codec_) {
    LOG(LS_INFO) << "The send codec has not been set up yet. "
                 << "The send bandwidth setting will be applied later.";
    return true;
  }

  // Bandwidth is auto by default.
  // TODO(bemasc): Fix this so that if SetMaxSendBandwidth(50) is followed by
  // SetMaxSendBandwidth(0), the second call removes the previous limit.
  if (bps <= 0)
    return true;

  webrtc::CodecInst codec = *send_codec_;
  bool is_multi_rate = IsCodecMultiRate(codec);

  if (is_multi_rate) {
    // If the codec is multi-rate, just set the bitrate.
    codec.rate = bps;
    if (!SetSendCodec(codec)) {
      LOG(LS_INFO) << "Failed to set codec " << codec.plname
                   << " to bitrate " << bps << " bps.";
      return false;
    }
    return true;
  } else {
    // If the codec is not multi-rate and |bps| is less than its fixed
    // bitrate, fail. If |bps| equals or exceeds the fixed bitrate, ignore it.
    if (bps < codec.rate) {
      LOG(LS_INFO) << "Failed to set codec " << codec.plname
                   << " to bitrate " << bps << " bps"
                   << ", requires at least " << codec.rate << " bps.";
      return false;
    }
    return true;
  }
}

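// Collects voice statistics: per-send-channel sender info (including echo
// metrics shared across all senders) and per-receive-channel receiver info,
// falling back to the default channel when there are no receive channels.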
bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
  bool echo_metrics_on = false;
  // These can take on valid negative values, so use the lowest possible level
  // as default rather than -1.
  int echo_return_loss = -100;
  int echo_return_loss_enhancement = -100;
  // These can also be negative, but in practice -1 is only used to signal
  // insufficient data, since the resolution is limited to multiples of 4 ms.
  int echo_delay_median_ms = -1;
  int echo_delay_std_ms = -1;
  if (engine()->voe()->processing()->GetEcMetricsStatus(
          echo_metrics_on) != -1 && echo_metrics_on) {
    // TODO(ajm): we may want to use VoECallReport::GetEchoMetricsSummary
    // here, but it appears to be unsuitable currently. Revisit after this is
    // investigated: http://b/issue?id=5666755
    int erl, erle, rerl, anlp;
    if (engine()->voe()->processing()->GetEchoMetrics(
            erl, erle, rerl, anlp) != -1) {
      echo_return_loss = erl;
      echo_return_loss_enhancement = erle;
    }

    int median, std;
    if (engine()->voe()->processing()->GetEcDelayMetrics(median, std) != -1) {
      echo_delay_median_ms = median;
      echo_delay_std_ms = std;
    }
  }

  webrtc::CallStatistics cs;
  unsigned int ssrc;
  webrtc::CodecInst codec;
  unsigned int level;

  for (ChannelMap::const_iterator channel_iter = send_channels_.begin();
       channel_iter != send_channels_.end(); ++channel_iter) {
    const int channel = channel_iter->second->channel();

    // Fill in the sender info, based on what we know, and what the
    // remote side told us it got from its RTCP report.
    VoiceSenderInfo sinfo;

    if (engine()->voe()->rtp()->GetRTCPStatistics(channel, cs) == -1 ||
        engine()->voe()->rtp()->GetLocalSSRC(channel, ssrc) == -1) {
      continue;
    }

    sinfo.add_ssrc(ssrc);
    sinfo.codec_name = send_codec_.get() ? send_codec_->plname : "";
    sinfo.bytes_sent = cs.bytesSent;
    sinfo.packets_sent = cs.packetsSent;
    // RTT isn't known until an RTCP report is received. Until then,
    // VoiceEngine returns 0 to indicate an error value.
    sinfo.rtt_ms = (cs.rttMs > 0) ? cs.rttMs : -1;

    // Get data from the last remote RTCP report. Use default values if no
    // data is available.
    sinfo.fraction_lost = -1.0;
    sinfo.jitter_ms = -1;
    sinfo.packets_lost = -1;
    sinfo.ext_seqnum = -1;
    std::vector<webrtc::ReportBlock> receive_blocks;
    if (engine()->voe()->rtp()->GetRemoteRTCPReportBlocks(
            channel, &receive_blocks) != -1 &&
        engine()->voe()->codec()->GetSendCodec(channel, codec) != -1) {
      std::vector<webrtc::ReportBlock>::iterator iter;
      for (iter = receive_blocks.begin(); iter != receive_blocks.end();
           ++iter) {
        // Look up the report for our send ssrc only.
        if (iter->source_SSRC == sinfo.ssrc()) {
          // Convert Q8 to floating point.
          sinfo.fraction_lost = static_cast<float>(iter->fraction_lost) / 256;
          // Convert samples to milliseconds.
          if (codec.plfreq / 1000 > 0) {
            sinfo.jitter_ms = iter->interarrival_jitter / (codec.plfreq / 1000);
          }
          sinfo.packets_lost = iter->cumulative_num_packets_lost;
          sinfo.ext_seqnum = iter->extended_highest_sequence_number;
          break;
        }
      }
    }

    // Local speech level.
    sinfo.audio_level = (engine()->voe()->volume()->
        GetSpeechInputLevelFullRange(level) != -1) ? level : -1;

    // TODO(xians): We are injecting the same APM logging to all the send
    // channels here because there is no good way to know which send channel
    // is using the APM. The correct fix is to allow the send channels to have
    // their own APM so that we can feed the correct APM logging to different
    // send channels. See issue crbug/264611.
    sinfo.echo_return_loss = echo_return_loss;
    sinfo.echo_return_loss_enhancement = echo_return_loss_enhancement;
    sinfo.echo_delay_median_ms = echo_delay_median_ms;
    sinfo.echo_delay_std_ms = echo_delay_std_ms;
    // TODO(ajm): Re-enable this metric once we have a reliable implementation.
    sinfo.aec_quality_min = -1;
    sinfo.typing_noise_detected = typing_noise_detected_;

    info->senders.push_back(sinfo);
  }

  // Build the list of receivers, one for each receiving channel, or one in
  // a 1:1 call.
  std::vector<int> channels;
  for (ChannelMap::const_iterator it = receive_channels_.begin();
       it != receive_channels_.end(); ++it) {
    channels.push_back(it->second->channel());
  }
  if (channels.empty()) {
    channels.push_back(voe_channel());
  }

  // Get the SSRC and stats for each receiver, based on our own calculations.
  for (std::vector<int>::const_iterator it = channels.begin();
       it != channels.end(); ++it) {
    memset(&cs, 0, sizeof(cs));
    if (engine()->voe()->rtp()->GetRemoteSSRC(*it, ssrc) != -1 &&
        engine()->voe()->rtp()->GetRTCPStatistics(*it, cs) != -1 &&
        engine()->voe()->codec()->GetRecCodec(*it, codec) != -1) {
      VoiceReceiverInfo rinfo;
      rinfo.add_ssrc(ssrc);
      rinfo.bytes_rcvd = cs.bytesReceived;
      rinfo.packets_rcvd = cs.packetsReceived;
      // The next four fields are from the most recently sent RTCP report.
      // Convert Q8 to floating point.
      rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
      rinfo.packets_lost = cs.cumulativeLost;
      rinfo.ext_seqnum = cs.extendedMax;
#ifdef USE_WEBRTC_DEV_BRANCH
      rinfo.capture_start_ntp_time_ms = cs.capture_start_ntp_time_ms_;
#endif
      if (codec.pltype != -1) {
        rinfo.codec_name = codec.plname;
      }
      // Convert samples to milliseconds.
      if (codec.plfreq / 1000 > 0) {
        rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
      }

      // Get jitter buffer and total delay (alg + jitter + playout) stats.
      webrtc::NetworkStatistics ns;
      if (engine()->voe()->neteq() &&
          engine()->voe()->neteq()->GetNetworkStatistics(
              *it, ns) != -1) {
        rinfo.jitter_buffer_ms = ns.currentBufferSize;
        rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
        rinfo.expand_rate =
            static_cast<float>(ns.currentExpandRate) / (1 << 14);
      }

      webrtc::AudioDecodingCallStats ds;
      if (engine()->voe()->neteq() &&
          engine()->voe()->neteq()->GetDecodingCallStatistics(
              *it, &ds) != -1) {
        rinfo.decoding_calls_to_silence_generator =
            ds.calls_to_silence_generator;
        rinfo.decoding_calls_to_neteq = ds.calls_to_neteq;
        rinfo.decoding_normal = ds.decoded_normal;
        rinfo.decoding_plc = ds.decoded_plc;
        rinfo.decoding_cng = ds.decoded_cng;
        rinfo.decoding_plc_cng = ds.decoded_plc_cng;
      }

      if (engine()->voe()->sync()) {
        int jitter_buffer_delay_ms = 0;
        int playout_buffer_delay_ms = 0;
        engine()->voe()->sync()->GetDelayEstimate(
            *it, &jitter_buffer_delay_ms, &playout_buffer_delay_ms);
        rinfo.delay_estimate_ms = jitter_buffer_delay_ms +
            playout_buffer_delay_ms;
      }

      // Get speech level.
      rinfo.audio_level = (engine()->voe()->volume()->
          GetSpeechOutputLevelFullRange(*it, level) != -1) ? level : -1;
      info->receivers.push_back(rinfo);
    }
  }

  return true;
}

void WebRtcVoiceMediaChannel::GetLastMediaError(
    uint32* ssrc, VoiceMediaChannel::Error* error) {
  ASSERT(ssrc != NULL);
  ASSERT(error != NULL);
  FindSsrc(voe_channel(), ssrc);
  *error = WebRtcErrorToChannelError(GetLastEngineError());
}

bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
  talk_base::CritScope lock(&receive_channels_cs_);
  ASSERT(ssrc != NULL);
  if (channel_num == -1 && send_ != SEND_NOTHING) {
    // Sometimes the VoiceEngine core will throw an error with channel_num =
    // -1. This means the error is not limited to a specific channel. Signal
    // the message using ssrc=0. If the current channel is sending, use this
    // channel for sending the message.
    *ssrc = 0;
    return true;
  } else {
    // Check whether this is a sending channel.
    for (ChannelMap::const_iterator it = send_channels_.begin();
         it != send_channels_.end(); ++it) {
      if (it->second->channel() == channel_num) {
        // This is a sending channel.
        uint32 local_ssrc = 0;
        if (engine()->voe()->rtp()->GetLocalSSRC(
                channel_num, local_ssrc) != -1) {
          *ssrc = local_ssrc;
        }
        return true;
      }
    }

    // Check whether this is a receiving channel.
    for (ChannelMap::const_iterator it = receive_channels_.begin();
         it != receive_channels_.end(); ++it) {
      if (it->second->channel() == channel_num) {
        *ssrc = it->first;
        return true;
      }
    }
  }
  return false;
}

void WebRtcVoiceMediaChannel::OnError(uint32 ssrc, int error) {
  if (error == VE_TYPING_NOISE_WARNING) {
    typing_noise_detected_ = true;
  } else if (error == VE_TYPING_NOISE_OFF_WARNING) {
    typing_noise_detected_ = false;
  }
  SignalMediaError(ssrc, WebRtcErrorToChannelError(error));
}

int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
  unsigned int ulevel;
  int ret =
      engine()->voe()->volume()->GetSpeechOutputLevel(channel, ulevel);
  return (ret == 0) ? static_cast<int>(ulevel) : -1;
}

int WebRtcVoiceMediaChannel::GetReceiveChannelNum(uint32 ssrc) {
  ChannelMap::iterator it = receive_channels_.find(ssrc);
  if (it != receive_channels_.end())
    return it->second->channel();
  return (ssrc == default_receive_ssrc_) ? voe_channel() : -1;
}

int WebRtcVoiceMediaChannel::GetSendChannelNum(uint32 ssrc) {
  ChannelMap::iterator it = send_channels_.find(ssrc);
  if (it != send_channels_.end())
    return it->second->channel();

  return -1;
}

bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
    const std::vector<AudioCodec>& all_codecs, webrtc::CodecInst* send_codec) {
  // Get the RED encodings from the parameter with no name. This may
  // change based on what is discussed on the Jingle list.
  // The encoding parameter is of the form "a/b"; we only support where
  // a == b. Verify this and parse out the value into red_pt.
  // If the parameter value is absent (as it will be until we wire up the
  // signaling of this message), use the second codec specified (i.e. the
  // one after "red") as the encoding parameter.
  int red_pt = -1;
  std::string red_params;
  CodecParameterMap::const_iterator it = red_codec.params.find("");
  if (it != red_codec.params.end()) {
    red_params = it->second;
    std::vector<std::string> red_pts;
    if (talk_base::split(red_params, '/', &red_pts) != 2 ||
        red_pts[0] != red_pts[1] ||
        !talk_base::FromString(red_pts[0], &red_pt)) {
      LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
      return false;
    }
  } else if (red_codec.params.empty()) {
    LOG(LS_WARNING) << "RED params not present, using defaults";
    if (all_codecs.size() > 1) {
      red_pt = all_codecs[1].id;
    }
  }

  // Try to find red_pt in |all_codecs|.
  std::vector<AudioCodec>::const_iterator codec;
  for (codec = all_codecs.begin(); codec != all_codecs.end(); ++codec) {
    if (codec->id == red_pt)
      break;
  }

  // If we find the right codec, that will be the codec we pass to
  // SetSendCodec, with the desired payload type.
  if (codec == all_codecs.end() ||
      !engine()->FindWebRtcCodec(*codec, send_codec)) {
    LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
    return false;
  }

  return true;
}

bool WebRtcVoiceMediaChannel::EnableRtcp(int channel) {
  if (engine()->voe()->rtp()->SetRTCPStatus(channel, true) == -1) {
    LOG_RTCERR2(SetRTCPStatus, channel, 1);
    return false;
  }
  // TODO(juberti): Enable VQMon and RTCP XR reports, once we know what
  // we want to do with them.
  // engine()->voe().EnableVQMon(voe_channel(), true);
  // engine()->voe().EnableRTCP_XR(voe_channel(), true);
  return true;
}

bool WebRtcVoiceMediaChannel::ResetRecvCodecs(int channel) {
  int ncodecs = engine()->voe()->codec()->NumOfCodecs();
  for (int i = 0; i < ncodecs; ++i) {
    webrtc::CodecInst voe_codec;
    if (engine()->voe()->codec()->GetCodec(i, voe_codec) != -1) {
      voe_codec.pltype = -1;
      if (engine()->voe()->codec()->SetRecPayloadType(
              channel, voe_codec) == -1) {
        LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
        return false;
      }
    }
  }
  return true;
}

bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
  if (playout) {
    LOG(LS_INFO) << "Starting playout for channel #" << channel;
    if (engine()->voe()->base()->StartPlayout(channel) == -1) {
      LOG_RTCERR1(StartPlayout, channel);
      return false;
    }
  } else {
    LOG(LS_INFO) << "Stopping playout for channel #" << channel;
    engine()->voe()->base()->StopPlayout(channel);
  }
  return true;
}

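// Reads the SSRC from a packet: byte offset 8 for RTP, byte offset 4 (the
// sender SSRC) for RTCP, both in network byte order. Returns 0 if the packet
// is too short.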
uint32 WebRtcVoiceMediaChannel::ParseSsrc(const void* data, size_t len,
                                          bool rtcp) {
  size_t ssrc_pos = (!rtcp) ? 8 : 4;
  uint32 ssrc = 0;
  if (len >= (ssrc_pos + sizeof(ssrc))) {
    ssrc = talk_base::GetBE32(static_cast<const char*>(data) + ssrc_pos);
  }
  return ssrc;
}

// Convert VoiceEngine error code into VoiceMediaChannel::Error enum.
VoiceMediaChannel::Error
    WebRtcVoiceMediaChannel::WebRtcErrorToChannelError(int err_code) {
  switch (err_code) {
    case 0:
      return ERROR_NONE;
    case VE_CANNOT_START_RECORDING:
    case VE_MIC_VOL_ERROR:
    case VE_GET_MIC_VOL_ERROR:
    case VE_CANNOT_ACCESS_MIC_VOL:
      return ERROR_REC_DEVICE_OPEN_FAILED;
    case VE_SATURATION_WARNING:
      return ERROR_REC_DEVICE_SATURATION;
    case VE_REC_DEVICE_REMOVED:
      return ERROR_REC_DEVICE_REMOVED;
    case VE_RUNTIME_REC_WARNING:
    case VE_RUNTIME_REC_ERROR:
      return ERROR_REC_RUNTIME_ERROR;
    case VE_CANNOT_START_PLAYOUT:
    case VE_SPEAKER_VOL_ERROR:
    case VE_GET_SPEAKER_VOL_ERROR:
    case VE_CANNOT_ACCESS_SPEAKER_VOL:
      return ERROR_PLAY_DEVICE_OPEN_FAILED;
    case VE_RUNTIME_PLAY_WARNING:
    case VE_RUNTIME_PLAY_ERROR:
      return ERROR_PLAY_RUNTIME_ERROR;
    case VE_TYPING_NOISE_WARNING:
      return ERROR_REC_TYPING_NOISE_DETECTED;
    default:
      return VoiceMediaChannel::ERROR_OTHER;
  }
}

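// Enables or disables an RTP header extension on |channel_id| via the given
// VoE setter; passing a NULL |extension| disables it.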
bool WebRtcVoiceMediaChannel::SetHeaderExtension(ExtensionSetterFunction setter,
    int channel_id, const RtpHeaderExtension* extension) {
  bool enable = false;
  int id = 0;
  std::string uri;
  if (extension) {
    enable = true;
    id = extension->id;
    uri = extension->uri;
  }
  if ((engine()->voe()->rtp()->*setter)(channel_id, enable, id) != 0) {
    LOG_RTCERR4(*setter, uri, channel_id, enable, id);
    return false;
  }
  return true;
}

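// WebRtcSoundclipStream adapts the in-memory ringback tone buffer to the
// stream interface that VoE file playback (StartPlayingFileLocally) reads
// from.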
int WebRtcSoundclipStream::Read(void *buf, int len) {
  size_t res = 0;
  mem_.Read(buf, len, &res, NULL);
  return static_cast<int>(res);
}

int WebRtcSoundclipStream::Rewind() {
  mem_.Rewind();
  // Return -1 (when not looping) to keep VoiceEngine from looping.
  return (loop_) ? 0 : -1;
}

}  // namespace cricket

#endif  // HAVE_WEBRTC_VOICE