 1/*
2 * libjingle
3 * Copyright 2004 Google Inc.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
19 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#ifdef HAVE_CONFIG_H
29#include <config.h>
30#endif
31
32#ifdef HAVE_WEBRTC_VOICE
33
34#include "talk/media/webrtc/webrtcvoiceengine.h"
35
36#include <algorithm>
37#include <cstdio>
38#include <string>
39#include <vector>
40
41#include "talk/base/base64.h"
42#include "talk/base/byteorder.h"
43#include "talk/base/common.h"
44#include "talk/base/helpers.h"
45#include "talk/base/logging.h"
46#include "talk/base/stringencode.h"
47#include "talk/base/stringutils.h"
48#include "talk/media/base/audiorenderer.h"
49#include "talk/media/base/constants.h"
50#include "talk/media/base/streamparams.h"
51#include "talk/media/base/voiceprocessor.h"
52#include "talk/media/webrtc/webrtcvoe.h"
 53#include "webrtc/common.h"
 54#include "webrtc/modules/audio_processing/include/audio_processing.h"
55
56#ifdef WIN32
57#include <objbase.h> // NOLINT
58#endif
59
60namespace cricket {
61
62struct CodecPref {
63 const char* name;
64 int clockrate;
65 int channels;
66 int payload_type;
67 bool is_multi_rate;
68};
69
70static const CodecPref kCodecPrefs[] = {
71 { "OPUS", 48000, 2, 111, true },
72 { "ISAC", 16000, 1, 103, true },
73 { "ISAC", 32000, 1, 104, true },
74 { "CELT", 32000, 1, 109, true },
75 { "CELT", 32000, 2, 110, true },
76 { "G722", 16000, 1, 9, false },
77 { "ILBC", 8000, 1, 102, false },
78 { "PCMU", 8000, 1, 0, false },
79 { "PCMA", 8000, 1, 8, false },
80 { "CN", 48000, 1, 107, false },
81 { "CN", 32000, 1, 106, false },
82 { "CN", 16000, 1, 105, false },
83 { "CN", 8000, 1, 13, false },
84 { "red", 8000, 1, 127, false },
85 { "telephone-event", 8000, 1, 126, false },
86};
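    // The order of the entries above determines our local codec preference
    // order, and payload_type is the RTP payload type we offer for each codec
    // (see WebRtcVoiceEngine::ConstructCodecs below).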
87
88// For Linux/Mac, using the default device is done by specifying index 0 for
89// VoE 4.0 and not -1 (which was the case for VoE 3.5).
90//
91// On Windows Vista and newer, Microsoft introduced the concept of "Default
92// Communications Device". This means that there are two types of default
93// devices (old Wave Audio style default and Default Communications Device).
94//
 95// On Windows systems which only support Wave Audio style default, use
 96// either -1 or 0 to select the default device.
97//
98// On Windows systems which support both "Default Communication Device" and
99// old Wave Audio style default, use -1 for Default Communications Device and
100// -2 for Wave Audio style default, which is what we want to use for clips.
101// It's not clear yet whether the -2 index is handled properly on other OSes.
102
103#ifdef WIN32
104static const int kDefaultAudioDeviceId = -1;
105static const int kDefaultSoundclipDeviceId = -2;
106#else
107static const int kDefaultAudioDeviceId = 0;
108#endif
109
110// extension header for audio levels, as defined in
111// http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-03
112static const char kRtpAudioLevelHeaderExtension[] =
113 "urn:ietf:params:rtp-hdrext:ssrc-audio-level";
114static const int kRtpAudioLevelHeaderExtensionId = 1;
115
116static const char kIsacCodecName[] = "ISAC";
117static const char kL16CodecName[] = "L16";
118// Codec parameters for Opus.
119static const int kOpusMonoBitrate = 32000;
120// Parameter used for NACK.
121// This value is equivalent to 5 seconds of audio data at 20 ms per packet.
122static const int kNackMaxPackets = 250;
123static const int kOpusStereoBitrate = 64000;
 124// draft-spittka-payload-rtp-opus-03
125// Opus bitrate should be in the range between 6000 and 510000.
126static const int kOpusMinBitrate = 6000;
127static const int kOpusMaxBitrate = 510000;
128
 129// Ensure we open the file in a writeable path on ChromeOS and Android. This
 130// workaround can be removed when it's possible to specify a filename for audio
 131// option based AEC dumps.
 132//
 133// TODO(grunell): Use a string in the options instead of hardcoding it here
 134// and let the embedder choose the filename (crbug.com/264223).
 135//
 136// NOTE(ajm): Don't use hardcoded paths on platforms not explicitly specified
137// below.
138#if defined(CHROMEOS)
 139static const char kAecDumpByAudioOptionFilename[] = "/tmp/audio.aecdump";
 140#elif defined(ANDROID)
 141static const char kAecDumpByAudioOptionFilename[] = "/sdcard/audio.aecdump";
 142#else
 143static const char kAecDumpByAudioOptionFilename[] = "audio.aecdump";
 144#endif
 145
146// Dumps an AudioCodec in RFC 2327-ish format.
147static std::string ToString(const AudioCodec& codec) {
148 std::stringstream ss;
149 ss << codec.name << "/" << codec.clockrate << "/" << codec.channels
150 << " (" << codec.id << ")";
151 return ss.str();
152}
153static std::string ToString(const webrtc::CodecInst& codec) {
154 std::stringstream ss;
155 ss << codec.plname << "/" << codec.plfreq << "/" << codec.channels
156 << " (" << codec.pltype << ")";
157 return ss.str();
158}
159
160static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
161 const char* delim = "\r\n";
162 for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
163 LOG_V(sev) << tok;
164 }
165}
166
 167// Severity is an integer because it is assumed to come from the command line.
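    // Note that the switch below falls through on purpose: each libjingle
    // severity also enables every higher-severity webrtc trace (e.g. LS_WARNING
    // yields kTraceTerseInfo | kTraceWarning | kTraceError | kTraceCritical).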
168static int SeverityToFilter(int severity) {
169 int filter = webrtc::kTraceNone;
170 switch (severity) {
171 case talk_base::LS_VERBOSE:
172 filter |= webrtc::kTraceAll;
173 case talk_base::LS_INFO:
174 filter |= (webrtc::kTraceStateInfo | webrtc::kTraceInfo);
175 case talk_base::LS_WARNING:
176 filter |= (webrtc::kTraceTerseInfo | webrtc::kTraceWarning);
177 case talk_base::LS_ERROR:
178 filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
179 }
180 return filter;
181}
182
183static bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
184 for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
185 if (_stricmp(kCodecPrefs[i].name, codec.plname) == 0 &&
186 kCodecPrefs[i].clockrate == codec.plfreq) {
187 return kCodecPrefs[i].is_multi_rate;
188 }
189 }
190 return false;
191}
192
193static bool FindCodec(const std::vector<AudioCodec>& codecs,
194 const AudioCodec& codec,
195 AudioCodec* found_codec) {
196 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
197 it != codecs.end(); ++it) {
198 if (it->Matches(codec)) {
199 if (found_codec != NULL) {
200 *found_codec = *it;
201 }
202 return true;
203 }
204 }
205 return false;
206}
 207
 208static bool IsNackEnabled(const AudioCodec& codec) {
209 return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamNack,
210 kParamValueEmpty));
211}
212
 213// Gets the default set of options applied to the engine. Historically, these
214// were supplied as a combination of flags from the channel manager (ec, agc,
215// ns, and highpass) and the rest hardcoded in InitInternal.
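    // See WebRtcVoiceEngine::ApplyOptions() for how each option is translated
    // into VoE API calls.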
216static AudioOptions GetDefaultEngineOptions() {
217 AudioOptions options;
218 options.echo_cancellation.Set(true);
219 options.auto_gain_control.Set(true);
220 options.noise_suppression.Set(true);
221 options.highpass_filter.Set(true);
222 options.stereo_swapping.Set(false);
223 options.typing_detection.Set(true);
224 options.conference_mode.Set(false);
225 options.adjust_agc_delta.Set(0);
226 options.experimental_agc.Set(false);
227 options.experimental_aec.Set(false);
228 options.aec_dump.Set(false);
229 return options;
230}
 231
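    // WebRtcSoundclipMedia plays sound clips on its own channel of the
    // soundclip VoiceEngine instance (voe_sc), wrapping the clip data in a
    // WebRtcSoundclipStream and playing it out via the VoE file API.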
232class WebRtcSoundclipMedia : public SoundclipMedia {
233 public:
234 explicit WebRtcSoundclipMedia(WebRtcVoiceEngine *engine)
235 : engine_(engine), webrtc_channel_(-1) {
236 engine_->RegisterSoundclip(this);
237 }
238
239 virtual ~WebRtcSoundclipMedia() {
240 engine_->UnregisterSoundclip(this);
241 if (webrtc_channel_ != -1) {
242 // We shouldn't have to call Disable() here. DeleteChannel() should call
243 // StopPlayout() while deleting the channel. We should fix the bug
 244 // inside WebRTC and remove the Disable() call below. This work is
245 // tracked by bug http://b/issue?id=5382855.
246 PlaySound(NULL, 0, 0);
247 Disable();
248 if (engine_->voe_sc()->base()->DeleteChannel(webrtc_channel_)
249 == -1) {
250 LOG_RTCERR1(DeleteChannel, webrtc_channel_);
251 }
252 }
253 }
254
255 bool Init() {
 256 if (!engine_->voe_sc()) {
257 return false;
258 }
 259 webrtc_channel_ = engine_->voe_sc()->base()->CreateChannel();
260 if (webrtc_channel_ == -1) {
261 LOG_RTCERR0(CreateChannel);
262 return false;
263 }
264 return true;
265 }
266
267 bool Enable() {
268 if (engine_->voe_sc()->base()->StartPlayout(webrtc_channel_) == -1) {
269 LOG_RTCERR1(StartPlayout, webrtc_channel_);
270 return false;
271 }
272 return true;
273 }
274
275 bool Disable() {
276 if (engine_->voe_sc()->base()->StopPlayout(webrtc_channel_) == -1) {
277 LOG_RTCERR1(StopPlayout, webrtc_channel_);
278 return false;
279 }
280 return true;
281 }
282
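     // Plays the raw clip in |buf|. Passing a NULL |buf| stops any current
     // clip; SF_LOOP in |flags| makes the clip repeat until it is stopped.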
283 virtual bool PlaySound(const char *buf, int len, int flags) {
284 // The voe file api is not available in chrome.
285 if (!engine_->voe_sc()->file()) {
286 return false;
287 }
288 // Must stop playing the current sound (if any), because we are about to
289 // modify the stream.
290 if (engine_->voe_sc()->file()->StopPlayingFileLocally(webrtc_channel_)
291 == -1) {
292 LOG_RTCERR1(StopPlayingFileLocally, webrtc_channel_);
293 return false;
294 }
295
296 if (buf) {
297 stream_.reset(new WebRtcSoundclipStream(buf, len));
298 stream_->set_loop((flags & SF_LOOP) != 0);
299 stream_->Rewind();
300
301 // Play it.
302 if (engine_->voe_sc()->file()->StartPlayingFileLocally(
303 webrtc_channel_, stream_.get()) == -1) {
304 LOG_RTCERR2(StartPlayingFileLocally, webrtc_channel_, stream_.get());
305 LOG(LS_ERROR) << "Unable to start soundclip";
306 return false;
307 }
308 } else {
309 stream_.reset();
310 }
311 return true;
312 }
313
314 int GetLastEngineError() const { return engine_->voe_sc()->error(); }
315
316 private:
317 WebRtcVoiceEngine *engine_;
318 int webrtc_channel_;
319 talk_base::scoped_ptr<WebRtcSoundclipStream> stream_;
320};
321
322WebRtcVoiceEngine::WebRtcVoiceEngine()
323 : voe_wrapper_(new VoEWrapper()),
324 voe_wrapper_sc_(new VoEWrapper()),
 325 voe_wrapper_sc_initialized_(false),
 326 tracing_(new VoETraceWrapper()),
327 adm_(NULL),
328 adm_sc_(NULL),
329 log_filter_(SeverityToFilter(kDefaultLogSeverity)),
330 is_dumping_aec_(false),
331 desired_local_monitor_enable_(false),
332 tx_processor_ssrc_(0),
333 rx_processor_ssrc_(0) {
334 Construct();
335}
336
337WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
338 VoEWrapper* voe_wrapper_sc,
339 VoETraceWrapper* tracing)
340 : voe_wrapper_(voe_wrapper),
341 voe_wrapper_sc_(voe_wrapper_sc),
 342 voe_wrapper_sc_initialized_(false),
 343 tracing_(tracing),
344 adm_(NULL),
345 adm_sc_(NULL),
346 log_filter_(SeverityToFilter(kDefaultLogSeverity)),
347 is_dumping_aec_(false),
348 desired_local_monitor_enable_(false),
349 tx_processor_ssrc_(0),
350 rx_processor_ssrc_(0) {
351 Construct();
352}
353
354void WebRtcVoiceEngine::Construct() {
355 SetTraceFilter(log_filter_);
356 initialized_ = false;
357 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
358 SetTraceOptions("");
359 if (tracing_->SetTraceCallback(this) == -1) {
360 LOG_RTCERR0(SetTraceCallback);
361 }
362 if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
363 LOG_RTCERR0(RegisterVoiceEngineObserver);
364 }
365 // Clear the default agc state.
366 memset(&default_agc_config_, 0, sizeof(default_agc_config_));
367
368 // Load our audio codec list.
369 ConstructCodecs();
370
371 // Load our RTP Header extensions.
372 rtp_header_extensions_.push_back(
373 RtpHeaderExtension(kRtpAudioLevelHeaderExtension,
374 kRtpAudioLevelHeaderExtensionId));
 375 options_ = GetDefaultEngineOptions();
 376}
377
378static bool IsOpus(const AudioCodec& codec) {
379 return (_stricmp(codec.name.c_str(), kOpusCodecName) == 0);
380}
381
382static bool IsIsac(const AudioCodec& codec) {
383 return (_stricmp(codec.name.c_str(), kIsacCodecName) == 0);
384}
385
386// True if params["stereo"] == "1"
387static bool IsOpusStereoEnabled(const AudioCodec& codec) {
388 CodecParameterMap::const_iterator param =
389 codec.params.find(kCodecParamStereo);
390 if (param == codec.params.end()) {
391 return false;
392 }
393 return param->second == kParamValueTrue;
394}
395
 396static bool IsValidOpusBitrate(int bitrate) {
397 return (bitrate >= kOpusMinBitrate && bitrate <= kOpusMaxBitrate);
398}
399
400// Returns 0 if params[kCodecParamMaxAverageBitrate] is not defined or invalid.
401// Returns the value of params[kCodecParamMaxAverageBitrate] otherwise.
402static int GetOpusBitrateFromParams(const AudioCodec& codec) {
403 int bitrate = 0;
404 if (!codec.GetParam(kCodecParamMaxAverageBitrate, &bitrate)) {
405 return 0;
406 }
407 if (!IsValidOpusBitrate(bitrate)) {
408 LOG(LS_WARNING) << "Codec parameter \"maxaveragebitrate\" has an "
409 << "invalid value: " << bitrate;
410 return 0;
411 }
412 return bitrate;
413}
414
 415void WebRtcVoiceEngine::ConstructCodecs() {
416 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
417 int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
418 for (int i = 0; i < ncodecs; ++i) {
419 webrtc::CodecInst voe_codec;
420 if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
421 // Skip uncompressed formats.
422 if (_stricmp(voe_codec.plname, kL16CodecName) == 0) {
423 continue;
424 }
425
426 const CodecPref* pref = NULL;
427 for (size_t j = 0; j < ARRAY_SIZE(kCodecPrefs); ++j) {
428 if (_stricmp(kCodecPrefs[j].name, voe_codec.plname) == 0 &&
429 kCodecPrefs[j].clockrate == voe_codec.plfreq &&
430 kCodecPrefs[j].channels == voe_codec.channels) {
431 pref = &kCodecPrefs[j];
432 break;
433 }
434 }
435
436 if (pref) {
437 // Use the payload type that we've configured in our pref table;
438 // use the offset in our pref table to determine the sort order.
439 AudioCodec codec(pref->payload_type, voe_codec.plname, voe_codec.plfreq,
440 voe_codec.rate, voe_codec.channels,
441 ARRAY_SIZE(kCodecPrefs) - (pref - kCodecPrefs));
442 LOG(LS_INFO) << ToString(codec);
443 if (IsIsac(codec)) {
444 // Indicate auto-bandwidth in signaling.
445 codec.bitrate = 0;
446 }
447 if (IsOpus(codec)) {
448 // Only add fmtp parameters that differ from the spec.
449 if (kPreferredMinPTime != kOpusDefaultMinPTime) {
450 codec.params[kCodecParamMinPTime] =
451 talk_base::ToString(kPreferredMinPTime);
452 }
453 if (kPreferredMaxPTime != kOpusDefaultMaxPTime) {
454 codec.params[kCodecParamMaxPTime] =
455 talk_base::ToString(kPreferredMaxPTime);
456 }
457 // TODO(hellner): Add ptime, sprop-stereo, stereo and useinbandfec
458 // when they can be set to values other than the default.
459 }
460 codecs_.push_back(codec);
461 } else {
462 LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec);
463 }
464 }
465 }
466 // Make sure they are in local preference order.
467 std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
468}
469
470WebRtcVoiceEngine::~WebRtcVoiceEngine() {
471 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
472 if (voe_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
473 LOG_RTCERR0(DeRegisterVoiceEngineObserver);
474 }
475 if (adm_) {
476 voe_wrapper_.reset();
477 adm_->Release();
478 adm_ = NULL;
479 }
480 if (adm_sc_) {
481 voe_wrapper_sc_.reset();
482 adm_sc_->Release();
483 adm_sc_ = NULL;
484 }
485
486 // Test to see if the media processor was deregistered properly
487 ASSERT(SignalRxMediaFrame.is_empty());
488 ASSERT(SignalTxMediaFrame.is_empty());
489
490 tracing_->SetTraceCallback(NULL);
491}
492
493bool WebRtcVoiceEngine::Init(talk_base::Thread* worker_thread) {
494 LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
495 bool res = InitInternal();
496 if (res) {
497 LOG(LS_INFO) << "WebRtcVoiceEngine::Init Done!";
498 } else {
499 LOG(LS_ERROR) << "WebRtcVoiceEngine::Init failed";
500 Terminate();
501 }
502 return res;
503}
504
505bool WebRtcVoiceEngine::InitInternal() {
506 // Temporarily turn logging level up for the Init call
507 int old_filter = log_filter_;
508 int extended_filter = log_filter_ | SeverityToFilter(talk_base::LS_INFO);
509 SetTraceFilter(extended_filter);
510 SetTraceOptions("");
511
512 // Init WebRtc VoiceEngine.
513 if (voe_wrapper_->base()->Init(adm_) == -1) {
514 LOG_RTCERR0_EX(Init, voe_wrapper_->error());
515 SetTraceFilter(old_filter);
516 return false;
517 }
518
519 SetTraceFilter(old_filter);
520 SetTraceOptions(log_options_);
521
522 // Log the VoiceEngine version info
523 char buffer[1024] = "";
524 voe_wrapper_->base()->GetVersion(buffer);
525 LOG(LS_INFO) << "WebRtc VoiceEngine Version:";
526 LogMultiline(talk_base::LS_INFO, buffer);
527
528 // Save the default AGC configuration settings. This must happen before
529 // calling SetOptions or the default will be overwritten.
530 if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) {
 531 LOG_RTCERR0(GetAgcConfig);
 532 return false;
533 }
534
 535 // Set defaults for options, so that ApplyOptions applies them explicitly
536 // when we clear option (channel) overrides. External clients can still
537 // modify the defaults via SetOptions (on the media engine).
538 if (!SetOptions(GetDefaultEngineOptions())) {
 539 return false;
540 }
541
542 // Print our codec list again for the call diagnostic log
543 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
544 for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
545 it != codecs_.end(); ++it) {
546 LOG(LS_INFO) << ToString(*it);
547 }
548
 549 // Disable the DTMF playout when a tone is sent.
550 // PlayDtmfTone will be used if local playout is needed.
551 if (voe_wrapper_->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
552 LOG_RTCERR1(SetDtmfFeedbackStatus, false);
553 }
554
555 initialized_ = true;
556 return true;
557}
558
559bool WebRtcVoiceEngine::EnsureSoundclipEngineInit() {
560 if (voe_wrapper_sc_initialized_) {
561 return true;
562 }
563 // Note that, if initialization fails, voe_wrapper_sc_initialized_ will still
564 // be false, so subsequent calls to EnsureSoundclipEngineInit will
565 // probably just fail again. That's acceptable behavior.
 566#if defined(LINUX) && !defined(HAVE_LIBPULSE)
567 voe_wrapper_sc_->hw()->SetAudioDeviceLayer(webrtc::kAudioLinuxAlsa);
568#endif
569
570 // Initialize the VoiceEngine instance that we'll use to play out sound clips.
571 if (voe_wrapper_sc_->base()->Init(adm_sc_) == -1) {
572 LOG_RTCERR0_EX(Init, voe_wrapper_sc_->error());
573 return false;
574 }
575
576 // On Windows, tell it to use the default sound (not communication) devices.
577 // First check whether there is a valid sound device for playback.
578 // TODO(juberti): Clean this up when we support setting the soundclip device.
579#ifdef WIN32
580 // The SetPlayoutDevice may not be implemented in the case of external ADM.
581 // TODO(ronghuawu): We should only check the adm_sc_ here, but current
 582 // PeerConnection interface never sets the adm_sc_, so we need to check both
583 // in order to determine if the external adm is used.
584 if (!adm_ && !adm_sc_) {
585 int num_of_devices = 0;
586 if (voe_wrapper_sc_->hw()->GetNumOfPlayoutDevices(num_of_devices) != -1 &&
587 num_of_devices > 0) {
588 if (voe_wrapper_sc_->hw()->SetPlayoutDevice(kDefaultSoundclipDeviceId)
589 == -1) {
590 LOG_RTCERR1_EX(SetPlayoutDevice, kDefaultSoundclipDeviceId,
591 voe_wrapper_sc_->error());
592 return false;
593 }
594 } else {
595 LOG(LS_WARNING) << "No valid sound playout device found.";
596 }
597 }
598#endif
 599 voe_wrapper_sc_initialized_ = true;
 600 LOG(LS_INFO) << "Initialized WebRtc soundclip engine.";
 601 return true;
602}
603
604void WebRtcVoiceEngine::Terminate() {
605 LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
606 initialized_ = false;
607
608 StopAecDump();
609
 610 if (voe_wrapper_sc_) {
611 voe_wrapper_sc_initialized_ = false;
612 voe_wrapper_sc_->base()->Terminate();
613 }
 614 voe_wrapper_->base()->Terminate();
615 desired_local_monitor_enable_ = false;
616}
617
618int WebRtcVoiceEngine::GetCapabilities() {
619 return AUDIO_SEND | AUDIO_RECV;
620}
621
622VoiceMediaChannel *WebRtcVoiceEngine::CreateChannel() {
623 WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this);
624 if (!ch->valid()) {
625 delete ch;
626 ch = NULL;
627 }
628 return ch;
629}
630
631SoundclipMedia *WebRtcVoiceEngine::CreateSoundclip() {
 632 if (!EnsureSoundclipEngineInit()) {
633 LOG(LS_ERROR) << "Unable to create soundclip: soundclip engine failed to "
634 << "initialize.";
635 return NULL;
636 }
 637 WebRtcSoundclipMedia *soundclip = new WebRtcSoundclipMedia(this);
638 if (!soundclip->Init() || !soundclip->Enable()) {
639 delete soundclip;
640 return NULL;
641 }
642 return soundclip;
643}
644
 645bool WebRtcVoiceEngine::SetOptions(const AudioOptions& options) {
 646 if (!ApplyOptions(options)) {
647 return false;
648 }
649 options_ = options;
650 return true;
651}
652
653bool WebRtcVoiceEngine::SetOptionOverrides(const AudioOptions& overrides) {
654 LOG(LS_INFO) << "Setting option overrides: " << overrides.ToString();
655 if (!ApplyOptions(overrides)) {
656 return false;
657 }
658 option_overrides_ = overrides;
659 return true;
660}
661
662bool WebRtcVoiceEngine::ClearOptionOverrides() {
663 LOG(LS_INFO) << "Clearing option overrides.";
664 AudioOptions options = options_;
 665 // Only call ApplyOptions if |option_overrides_| contains overridden options.
 666 // ApplyOptions affects NS, AGC, and other options that are shared between
 667 // all WebRtcVoiceEngineChannels.
668 if (option_overrides_ == AudioOptions()) {
669 return true;
670 }
671
672 if (!ApplyOptions(options)) {
673 return false;
674 }
675 option_overrides_ = AudioOptions();
676 return true;
677}
678
679// AudioOptions defaults are set in InitInternal (for options with corresponding
680// MediaEngineInterface flags) and in SetOptions(int) for flagless options.
681bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
682 AudioOptions options = options_in; // The options are modified below.
683 // kEcConference is AEC with high suppression.
684 webrtc::EcModes ec_mode = webrtc::kEcConference;
685 webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone;
686 webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
687 webrtc::NsModes ns_mode = webrtc::kNsHighSuppression;
688 bool aecm_comfort_noise = false;
 689 if (options.aecm_generate_comfort_noise.Get(&aecm_comfort_noise)) {
690 LOG(LS_VERBOSE) << "Comfort noise explicitly set to "
691 << aecm_comfort_noise << " (default is false).";
692 }
 693
694#if defined(IOS)
695 // On iOS, VPIO provides built-in EC and AGC.
696 options.echo_cancellation.Set(false);
697 options.auto_gain_control.Set(false);
698#elif defined(ANDROID)
699 ec_mode = webrtc::kEcAecm;
700#endif
701
702#if defined(IOS) || defined(ANDROID)
703 // Set the AGC mode for iOS as well despite disabling it above, to avoid
704 // unsupported configuration errors from webrtc.
705 agc_mode = webrtc::kAgcFixedDigital;
706 options.typing_detection.Set(false);
707 options.experimental_agc.Set(false);
708 options.experimental_aec.Set(false);
709#endif
710
711 LOG(LS_INFO) << "Applying audio options: " << options.ToString();
712
713 webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
714
715 bool echo_cancellation;
716 if (options.echo_cancellation.Get(&echo_cancellation)) {
717 if (voep->SetEcStatus(echo_cancellation, ec_mode) == -1) {
718 LOG_RTCERR2(SetEcStatus, echo_cancellation, ec_mode);
719 return false;
 720 } else {
721 LOG(LS_VERBOSE) << "Echo control set to " << echo_cancellation
722 << " with mode " << ec_mode;
 723 }
724#if !defined(ANDROID)
725 // TODO(ajm): Remove the error return on Android from webrtc.
726 if (voep->SetEcMetricsStatus(echo_cancellation) == -1) {
727 LOG_RTCERR1(SetEcMetricsStatus, echo_cancellation);
728 return false;
729 }
730#endif
731 if (ec_mode == webrtc::kEcAecm) {
732 if (voep->SetAecmMode(aecm_mode, aecm_comfort_noise) != 0) {
733 LOG_RTCERR2(SetAecmMode, aecm_mode, aecm_comfort_noise);
734 return false;
735 }
736 }
737 }
738
739 bool auto_gain_control;
740 if (options.auto_gain_control.Get(&auto_gain_control)) {
741 if (voep->SetAgcStatus(auto_gain_control, agc_mode) == -1) {
742 LOG_RTCERR2(SetAgcStatus, auto_gain_control, agc_mode);
743 return false;
 744 } else {
745 LOG(LS_VERBOSE) << "Auto gain set to " << auto_gain_control
746 << " with mode " << agc_mode;
747 }
748 }
749
750 if (options.tx_agc_target_dbov.IsSet() ||
751 options.tx_agc_digital_compression_gain.IsSet() ||
752 options.tx_agc_limiter.IsSet()) {
753 // Override default_agc_config_. Generally, an unset option means "leave
754 // the VoE bits alone" in this function, so we want whatever is set to be
755 // stored as the new "default". If we didn't, then setting e.g.
756 // tx_agc_target_dbov would reset digital compression gain and limiter
757 // settings.
758 // Also, if we don't update default_agc_config_, then adjust_agc_delta
759 // would be an offset from the original values, and not whatever was set
760 // explicitly.
761 default_agc_config_.targetLeveldBOv =
762 options.tx_agc_target_dbov.GetWithDefaultIfUnset(
763 default_agc_config_.targetLeveldBOv);
764 default_agc_config_.digitalCompressionGaindB =
765 options.tx_agc_digital_compression_gain.GetWithDefaultIfUnset(
766 default_agc_config_.digitalCompressionGaindB);
767 default_agc_config_.limiterEnable =
768 options.tx_agc_limiter.GetWithDefaultIfUnset(
769 default_agc_config_.limiterEnable);
770 if (voe_wrapper_->processing()->SetAgcConfig(default_agc_config_) == -1) {
771 LOG_RTCERR3(SetAgcConfig,
772 default_agc_config_.targetLeveldBOv,
773 default_agc_config_.digitalCompressionGaindB,
774 default_agc_config_.limiterEnable);
775 return false;
 776 }
777 }
778
779 bool noise_suppression;
780 if (options.noise_suppression.Get(&noise_suppression)) {
781 if (voep->SetNsStatus(noise_suppression, ns_mode) == -1) {
782 LOG_RTCERR2(SetNsStatus, noise_suppression, ns_mode);
783 return false;
 784 } else {
785 LOG(LS_VERBOSE) << "Noise suppression set to " << noise_suppression
786 << " with mode " << ns_mode;
 787 }
788 }
789
790 bool highpass_filter;
791 if (options.highpass_filter.Get(&highpass_filter)) {
792 if (voep->EnableHighPassFilter(highpass_filter) == -1) {
793 LOG_RTCERR1(SetHighpassFilterStatus, highpass_filter);
794 return false;
795 }
796 }
797
798 bool stereo_swapping;
799 if (options.stereo_swapping.Get(&stereo_swapping)) {
800 voep->EnableStereoChannelSwapping(stereo_swapping);
801 if (voep->IsStereoChannelSwappingEnabled() != stereo_swapping) {
802 LOG_RTCERR1(EnableStereoChannelSwapping, stereo_swapping);
803 return false;
804 }
805 }
806
807 bool typing_detection;
808 if (options.typing_detection.Get(&typing_detection)) {
809 if (voep->SetTypingDetectionStatus(typing_detection) == -1) {
810 // In case of error, log the info and continue
811 LOG_RTCERR1(SetTypingDetectionStatus, typing_detection);
812 }
813 }
814
815 int adjust_agc_delta;
816 if (options.adjust_agc_delta.Get(&adjust_agc_delta)) {
817 if (!AdjustAgcLevel(adjust_agc_delta)) {
818 return false;
819 }
820 }
821
822 bool aec_dump;
823 if (options.aec_dump.Get(&aec_dump)) {
 824 if (aec_dump)
 825 StartAecDump(kAecDumpByAudioOptionFilename);
 826 else
827 StopAecDump();
828 }
829
 830 bool experimental_aec;
831 if (options.experimental_aec.Get(&experimental_aec)) {
832 webrtc::AudioProcessing* audioproc =
833 voe_wrapper_->base()->audio_processing();
834 // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine
835 // returns NULL on audio_processing().
836 if (audioproc) {
837 webrtc::Config config;
838 config.Set<webrtc::DelayCorrection>(
839 new webrtc::DelayCorrection(experimental_aec));
840 audioproc->SetExtraOptions(config);
841 }
842 }
843
 844 uint32 recording_sample_rate;
845 if (options.recording_sample_rate.Get(&recording_sample_rate)) {
846 if (voe_wrapper_->hw()->SetRecordingSampleRate(recording_sample_rate)) {
847 LOG_RTCERR1(SetRecordingSampleRate, recording_sample_rate);
848 }
849 }
850
851 uint32 playout_sample_rate;
852 if (options.playout_sample_rate.Get(&playout_sample_rate)) {
853 if (voe_wrapper_->hw()->SetPlayoutSampleRate(playout_sample_rate)) {
854 LOG_RTCERR1(SetPlayoutSampleRate, playout_sample_rate);
855 }
856 }
857
 858
859 return true;
860}
861
862bool WebRtcVoiceEngine::SetDelayOffset(int offset) {
863 voe_wrapper_->processing()->SetDelayOffsetMs(offset);
864 if (voe_wrapper_->processing()->DelayOffsetMs() != offset) {
865 LOG_RTCERR1(SetDelayOffsetMs, offset);
866 return false;
867 }
868
869 return true;
870}
871
872struct ResumeEntry {
873 ResumeEntry(WebRtcVoiceMediaChannel *c, bool p, SendFlags s)
874 : channel(c),
875 playout(p),
876 send(s) {
877 }
878
879 WebRtcVoiceMediaChannel *channel;
880 bool playout;
881 SendFlags send;
882};
883
884// TODO(juberti): Refactor this so that the core logic can be used to set the
885// soundclip device. At that time, reinstate the soundclip pause/resume code.
886bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
887 const Device* out_device) {
888#if !defined(IOS) && !defined(ANDROID)
889 int in_id = in_device ? talk_base::FromString<int>(in_device->id) :
890 kDefaultAudioDeviceId;
891 int out_id = out_device ? talk_base::FromString<int>(out_device->id) :
892 kDefaultAudioDeviceId;
893 // The device manager uses -1 as the default device, which was the case for
894 // VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
895#ifndef WIN32
896 if (-1 == in_id) {
897 in_id = kDefaultAudioDeviceId;
898 }
899 if (-1 == out_id) {
900 out_id = kDefaultAudioDeviceId;
901 }
902#endif
903
904 std::string in_name = (in_id != kDefaultAudioDeviceId) ?
905 in_device->name : "Default device";
906 std::string out_name = (out_id != kDefaultAudioDeviceId) ?
907 out_device->name : "Default device";
908 LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
909 << ") and speaker to (id=" << out_id << ", name=" << out_name
910 << ")";
911
912 // If we're running the local monitor, we need to stop it first.
913 bool ret = true;
914 if (!PauseLocalMonitor()) {
915 LOG(LS_WARNING) << "Failed to pause local monitor";
916 ret = false;
917 }
918
919 // Must also pause all audio playback and capture.
920 for (ChannelList::const_iterator i = channels_.begin();
921 i != channels_.end(); ++i) {
922 WebRtcVoiceMediaChannel *channel = *i;
923 if (!channel->PausePlayout()) {
924 LOG(LS_WARNING) << "Failed to pause playout";
925 ret = false;
926 }
927 if (!channel->PauseSend()) {
928 LOG(LS_WARNING) << "Failed to pause send";
929 ret = false;
930 }
931 }
932
933 // Find the recording device id in VoiceEngine and set recording device.
934 if (!FindWebRtcAudioDeviceId(true, in_name, in_id, &in_id)) {
935 ret = false;
936 }
937 if (ret) {
938 if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
939 LOG_RTCERR2(SetRecordingDevice, in_device->name, in_id);
940 ret = false;
941 }
942 }
943
944 // Find the playout device id in VoiceEngine and set playout device.
945 if (!FindWebRtcAudioDeviceId(false, out_name, out_id, &out_id)) {
946 LOG(LS_WARNING) << "Failed to find VoiceEngine device id for " << out_name;
947 ret = false;
948 }
949 if (ret) {
950 if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
951 LOG_RTCERR2(SetPlayoutDevice, out_device->name, out_id);
952 ret = false;
953 }
954 }
955
956 // Resume all audio playback and capture.
957 for (ChannelList::const_iterator i = channels_.begin();
958 i != channels_.end(); ++i) {
959 WebRtcVoiceMediaChannel *channel = *i;
960 if (!channel->ResumePlayout()) {
961 LOG(LS_WARNING) << "Failed to resume playout";
962 ret = false;
963 }
964 if (!channel->ResumeSend()) {
965 LOG(LS_WARNING) << "Failed to resume send";
966 ret = false;
967 }
968 }
969
970 // Resume local monitor.
971 if (!ResumeLocalMonitor()) {
972 LOG(LS_WARNING) << "Failed to resume local monitor";
973 ret = false;
974 }
975
976 if (ret) {
977 LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
978 << ") and speaker to (id="<< out_id << " name=" << out_name
979 << ")";
980 }
981
982 return ret;
983#else
984 return true;
985#endif // !IOS && !ANDROID
986}
987
988bool WebRtcVoiceEngine::FindWebRtcAudioDeviceId(
989 bool is_input, const std::string& dev_name, int dev_id, int* rtc_id) {
990 // In Linux, VoiceEngine uses the same device dev_id as the device manager.
991#ifdef LINUX
992 *rtc_id = dev_id;
993 return true;
994#else
995 // In Windows and Mac, we need to find the VoiceEngine device id by name
996 // unless the input dev_id is the default device id.
997 if (kDefaultAudioDeviceId == dev_id) {
998 *rtc_id = dev_id;
999 return true;
1000 }
1001
1002 // Get the number of VoiceEngine audio devices.
1003 int count = 0;
1004 if (is_input) {
1005 if (-1 == voe_wrapper_->hw()->GetNumOfRecordingDevices(count)) {
1006 LOG_RTCERR0(GetNumOfRecordingDevices);
1007 return false;
1008 }
1009 } else {
1010 if (-1 == voe_wrapper_->hw()->GetNumOfPlayoutDevices(count)) {
1011 LOG_RTCERR0(GetNumOfPlayoutDevices);
1012 return false;
1013 }
1014 }
1015
1016 for (int i = 0; i < count; ++i) {
1017 char name[128];
1018 char guid[128];
1019 if (is_input) {
1020 voe_wrapper_->hw()->GetRecordingDeviceName(i, name, guid);
1021 LOG(LS_VERBOSE) << "VoiceEngine microphone " << i << ": " << name;
1022 } else {
1023 voe_wrapper_->hw()->GetPlayoutDeviceName(i, name, guid);
1024 LOG(LS_VERBOSE) << "VoiceEngine speaker " << i << ": " << name;
1025 }
1026
1027 std::string webrtc_name(name);
1028 if (dev_name.compare(0, webrtc_name.size(), webrtc_name) == 0) {
1029 *rtc_id = i;
1030 return true;
1031 }
1032 }
1033 LOG(LS_WARNING) << "VoiceEngine cannot find device: " << dev_name;
1034 return false;
1035#endif
1036}
1037
1038bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
1039 unsigned int ulevel;
1040 if (voe_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) {
1041 LOG_RTCERR1(GetSpeakerVolume, level);
1042 return false;
1043 }
1044 *level = ulevel;
1045 return true;
1046}
1047
1048bool WebRtcVoiceEngine::SetOutputVolume(int level) {
1049 ASSERT(level >= 0 && level <= 255);
1050 if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) {
1051 LOG_RTCERR1(SetSpeakerVolume, level);
1052 return false;
1053 }
1054 return true;
1055}
1056
1057int WebRtcVoiceEngine::GetInputLevel() {
1058 unsigned int ulevel;
1059 return (voe_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ?
1060 static_cast<int>(ulevel) : -1;
1061}
1062
1063bool WebRtcVoiceEngine::SetLocalMonitor(bool enable) {
1064 desired_local_monitor_enable_ = enable;
1065 return ChangeLocalMonitor(desired_local_monitor_enable_);
1066}
1067
1068bool WebRtcVoiceEngine::ChangeLocalMonitor(bool enable) {
1069 // The voe file api is not available in chrome.
1070 if (!voe_wrapper_->file()) {
1071 return false;
1072 }
1073 if (enable && !monitor_) {
1074 monitor_.reset(new WebRtcMonitorStream);
1075 if (voe_wrapper_->file()->StartRecordingMicrophone(monitor_.get()) == -1) {
1076 LOG_RTCERR1(StartRecordingMicrophone, monitor_.get());
1077 // Must call Stop() because there are some cases where Start will report
1078 // failure but still change the state, and if we leave VE in the on state
1079 // then it could crash later when trying to invoke methods on our monitor.
1080 voe_wrapper_->file()->StopRecordingMicrophone();
1081 monitor_.reset();
1082 return false;
1083 }
1084 } else if (!enable && monitor_) {
1085 voe_wrapper_->file()->StopRecordingMicrophone();
1086 monitor_.reset();
1087 }
1088 return true;
1089}
1090
1091bool WebRtcVoiceEngine::PauseLocalMonitor() {
1092 return ChangeLocalMonitor(false);
1093}
1094
1095bool WebRtcVoiceEngine::ResumeLocalMonitor() {
1096 return ChangeLocalMonitor(desired_local_monitor_enable_);
1097}
1098
1099const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() {
1100 return codecs_;
1101}
1102
1103bool WebRtcVoiceEngine::FindCodec(const AudioCodec& in) {
1104 return FindWebRtcCodec(in, NULL);
1105}
1106
1107// Get the VoiceEngine codec that matches |in|, with the supplied settings.
1108bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
1109 webrtc::CodecInst* out) {
1110 int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
1111 for (int i = 0; i < ncodecs; ++i) {
1112 webrtc::CodecInst voe_codec;
1113 if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
1114 AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
1115 voe_codec.rate, voe_codec.channels, 0);
1116 bool multi_rate = IsCodecMultiRate(voe_codec);
1117 // Allow arbitrary rates for ISAC to be specified.
1118 if (multi_rate) {
1119 // Set codec.bitrate to 0 so the check for codec.Matches() passes.
1120 codec.bitrate = 0;
1121 }
1122 if (codec.Matches(in)) {
1123 if (out) {
1124 // Fixup the payload type.
1125 voe_codec.pltype = in.id;
1126
1127 // Set bitrate if specified.
1128 if (multi_rate && in.bitrate != 0) {
1129 voe_codec.rate = in.bitrate;
1130 }
1131
1132 // Apply codec-specific settings.
1133 if (IsIsac(codec)) {
1134 // If ISAC and an explicit bitrate is not specified,
1135 // enable auto bandwidth adjustment.
1136 voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
1137 }
1138 *out = voe_codec;
1139 }
1140 return true;
1141 }
1142 }
1143 }
1144 return false;
1145}
1146const std::vector<RtpHeaderExtension>&
1147WebRtcVoiceEngine::rtp_header_extensions() const {
1148 return rtp_header_extensions_;
1149}
1150
1151void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
1152 // if min_sev == -1, we keep the current log level.
1153 if (min_sev >= 0) {
1154 SetTraceFilter(SeverityToFilter(min_sev));
1155 }
1156 log_options_ = filter;
1157 SetTraceOptions(initialized_ ? log_options_ : "");
1158}
1159
1160int WebRtcVoiceEngine::GetLastEngineError() {
1161 return voe_wrapper_->error();
1162}
1163
1164void WebRtcVoiceEngine::SetTraceFilter(int filter) {
1165 log_filter_ = filter;
1166 tracing_->SetTraceFilter(filter);
1167}
1168
1169// We support three different logging settings for VoiceEngine:
1170// 1. Observer callback that goes into talk diagnostic logfile.
1171// Use --logfile and --loglevel
1172//
1173// 2. Encrypted VoiceEngine log for debugging VoiceEngine.
1174// Use --voice_loglevel --voice_logfilter "tracefile file_name"
1175//
1176// 3. EC log and dump for debugging QualityEngine.
1177// Use --voice_loglevel --voice_logfilter "recordEC file_name"
1178//
1179// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
1180// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
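    //
    // For example (file names are illustrative only), passing the option string
    //   tracefile /tmp/voe.log recordEC /tmp/ec
    // sets /tmp/voe.log as the webrtc trace file and starts an AEC dump to
    // /tmp/ec, per the parsing below.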
1181void WebRtcVoiceEngine::SetTraceOptions(const std::string& options) {
1182 // Set encrypted trace file.
1183 std::vector<std::string> opts;
1184 talk_base::tokenize(options, ' ', '"', '"', &opts);
1185 std::vector<std::string>::iterator tracefile =
1186 std::find(opts.begin(), opts.end(), "tracefile");
1187 if (tracefile != opts.end() && ++tracefile != opts.end()) {
1188 // Write encrypted debug output (at same loglevel) to file
1189 // EncryptedTraceFile no longer supported.
1190 if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
1191 LOG_RTCERR1(SetTraceFile, *tracefile);
1192 }
1193 }
1194
1195 // Allow trace options to override the trace filter. We default
1196 // it to log_filter_ (as a translation of libjingle log levels)
1197 // elsewhere, but this allows clients to explicitly set webrtc
1198 // log levels.
1199 std::vector<std::string>::iterator tracefilter =
1200 std::find(opts.begin(), opts.end(), "tracefilter");
1201 if (tracefilter != opts.end() && ++tracefilter != opts.end()) {
1202 if (!tracing_->SetTraceFilter(talk_base::FromString<int>(*tracefilter))) {
1203 LOG_RTCERR1(SetTraceFilter, *tracefilter);
1204 }
1205 }
1206
1207 // Set AEC dump file
1208 std::vector<std::string>::iterator recordEC =
1209 std::find(opts.begin(), opts.end(), "recordEC");
1210 if (recordEC != opts.end()) {
1211 ++recordEC;
1212 if (recordEC != opts.end())
1213 StartAecDump(recordEC->c_str());
1214 else
1215 StopAecDump();
1216 }
1217}
1218
1219// Ignore spammy trace messages, mostly from the stats API when we haven't
1220// gotten RTCP info yet from the remote side.
1221bool WebRtcVoiceEngine::ShouldIgnoreTrace(const std::string& trace) {
1222 static const char* kTracesToIgnore[] = {
1223 "\tfailed to GetReportBlockInformation",
1224 "GetRecCodec() failed to get received codec",
1225 "GetReceivedRtcpStatistics: Could not get received RTP statistics",
1226 "GetRemoteRTCPData() failed to measure statistics due to lack of received RTP and/or RTCP packets", // NOLINT
1227 "GetRemoteRTCPData() failed to retrieve sender info for remote side",
1228 "GetRTPStatistics() failed to measure RTT since no RTP packets have been received yet", // NOLINT
1229 "GetRTPStatistics() failed to read RTP statistics from the RTP/RTCP module",
1230 "GetRTPStatistics() failed to retrieve RTT from the RTP/RTCP module",
1231 "SenderInfoReceived No received SR",
1232 "StatisticsRTP() no statistics available",
1233 "TransmitMixer::TypingDetection() VE_TYPING_NOISE_WARNING message has been posted", // NOLINT
1234 "TransmitMixer::TypingDetection() pending noise-saturation warning exists", // NOLINT
1235 "GetRecPayloadType() failed to retrieve RX payload type (error=10026)", // NOLINT
1236 "StopPlayingFileAsMicrophone() isnot playing (error=8088)",
1237 NULL
1238 };
1239 for (const char* const* p = kTracesToIgnore; *p; ++p) {
1240 if (trace.find(*p) != std::string::npos) {
1241 return true;
1242 }
1243 }
1244 return false;
1245}
1246
1247void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace,
1248 int length) {
1249 talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
1250 if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
1251 sev = talk_base::LS_ERROR;
1252 else if (level == webrtc::kTraceWarning)
1253 sev = talk_base::LS_WARNING;
1254 else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
1255 sev = talk_base::LS_INFO;
1256 else if (level == webrtc::kTraceTerseInfo)
1257 sev = talk_base::LS_INFO;
1258
1259 // Skip past boilerplate prefix text
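     // (we assume the first 71 characters of a webrtc trace line hold its
     // fixed-width header, so anything shorter is treated as malformed).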
1260 if (length < 72) {
1261 std::string msg(trace, length);
1262 LOG(LS_ERROR) << "Malformed webrtc log message: ";
1263 LOG_V(sev) << msg;
1264 } else {
1265 std::string msg(trace + 71, length - 72);
1266 if (!ShouldIgnoreTrace(msg)) {
1267 LOG_V(sev) << "webrtc: " << msg;
1268 }
1269 }
1270}
1271
1272void WebRtcVoiceEngine::CallbackOnError(int channel_num, int err_code) {
1273 talk_base::CritScope lock(&channels_cs_);
1274 WebRtcVoiceMediaChannel* channel = NULL;
1275 uint32 ssrc = 0;
1276 LOG(LS_WARNING) << "VoiceEngine error " << err_code << " reported on channel "
1277 << channel_num << ".";
1278 if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
1279 ASSERT(channel != NULL);
1280 channel->OnError(ssrc, err_code);
1281 } else {
1282 LOG(LS_ERROR) << "VoiceEngine channel " << channel_num
1283 << " could not be found in channel list when error reported.";
1284 }
1285}
1286
1287bool WebRtcVoiceEngine::FindChannelAndSsrc(
1288 int channel_num, WebRtcVoiceMediaChannel** channel, uint32* ssrc) const {
1289 ASSERT(channel != NULL && ssrc != NULL);
1290
1291 *channel = NULL;
1292 *ssrc = 0;
1293 // Find corresponding channel and ssrc
1294 for (ChannelList::const_iterator it = channels_.begin();
1295 it != channels_.end(); ++it) {
1296 ASSERT(*it != NULL);
1297 if ((*it)->FindSsrc(channel_num, ssrc)) {
1298 *channel = *it;
1299 return true;
1300 }
1301 }
1302
1303 return false;
1304}
1305
1306// This method will search through the WebRtcVoiceMediaChannels and
1307// obtain the voice engine's channel number.
1308bool WebRtcVoiceEngine::FindChannelNumFromSsrc(
1309 uint32 ssrc, MediaProcessorDirection direction, int* channel_num) {
1310 ASSERT(channel_num != NULL);
1311 ASSERT(direction == MPD_RX || direction == MPD_TX);
1312
1313 *channel_num = -1;
1314 // Find corresponding channel for ssrc.
1315 for (ChannelList::const_iterator it = channels_.begin();
1316 it != channels_.end(); ++it) {
1317 ASSERT(*it != NULL);
1318 if (direction & MPD_RX) {
1319 *channel_num = (*it)->GetReceiveChannelNum(ssrc);
1320 }
1321 if (*channel_num == -1 && (direction & MPD_TX)) {
1322 *channel_num = (*it)->GetSendChannelNum(ssrc);
1323 }
1324 if (*channel_num != -1) {
1325 return true;
1326 }
1327 }
1328 LOG(LS_WARNING) << "FindChannelFromSsrc. No Channel Found for Ssrc: " << ssrc;
1329 return false;
1330}
1331
1332void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel *channel) {
1333 talk_base::CritScope lock(&channels_cs_);
1334 channels_.push_back(channel);
1335}
1336
1337void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel *channel) {
1338 talk_base::CritScope lock(&channels_cs_);
1339 ChannelList::iterator i = std::find(channels_.begin(),
1340 channels_.end(),
1341 channel);
1342 if (i != channels_.end()) {
1343 channels_.erase(i);
1344 }
1345}
1346
1347void WebRtcVoiceEngine::RegisterSoundclip(WebRtcSoundclipMedia *soundclip) {
1348 soundclips_.push_back(soundclip);
1349}
1350
1351void WebRtcVoiceEngine::UnregisterSoundclip(WebRtcSoundclipMedia *soundclip) {
1352 SoundclipList::iterator i = std::find(soundclips_.begin(),
1353 soundclips_.end(),
1354 soundclip);
1355 if (i != soundclips_.end()) {
1356 soundclips_.erase(i);
1357 }
1358}
1359
1360// Adjusts the default AGC target level by the specified delta.
1361// NB: If we start messing with other config fields, we'll want
1362// to save the current webrtc::AgcConfig as well.
1363bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
1364 webrtc::AgcConfig config = default_agc_config_;
1365 config.targetLeveldBOv -= delta;
1366
1367 LOG(LS_INFO) << "Adjusting AGC level from default -"
1368 << default_agc_config_.targetLeveldBOv << "dB to -"
1369 << config.targetLeveldBOv << "dB";
1370
1371 if (voe_wrapper_->processing()->SetAgcConfig(config) == -1) {
1372 LOG_RTCERR1(SetAgcConfig, config.targetLeveldBOv);
1373 return false;
1374 }
1375 return true;
1376}
1377
1378bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm,
1379 webrtc::AudioDeviceModule* adm_sc) {
1380 if (initialized_) {
1381 LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init.";
1382 return false;
1383 }
1384 if (adm_) {
1385 adm_->Release();
1386 adm_ = NULL;
1387 }
1388 if (adm) {
1389 adm_ = adm;
1390 adm_->AddRef();
1391 }
1392
1393 if (adm_sc_) {
1394 adm_sc_->Release();
1395 adm_sc_ = NULL;
1396 }
1397 if (adm_sc) {
1398 adm_sc_ = adm_sc;
1399 adm_sc_->AddRef();
1400 }
1401 return true;
1402}
1403
1404bool WebRtcVoiceEngine::RegisterProcessor(
1405 uint32 ssrc,
1406 VoiceProcessor* voice_processor,
1407 MediaProcessorDirection direction) {
1408 bool register_with_webrtc = false;
1409 int channel_id = -1;
1410 bool success = false;
1411 uint32* processor_ssrc = NULL;
1412 bool found_channel = FindChannelNumFromSsrc(ssrc, direction, &channel_id);
1413 if (voice_processor == NULL || !found_channel) {
1414 LOG(LS_WARNING) << "Media Processing Registration Failed. ssrc: " << ssrc
1415 << " foundChannel: " << found_channel;
1416 return false;
1417 }
1418
1419 webrtc::ProcessingTypes processing_type;
1420 {
1421 talk_base::CritScope cs(&signal_media_critical_);
1422 if (direction == MPD_RX) {
1423 processing_type = webrtc::kPlaybackAllChannelsMixed;
1424 if (SignalRxMediaFrame.is_empty()) {
1425 register_with_webrtc = true;
1426 processor_ssrc = &rx_processor_ssrc_;
1427 }
1428 SignalRxMediaFrame.connect(voice_processor,
1429 &VoiceProcessor::OnFrame);
1430 } else {
1431 processing_type = webrtc::kRecordingPerChannel;
1432 if (SignalTxMediaFrame.is_empty()) {
1433 register_with_webrtc = true;
1434 processor_ssrc = &tx_processor_ssrc_;
1435 }
1436 SignalTxMediaFrame.connect(voice_processor,
1437 &VoiceProcessor::OnFrame);
1438 }
1439 }
1440 if (register_with_webrtc) {
1441 // TODO(janahan): when registering, consider instantiating a
1442 // VoeMediaProcess object instead of making the engine extend the interface.
1443 if (voe()->media() && voe()->media()->
1444 RegisterExternalMediaProcessing(channel_id,
1445 processing_type,
1446 *this) != -1) {
1447 LOG(LS_INFO) << "Media Processing Registration Succeeded. channel:"
1448 << channel_id;
1449 *processor_ssrc = ssrc;
1450 success = true;
1451 } else {
1452 LOG_RTCERR2(RegisterExternalMediaProcessing,
1453 channel_id,
1454 processing_type);
1455 success = false;
1456 }
1457 } else {
1458 // If we don't have to register with the engine, we only needed to
1459 // connect a new processor, so just report success.
1460 success = true;
1461 }
1462 return success;
1463}
1464
1465bool WebRtcVoiceEngine::UnregisterProcessorChannel(
1466 MediaProcessorDirection channel_direction,
1467 uint32 ssrc,
1468 VoiceProcessor* voice_processor,
1469 MediaProcessorDirection processor_direction) {
1470 bool success = true;
1471 FrameSignal* signal;
1472 webrtc::ProcessingTypes processing_type;
1473 uint32* processor_ssrc = NULL;
1474 if (channel_direction == MPD_RX) {
1475 signal = &SignalRxMediaFrame;
1476 processing_type = webrtc::kPlaybackAllChannelsMixed;
1477 processor_ssrc = &rx_processor_ssrc_;
1478 } else {
1479 signal = &SignalTxMediaFrame;
1480 processing_type = webrtc::kRecordingPerChannel;
1481 processor_ssrc = &tx_processor_ssrc_;
1482 }
1483
1484 int deregister_id = -1;
1485 {
1486 talk_base::CritScope cs(&signal_media_critical_);
1487 if ((processor_direction & channel_direction) != 0 && !signal->is_empty()) {
1488 signal->disconnect(voice_processor);
1489 int channel_id = -1;
1490 bool found_channel = FindChannelNumFromSsrc(ssrc,
1491 channel_direction,
1492 &channel_id);
1493 if (signal->is_empty() && found_channel) {
1494 deregister_id = channel_id;
1495 }
1496 }
1497 }
1498 if (deregister_id != -1) {
1499 if (voe()->media() &&
1500 voe()->media()->DeRegisterExternalMediaProcessing(deregister_id,
1501 processing_type) != -1) {
1502 *processor_ssrc = 0;
1503 LOG(LS_INFO) << "Media Processing DeRegistration Succeeded. channel:"
1504 << deregister_id;
1505 } else {
1506 LOG_RTCERR2(DeRegisterExternalMediaProcessing,
1507 deregister_id,
1508 processing_type);
1509 success = false;
1510 }
1511 }
1512 return success;
1513}
1514
1515bool WebRtcVoiceEngine::UnregisterProcessor(
1516 uint32 ssrc,
1517 VoiceProcessor* voice_processor,
1518 MediaProcessorDirection direction) {
1519 bool success = true;
1520 if (voice_processor == NULL) {
1521 LOG(LS_WARNING) << "Media Processing Deregistration Failed. ssrc: "
1522 << ssrc;
1523 return false;
1524 }
1525 if (!UnregisterProcessorChannel(MPD_RX, ssrc, voice_processor, direction)) {
1526 success = false;
1527 }
1528 if (!UnregisterProcessorChannel(MPD_TX, ssrc, voice_processor, direction)) {
1529 success = false;
1530 }
1531 return success;
1532}
1533
1534// Implementing method from WebRtc VoEMediaProcess interface
1535// Do not lock mux_channel_cs_ in this callback.
1536void WebRtcVoiceEngine::Process(int channel,
1537 webrtc::ProcessingTypes type,
1538 int16_t audio10ms[],
1539 int length,
1540 int sampling_freq,
1541 bool is_stereo) {
1542 talk_base::CritScope cs(&signal_media_critical_);
1543 AudioFrame frame(audio10ms, length, sampling_freq, is_stereo);
1544 if (type == webrtc::kPlaybackAllChannelsMixed) {
1545 SignalRxMediaFrame(rx_processor_ssrc_, MPD_RX, &frame);
1546 } else if (type == webrtc::kRecordingPerChannel) {
1547 SignalTxMediaFrame(tx_processor_ssrc_, MPD_TX, &frame);
1548 } else {
1549 LOG(LS_WARNING) << "Media Processing invoked unexpectedly."
1550 << " channel: " << channel << " type: " << type
1551 << " tx_ssrc: " << tx_processor_ssrc_
1552 << " rx_ssrc: " << rx_processor_ssrc_;
1553 }
1554}
1555
1556void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
1557 if (!is_dumping_aec_) {
1558 // Start dumping AEC when we are not dumping.
1559 if (voe_wrapper_->processing()->StartDebugRecording(
1560 filename.c_str()) != webrtc::AudioProcessing::kNoError) {
1561 LOG_RTCERR0(StartDebugRecording);
1562 } else {
1563 is_dumping_aec_ = true;
1564 }
1565 }
1566}
1567
1568void WebRtcVoiceEngine::StopAecDump() {
1569 if (is_dumping_aec_) {
1570 // Stop dumping AEC when we are dumping.
1571 if (voe_wrapper_->processing()->StopDebugRecording() !=
1572 webrtc::AudioProcessing::kNoError) {
1573 LOG_RTCERR0(StopDebugRecording);
1574 }
1575 is_dumping_aec_ = false;
1576 }
1577}
1578
1579// This struct relies on the generated copy constructor and assignment operator
1580// since it is used in a std::map.
1581struct WebRtcVoiceMediaChannel::WebRtcVoiceChannelInfo {
1582 WebRtcVoiceChannelInfo() : channel(-1), renderer(NULL) {}
1583 WebRtcVoiceChannelInfo(int ch, AudioRenderer* r)
1584 : channel(ch),
1585 renderer(r) {}
1586 ~WebRtcVoiceChannelInfo() {}
1587
1588 int channel;
1589 AudioRenderer* renderer;
1590};
1591
1592// WebRtcVoiceMediaChannel
1593WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
1594 : WebRtcMediaChannel<VoiceMediaChannel, WebRtcVoiceEngine>(
1595 engine,
1596 engine->voe()->base()->CreateChannel()),
wu@webrtc.org1d1ffc92013-10-16 18:12:02 +00001597 send_bw_setting_(false),
1598 send_autobw_(false),
1599 send_bw_bps_(0),
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001600 options_(),
1601 dtmf_allowed_(false),
1602 desired_playout_(false),
1603 nack_enabled_(false),
1604 playout_(false),
wu@webrtc.org967bfff2013-09-19 05:49:50 +00001605 typing_noise_detected_(false),
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001606 desired_send_(SEND_NOTHING),
1607 send_(SEND_NOTHING),
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001608 default_receive_ssrc_(0) {
1609 engine->RegisterChannel(this);
1610 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
1611 << voe_channel();
1612
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001613 ConfigureSendChannel(voe_channel());
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001614}
1615
1616WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
1617 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel "
1618 << voe_channel();
1619
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001620 // Remove any remaining send streams; the default channel will be deleted
1621 // later.
1622 while (!send_channels_.empty())
1623 RemoveSendStream(send_channels_.begin()->first);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001624
1625 // Unregister ourselves from the engine.
1626 engine()->UnregisterChannel(this);
1627 // Remove any remaining streams.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001628 while (!receive_channels_.empty()) {
1629 RemoveRecvStream(receive_channels_.begin()->first);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001630 }
1631
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001632 // Delete the default channel.
1633 DeleteChannel(voe_channel());
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001634}
1635
1636bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
1637 LOG(LS_INFO) << "Setting voice channel options: "
1638 << options.ToString();
1639
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001640 // TODO(xians): Add support to set different options for different send
1641 // streams after we support multiple APMs.
1642
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001643 // We retain all of the existing options, and apply the given ones
1644 // on top. This means there is no way to "clear" options such that
1645 // they go back to the engine default.
1646 options_.SetAll(options);
1647
1648 if (send_ != SEND_NOTHING) {
1649 if (!engine()->SetOptionOverrides(options_)) {
1650 LOG(LS_WARNING) <<
1651 "Failed to engine SetOptionOverrides during channel SetOptions.";
1652 return false;
1653 }
1654 } else {
1655 // The option overrides will be applied later, when we start sending.
1656 }
1657
wu@webrtc.org97077a32013-10-25 21:18:33 +00001658 // Receiver-side auto gain control happens per channel, so set it here from
1659 // options. Note that, like conference mode, setting it on the engine won't
1660 // have the desired effect, since voice channels don't inherit options from
1661 // the media engine when those options are applied per-channel.
1662 bool rx_auto_gain_control;
1663 if (options.rx_auto_gain_control.Get(&rx_auto_gain_control)) {
1664 if (engine()->voe()->processing()->SetRxAgcStatus(
1665 voe_channel(), rx_auto_gain_control,
1666 webrtc::kAgcFixedDigital) == -1) {
1667 LOG_RTCERR1(SetRxAgcStatus, rx_auto_gain_control);
1668 return false;
1669 } else {
1670 LOG(LS_VERBOSE) << "Rx auto gain set to " << rx_auto_gain_control
1671 << " with mode " << webrtc::kAgcFixedDigital;
1672 }
1673 }
1674 if (options.rx_agc_target_dbov.IsSet() ||
1675 options.rx_agc_digital_compression_gain.IsSet() ||
1676 options.rx_agc_limiter.IsSet()) {
1677 webrtc::AgcConfig config;
1678 // If only some of the options are being overridden, get the current
1679 // settings for the channel and bail if they aren't available.
1680 if (!options.rx_agc_target_dbov.IsSet() ||
1681 !options.rx_agc_digital_compression_gain.IsSet() ||
1682 !options.rx_agc_limiter.IsSet()) {
1683 if (engine()->voe()->processing()->GetRxAgcConfig(
1684 voe_channel(), config) != 0) {
1685 LOG(LS_ERROR) << "Failed to get default rx agc configuration for "
1686 << "channel " << voe_channel() << ". Since not all rx "
1687 << "agc options are specified, unable to safely set rx "
1688 << "agc options.";
1689 return false;
1690 }
1691 }
1692 config.targetLeveldBOv =
1693 options.rx_agc_target_dbov.GetWithDefaultIfUnset(
1694 config.targetLeveldBOv);
1695 config.digitalCompressionGaindB =
1696 options.rx_agc_digital_compression_gain.GetWithDefaultIfUnset(
1697 config.digitalCompressionGaindB);
1698 config.limiterEnable = options.rx_agc_limiter.GetWithDefaultIfUnset(
1699 config.limiterEnable);
1700 if (engine()->voe()->processing()->SetRxAgcConfig(
1701 voe_channel(), config) == -1) {
1702 LOG_RTCERR4(SetRxAgcConfig, voe_channel(), config.targetLeveldBOv,
1703 config.digitalCompressionGaindB, config.limiterEnable);
1704 return false;
1705 }
1706 }
1707
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001708 LOG(LS_INFO) << "Set voice channel options. Current options: "
1709 << options_.ToString();
1710 return true;
1711}
1712
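// Usage sketch for SetOptions() above (illustrative only; the Set() calls on
// the AudioOptions fields are assumptions based on the Get()/IsSet()
// accessors used above, and the values are placeholders):
//
//   AudioOptions options;
//   options.rx_auto_gain_control.Set(true);
//   options.rx_agc_target_dbov.Set(3);
//   channel->SetOptions(options);
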
1713bool WebRtcVoiceMediaChannel::SetRecvCodecs(
1714 const std::vector<AudioCodec>& codecs) {
1715 // Set the payload types to be used for incoming media.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001716 LOG(LS_INFO) << "Setting receive voice codecs:";
1717
1718 std::vector<AudioCodec> new_codecs;
1719 // Find all new codecs. We allow adding new codecs but don't allow changing
1720 // the payload type of codecs that are already configured since we might
1721 // already be receiving packets with that payload type.
1722 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001723 it != codecs.end(); ++it) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001724 AudioCodec old_codec;
1725 if (FindCodec(recv_codecs_, *it, &old_codec)) {
1726 if (old_codec.id != it->id) {
1727 LOG(LS_ERROR) << it->name << " payload type changed.";
1728 return false;
1729 }
1730 } else {
1731 new_codecs.push_back(*it);
1732 }
1733 }
1734 if (new_codecs.empty()) {
1735 // There are no new codecs to configure. Already configured codecs are
1736 // never removed.
1737 return true;
1738 }
1739
1740 if (playout_) {
1741 // Receive codecs cannot be changed while playing, so we temporarily
1742 // pause playout.
1743 PausePlayout();
1744 }
1745
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001746 bool ret = true;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001747 for (std::vector<AudioCodec>::const_iterator it = new_codecs.begin();
1748 it != new_codecs.end() && ret; ++it) {
1749 webrtc::CodecInst voe_codec;
1750 if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
1751 LOG(LS_INFO) << ToString(*it);
1752 voe_codec.pltype = it->id;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001753 if (default_receive_ssrc_ == 0) {
1754 // Set the receive codecs on the default channel explicitly if the
1755 // default channel is not used by |receive_channels_|; this happens in
1756 // conference mode or in non-conference mode when there is no playout
1757 // channel.
1758 // TODO(xians): Figure out how we use the default channel in conference
1759 // mode.
1760 if (engine()->voe()->codec()->SetRecPayloadType(
1761 voe_channel(), voe_codec) == -1) {
1762 LOG_RTCERR2(SetRecPayloadType, voe_channel(), ToString(voe_codec));
1763 ret = false;
1764 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001765 }
1766
1767 // Set the receive codecs on all receiving channels.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001768 for (ChannelMap::iterator it = receive_channels_.begin();
1769 it != receive_channels_.end() && ret; ++it) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001770 if (engine()->voe()->codec()->SetRecPayloadType(
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001771 it->second.channel, voe_codec) == -1) {
1772 LOG_RTCERR2(SetRecPayloadType, it->second.channel,
1773 ToString(voe_codec));
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001774 ret = false;
1775 }
1776 }
1777 } else {
1778 LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
1779 ret = false;
1780 }
1781 }
1782 if (ret) {
1783 recv_codecs_ = codecs;
1784 }
1785
1786 if (desired_playout_ && !playout_) {
1787 ResumePlayout();
1788 }
1789 return ret;
1790}
1791
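// Usage sketch for SetRecvCodecs() above (illustrative only; the AudioCodec
// constructor arguments follow the (id, name, clockrate, bitrate, channels,
// preference) form used elsewhere in this file):
//
//   std::vector<AudioCodec> codecs;
//   codecs.push_back(AudioCodec(0, "PCMU", 8000, 0, 1, 0));
//   codecs.push_back(AudioCodec(13, "CN", 8000, 0, 1, 0));
//   channel->SetRecvCodecs(codecs);
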
1792bool WebRtcVoiceMediaChannel::SetSendCodecs(
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001793 int channel, const std::vector<AudioCodec>& codecs) {
1794 // Disable VAD, NACK and FEC unless we know the other side wants them.
1795 engine()->voe()->codec()->SetVADStatus(channel, false);
1796 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0);
1797 engine()->voe()->rtp()->SetFECStatus(channel, false);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001798
1799 // Scan through the list to figure out the codec to use for sending, along
1800 // with the proper configuration for VAD and DTMF.
1801 bool first = true;
1802 webrtc::CodecInst send_codec;
1803 memset(&send_codec, 0, sizeof(send_codec));
1804
1805 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
1806 it != codecs.end(); ++it) {
1807 // Ignore codecs we don't know about. The negotiation step should prevent
1808 // this, but double-check to be sure.
1809 webrtc::CodecInst voe_codec;
1810 if (!engine()->FindWebRtcCodec(*it, &voe_codec)) {
1811 LOG(LS_WARNING) << "Unknown codec " << ToString(voe_codec);
1812 continue;
1813 }
1814
1815 // If OPUS, change what we send according to the "stereo" codec
1816 // parameter, and not the "channels" parameter. We set
1817 // voe_codec.channels to 2 if "stereo=1" and 1 otherwise. If
1818 // the bitrate is not specified, i.e. is zero, we set it to the
1819 // appropriate default value for mono or stereo Opus.
1820 if (IsOpus(*it)) {
1821 if (IsOpusStereoEnabled(*it)) {
1822 voe_codec.channels = 2;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001823 if (!IsValidOpusBitrate(it->bitrate)) {
1824 if (it->bitrate != 0) {
1825 LOG(LS_WARNING) << "Overriding the invalid supplied bitrate ("
1826 << it->bitrate
1827 << ") with the default Opus stereo bitrate: "
1828 << kOpusStereoBitrate;
1829 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001830 voe_codec.rate = kOpusStereoBitrate;
1831 }
1832 } else {
1833 voe_codec.channels = 1;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001834 if (!IsValidOpusBitrate(it->bitrate)) {
1835 if (it->bitrate != 0) {
1836 LOG(LS_WARNING) << "Overriding the invalid supplied bitrate ("
1837 << it->bitrate
1838 << ") with the default Opus mono bitrate: "
1839 << kOpusMonoBitrate;
1840 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001841 voe_codec.rate = kOpusMonoBitrate;
1842 }
1843 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001844 int bitrate_from_params = GetOpusBitrateFromParams(*it);
1845 if (bitrate_from_params != 0) {
1846 voe_codec.rate = bitrate_from_params;
1847 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001848 }
1849
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001850 // Find the DTMF telephone event "codec" and tell VoiceEngine channels
1851 // about it.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001852 if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
1853 _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001854 if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
1855 channel, it->id) == -1) {
1856 LOG_RTCERR2(SetSendTelephoneEventPayloadType, channel, it->id);
1857 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001858 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001859 }
1860
1861 // Turn voice activity detection/comfort noise on if supported.
1862 // Set the wideband CN payload type appropriately.
1863 // (narrowband always uses the static payload type 13).
1864 if (_stricmp(it->name.c_str(), "CN") == 0) {
1865 webrtc::PayloadFrequencies cn_freq;
1866 switch (it->clockrate) {
1867 case 8000:
1868 cn_freq = webrtc::kFreq8000Hz;
1869 break;
1870 case 16000:
1871 cn_freq = webrtc::kFreq16000Hz;
1872 break;
1873 case 32000:
1874 cn_freq = webrtc::kFreq32000Hz;
1875 break;
1876 default:
1877 LOG(LS_WARNING) << "CN frequency " << it->clockrate
1878 << " not supported.";
1879 continue;
1880 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001881 // Set the CN payload type and the VAD status.
1882 // The CN payload type for 8000 Hz clockrate is fixed at 13.
1883 if (cn_freq != webrtc::kFreq8000Hz) {
1884 if (engine()->voe()->codec()->SetSendCNPayloadType(
1885 channel, it->id, cn_freq) == -1) {
1886 LOG_RTCERR3(SetSendCNPayloadType, channel, it->id, cn_freq);
1887 // TODO(ajm): This failure condition will be removed from VoE.
1888 // Restore the return here when we update to a new enough webrtc.
1889 //
1890 // Not returning false because the SetSendCNPayloadType will fail if
1891 // the channel is already sending.
1892 // This can happen if the remote description is applied twice, for
1893 // example in the case of ROAP on top of JSEP, where both sides will
1894 // send the offer.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001895 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001896 }
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001897
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001898 // Only turn on VAD if we have a CN payload type that matches the
1899 // clockrate for the codec we are going to use.
1900 if (it->clockrate == send_codec.plfreq) {
1901 LOG(LS_INFO) << "Enabling VAD";
1902 if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) {
1903 LOG_RTCERR2(SetVADStatus, channel, true);
1904 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001905 }
1906 }
1907 }
1908
1909 // We'll use the first codec in the list to actually send audio data.
1910 // Be sure to use the payload type requested by the remote side.
1911 // "red", for FEC audio, is a special case where the actual codec to be
1912 // used is specified in params.
1913 if (first) {
1914 if (_stricmp(it->name.c_str(), "red") == 0) {
1915 // Parse out the RED parameters. If we fail, just ignore RED;
1916 // we don't support all possible params/usage scenarios.
1917 if (!GetRedSendCodec(*it, codecs, &send_codec)) {
1918 continue;
1919 }
1920
1921 // Enable redundant encoding of the specified codec. Treat any
1922 // failure as a fatal internal error.
1923 LOG(LS_INFO) << "Enabling FEC";
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001924 if (engine()->voe()->rtp()->SetFECStatus(channel, true, it->id) == -1) {
1925 LOG_RTCERR3(SetFECStatus, channel, true, it->id);
1926 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001927 }
1928 } else {
1929 send_codec = voe_codec;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00001930 nack_enabled_ = IsNackEnabled(*it);
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001931 SetNack(channel, nack_enabled_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001932 }
1933 first = false;
1934 // Set the codec immediately, since SetVADStatus() depends on whether
1935 // the current codec is mono or stereo.
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001936 if (!SetSendCodec(channel, send_codec))
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001937 return false;
1938 }
1939 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001940
1941 // If we're being asked to set an empty list of codecs, due to a buggy client,
1942 // choose the most common format: PCMU
1943 if (first) {
1944 LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
1945 AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
1946 engine()->FindWebRtcCodec(codec, &send_codec);
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001947 if (!SetSendCodec(channel, send_codec))
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001948 return false;
1949 }
1950
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001951 // Always update the |send_codec_| to the currently set send codec.
1952 send_codec_.reset(new webrtc::CodecInst(send_codec));
1953
wu@webrtc.org1d1ffc92013-10-16 18:12:02 +00001954 if (send_bw_setting_) {
1955 SetSendBandwidthInternal(send_autobw_, send_bw_bps_);
1956 }
1957
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001958 return true;
1959}
1960
1961bool WebRtcVoiceMediaChannel::SetSendCodecs(
1962 const std::vector<AudioCodec>& codecs) {
1963 dtmf_allowed_ = false;
1964 for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
1965 it != codecs.end(); ++it) {
1966 // Find the DTMF telephone event "codec".
1967 if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
1968 _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
1969 dtmf_allowed_ = true;
1970 }
1971 }
1972
1973 // Cache the codecs in order to configure the channel created later.
1974 send_codecs_ = codecs;
1975 for (ChannelMap::iterator iter = send_channels_.begin();
1976 iter != send_channels_.end(); ++iter) {
1977 if (!SetSendCodecs(iter->second.channel, codecs)) {
1978 return false;
1979 }
1980 }
1981
1982 SetNack(receive_channels_, nack_enabled_);
1983
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001984 return true;
1985}
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001986
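// Usage sketch for the public SetSendCodecs() above (illustrative only; the
// payload types are placeholders, and the first non-special codec in the list
// is the one actually used for sending):
//
//   std::vector<AudioCodec> codecs;
//   codecs.push_back(AudioCodec(0, "PCMU", 8000, 0, 1, 0));
//   codecs.push_back(AudioCodec(126, "telephone-event", 8000, 0, 1, 0));
//   channel->SetSendCodecs(codecs);  // telephone-event also enables DTMF.
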
1987void WebRtcVoiceMediaChannel::SetNack(const ChannelMap& channels,
1988 bool nack_enabled) {
1989 for (ChannelMap::const_iterator it = channels.begin();
1990 it != channels.end(); ++it) {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001991 SetNack(it->second.channel, nack_enabled);
wu@webrtc.org9dba5252013-08-05 20:36:57 +00001992 }
1993}
1994
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001995void WebRtcVoiceMediaChannel::SetNack(int channel, bool nack_enabled) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001996 if (nack_enabled) {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00001997 LOG(LS_INFO) << "Enabling NACK for channel " << channel;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00001998 engine()->voe()->rtp()->SetNACKStatus(channel, true, kNackMaxPackets);
1999 } else {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002000 LOG(LS_INFO) << "Disabling NACK for channel " << channel;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002001 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0);
2002 }
2003}
2004
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002005bool WebRtcVoiceMediaChannel::SetSendCodec(
2006 const webrtc::CodecInst& send_codec) {
2007 LOG(LS_INFO) << "Selected voice codec " << ToString(send_codec)
2008 << ", bitrate=" << send_codec.rate;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002009 for (ChannelMap::iterator iter = send_channels_.begin();
2010 iter != send_channels_.end(); ++iter) {
2011 if (!SetSendCodec(iter->second.channel, send_codec))
2012 return false;
2013 }
2014
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002015 return true;
2016}
2017
2018bool WebRtcVoiceMediaChannel::SetSendCodec(
2019 int channel, const webrtc::CodecInst& send_codec) {
2020 LOG(LS_INFO) << "Send channel " << channel << " selected voice codec "
2021 << ToString(send_codec) << ", bitrate=" << send_codec.rate;
2022
2023 if (engine()->voe()->codec()->SetSendCodec(channel, send_codec) == -1) {
2024 LOG_RTCERR2(SetSendCodec, channel, ToString(send_codec));
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002025 return false;
2026 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002027 return true;
2028}
2029
2030bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
2031 const std::vector<RtpHeaderExtension>& extensions) {
2032 // We don't support any incoming extensions headers right now.
2033 return true;
2034}
2035
2036bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
2037 const std::vector<RtpHeaderExtension>& extensions) {
2038 // Enable the audio level extension header if requested.
2039 std::vector<RtpHeaderExtension>::const_iterator it;
2040 for (it = extensions.begin(); it != extensions.end(); ++it) {
2041 if (it->uri == kRtpAudioLevelHeaderExtension) {
2042 break;
2043 }
2044 }
2045
2046 bool enable = (it != extensions.end());
2047 int id = 0;
2048
2049 if (enable) {
2050 id = it->id;
2051 if (id < kMinRtpHeaderExtensionId ||
2052 id > kMaxRtpHeaderExtensionId) {
2053 LOG(LS_WARNING) << "Invalid RTP header extension id " << id;
2054 return false;
2055 }
2056 }
2057
2058 LOG(LS_INFO) << "Enabling audio level header extension with ID " << id;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002059 for (ChannelMap::const_iterator iter = send_channels_.begin();
2060 iter != send_channels_.end(); ++iter) {
2061 if (engine()->voe()->rtp()->SetRTPAudioLevelIndicationStatus(
2062 iter->second.channel, enable, id) == -1) {
2063 LOG_RTCERR3(SetRTPAudioLevelIndicationStatus,
2064 iter->second.channel, enable, id);
2065 return false;
2066 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002067 }
2068
2069 return true;
2070}
2071
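// Usage sketch for SetSendRtpHeaderExtensions() above (illustrative only; the
// RtpHeaderExtension (uri, id) constructor is an assumption, and id 1 is a
// placeholder within the valid extension id range checked above):
//
//   std::vector<RtpHeaderExtension> extensions;
//   extensions.push_back(RtpHeaderExtension(kRtpAudioLevelHeaderExtension, 1));
//   channel->SetSendRtpHeaderExtensions(extensions);
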
2072bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) {
2073 desired_playout_ = playout;
2074 return ChangePlayout(desired_playout_);
2075}
2076
2077bool WebRtcVoiceMediaChannel::PausePlayout() {
2078 return ChangePlayout(false);
2079}
2080
2081bool WebRtcVoiceMediaChannel::ResumePlayout() {
2082 return ChangePlayout(desired_playout_);
2083}
2084
2085bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
2086 if (playout_ == playout) {
2087 return true;
2088 }
2089
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002090 // Change the playout of all channels to the new state.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002091 bool result = true;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002092 if (receive_channels_.empty()) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002093 // Only toggle the default channel if we don't have any other channels.
2094 result = SetPlayout(voe_channel(), playout);
2095 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002096 for (ChannelMap::iterator it = receive_channels_.begin();
2097 it != receive_channels_.end() && result; ++it) {
2098 if (!SetPlayout(it->second.channel, playout)) {
2099 LOG(LS_ERROR) << "SetPlayout " << playout << " on channel "
2100 << it->second.channel << " failed";
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002101 result = false;
2102 }
2103 }
2104
2105 if (result) {
2106 playout_ = playout;
2107 }
2108 return result;
2109}
2110
2111bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) {
2112 desired_send_ = send;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002113 if (!send_channels_.empty())
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002114 return ChangeSend(desired_send_);
2115 return true;
2116}
2117
2118bool WebRtcVoiceMediaChannel::PauseSend() {
2119 return ChangeSend(SEND_NOTHING);
2120}
2121
2122bool WebRtcVoiceMediaChannel::ResumeSend() {
2123 return ChangeSend(desired_send_);
2124}
2125
2126bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
2127 if (send_ == send) {
2128 return true;
2129 }
2130
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002131 // Apply the engine option overrides when starting to send from the microphone.
2132 if (send == SEND_MICROPHONE)
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002133 engine()->SetOptionOverrides(options_);
2134
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002135 // Change the settings on each send channel.
2136 for (ChannelMap::iterator iter = send_channels_.begin();
2137 iter != send_channels_.end(); ++iter) {
2138 if (!ChangeSend(iter->second.channel, send))
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002139 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002140 }
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002141
2142 // Clear the option overrides after we stop sending.
2143 if (send == SEND_NOTHING)
2144 engine()->ClearOptionOverrides();
2145
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002146 send_ = send;
2147 return true;
2148}
2149
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002150bool WebRtcVoiceMediaChannel::ChangeSend(int channel, SendFlags send) {
2151 if (send == SEND_MICROPHONE) {
2152 if (engine()->voe()->base()->StartSend(channel) == -1) {
2153 LOG_RTCERR1(StartSend, channel);
2154 return false;
2155 }
2156 if (engine()->voe()->file() &&
2157 engine()->voe()->file()->StopPlayingFileAsMicrophone(channel) == -1) {
2158 LOG_RTCERR1(StopPlayingFileAsMicrophone, channel);
2159 return false;
2160 }
2161 } else { // SEND_NOTHING
2162 ASSERT(send == SEND_NOTHING);
2163 if (engine()->voe()->base()->StopSend(channel) == -1) {
2164 LOG_RTCERR1(StopSend, channel);
2165 return false;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002166 }
2167 }
2168
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002169 return true;
2170}
2171
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002172void WebRtcVoiceMediaChannel::ConfigureSendChannel(int channel) {
2173 if (engine()->voe()->network()->RegisterExternalTransport(
2174 channel, *this) == -1) {
2175 LOG_RTCERR2(RegisterExternalTransport, channel, this);
2176 }
2177
2178 // Enable RTCP (for quality stats and feedback messages)
2179 EnableRtcp(channel);
2180
2181 // Reset all recv codecs; they will be enabled via SetRecvCodecs.
2182 ResetRecvCodecs(channel);
2183}
2184
2185bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
2186 if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) {
2187 LOG_RTCERR1(DeRegisterExternalTransport, channel);
2188 }
2189
2190 if (engine()->voe()->base()->DeleteChannel(channel) == -1) {
2191 LOG_RTCERR1(DeleteChannel, channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002192 return false;
2193 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002194
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002195 return true;
2196}
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002197
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002198bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
2199 // If the default channel is already used for sending, create a new channel;
2200 // otherwise use the default channel for sending.
2201 int channel = GetSendChannelNum(sp.first_ssrc());
2202 if (channel != -1) {
2203 LOG(LS_ERROR) << "Stream already exists with ssrc " << sp.first_ssrc();
2204 return false;
2205 }
2206
2207 bool default_channel_is_available = true;
2208 for (ChannelMap::const_iterator iter = send_channels_.begin();
2209 iter != send_channels_.end(); ++iter) {
2210 if (IsDefaultChannel(iter->second.channel)) {
2211 default_channel_is_available = false;
2212 break;
2213 }
2214 }
2215 if (default_channel_is_available) {
2216 channel = voe_channel();
2217 } else {
2218 // Create a new channel for sending audio data.
2219 channel = engine()->voe()->base()->CreateChannel();
2220 if (channel == -1) {
2221 LOG_RTCERR0(CreateChannel);
2222 return false;
2223 }
2224
2225 ConfigureSendChannel(channel);
2226 }
2227
2228 // Save the channel to send_channels_, so that RemoveSendStream() can still
2229 // delete the channel in case failure happens below.
2230 send_channels_[sp.first_ssrc()] = WebRtcVoiceChannelInfo(channel, NULL);
2231
2232 // Set the send (local) SSRC.
2233 // If there are multiple send SSRCs, we can only set the first one here, and
2234 // the rest of the SSRC(s) need to be set after SetSendCodec has been called
2235 // (with a codec that requires multiple SSRCs).
2236 if (engine()->voe()->rtp()->SetLocalSSRC(channel, sp.first_ssrc()) == -1) {
2237 LOG_RTCERR2(SetSendSSRC, channel, sp.first_ssrc());
2238 return false;
2239 }
2240
2241 // At this point the channel's local SSRC has been updated. If the channel is
2242 // the default channel, make sure that all the receive channels are updated as
2243 // well. Receive channels have to have the same SSRC as the default channel in
2244 // order to send receiver reports with this SSRC.
2245 if (IsDefaultChannel(channel)) {
2246 for (ChannelMap::const_iterator it = receive_channels_.begin();
2247 it != receive_channels_.end(); ++it) {
2248 // Only update the SSRC for non-default channels.
2249 if (!IsDefaultChannel(it->second.channel)) {
2250 if (engine()->voe()->rtp()->SetLocalSSRC(it->second.channel,
2251 sp.first_ssrc()) != 0) {
2252 LOG_RTCERR2(SetLocalSSRC, it->second.channel, sp.first_ssrc());
2253 return false;
2254 }
2255 }
2256 }
2257 }
2258
2259 if (engine()->voe()->rtp()->SetRTCP_CNAME(channel, sp.cname.c_str()) == -1) {
2260 LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
2261 return false;
2262 }
2263
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002264 // Set the current codecs to be used for the new channel.
2265 if (!send_codecs_.empty() && !SetSendCodecs(channel, send_codecs_))
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002266 return false;
2267
2268 return ChangeSend(channel, desired_send_);
2269}
2270
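// Usage sketch for AddSendStream() above (illustrative only;
// StreamParams::CreateLegacy() is assumed to exist in streamparams.h and
// kSsrc is a placeholder):
//
//   if (channel->AddSendStream(StreamParams::CreateLegacy(kSsrc))) {
//     channel->SetSend(SEND_MICROPHONE);
//     ...
//     channel->RemoveSendStream(kSsrc);
//   }
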
2271bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
2272 ChannelMap::iterator it = send_channels_.find(ssrc);
2273 if (it == send_channels_.end()) {
2274 LOG(LS_WARNING) << "Trying to remove a stream with ssrc " << ssrc
2275 << " which doesn't exist.";
2276 return false;
2277 }
2278
2279 int channel = it->second.channel;
2280 ChangeSend(channel, SEND_NOTHING);
2281
2282 // Notify the audio renderer that the send channel is going away.
2283 if (it->second.renderer)
2284 it->second.renderer->RemoveChannel(channel);
2285
2286 if (IsDefaultChannel(channel)) {
2287 // Do not delete the default channel since the receive channels depend on
2288 // the default channel; recycle it instead.
2289 ChangeSend(channel, SEND_NOTHING);
2290 } else {
2291 // Clean up and delete the send channel.
2292 LOG(LS_INFO) << "Removing audio send stream " << ssrc
2293 << " with VoiceEngine channel #" << channel << ".";
2294 if (!DeleteChannel(channel))
2295 return false;
2296 }
2297
2298 send_channels_.erase(it);
2299 if (send_channels_.empty())
2300 ChangeSend(SEND_NOTHING);
2301
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002302 return true;
2303}
2304
2305bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002306 talk_base::CritScope lock(&receive_channels_cs_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002307
2308 if (!VERIFY(sp.ssrcs.size() == 1))
2309 return false;
2310 uint32 ssrc = sp.first_ssrc();
2311
wu@webrtc.org78187522013-10-07 23:32:02 +00002312 if (ssrc == 0) {
2313 LOG(LS_WARNING) << "AddRecvStream with 0 ssrc is not supported.";
2314 return false;
2315 }
2316
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002317 if (receive_channels_.find(ssrc) != receive_channels_.end()) {
2318 LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002319 return false;
2320 }
2321
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002322 // Reuse the default channel for the recv stream in a non-conference mode call
2323 // when the default channel is not being used.
2324 if (!InConferenceMode() && default_receive_ssrc_ == 0) {
2325 LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
2326 << " reuse default channel";
2327 default_receive_ssrc_ = sp.first_ssrc();
2328 receive_channels_.insert(std::make_pair(
2329 default_receive_ssrc_, WebRtcVoiceChannelInfo(voe_channel(), NULL)));
2330 return SetPlayout(voe_channel(), playout_);
2331 }
2332
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002333 // Create a new channel for receiving audio data.
2334 int channel = engine()->voe()->base()->CreateChannel();
2335 if (channel == -1) {
2336 LOG_RTCERR0(CreateChannel);
2337 return false;
2338 }
2339
wu@webrtc.org78187522013-10-07 23:32:02 +00002340 if (!ConfigureRecvChannel(channel)) {
2341 DeleteChannel(channel);
2342 return false;
2343 }
2344
2345 receive_channels_.insert(
2346 std::make_pair(ssrc, WebRtcVoiceChannelInfo(channel, NULL)));
2347
2348 LOG(LS_INFO) << "New audio stream " << ssrc
2349 << " registered to VoiceEngine channel #"
2350 << channel << ".";
2351 return true;
2352}
2353
2354bool WebRtcVoiceMediaChannel::ConfigureRecvChannel(int channel) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002355 // Configure to use external transport, like our default channel.
2356 if (engine()->voe()->network()->RegisterExternalTransport(
2357 channel, *this) == -1) {
2358 LOG_RTCERR2(SetExternalTransport, channel, this);
2359 return false;
2360 }
2361
2362 // Use the same SSRC as our default channel (so the RTCP reports are correct).
2363 unsigned int send_ssrc;
2364 webrtc::VoERTP_RTCP* rtp = engine()->voe()->rtp();
2365 if (rtp->GetLocalSSRC(voe_channel(), send_ssrc) == -1) {
2366 LOG_RTCERR2(GetSendSSRC, channel, send_ssrc);
2367 return false;
2368 }
2369 if (rtp->SetLocalSSRC(channel, send_ssrc) == -1) {
2370 LOG_RTCERR2(SetSendSSRC, channel, send_ssrc);
2371 return false;
2372 }
2373
2374 // Use the same recv payload types as our default channel.
2375 ResetRecvCodecs(channel);
2376 if (!recv_codecs_.empty()) {
2377 for (std::vector<AudioCodec>::const_iterator it = recv_codecs_.begin();
2378 it != recv_codecs_.end(); ++it) {
2379 webrtc::CodecInst voe_codec;
2380 if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
2381 voe_codec.pltype = it->id;
2382 voe_codec.rate = 0; // Needed to make GetRecPayloadType work for ISAC
2383 if (engine()->voe()->codec()->GetRecPayloadType(
2384 voe_channel(), voe_codec) != -1) {
2385 if (engine()->voe()->codec()->SetRecPayloadType(
2386 channel, voe_codec) == -1) {
2387 LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
2388 return false;
2389 }
2390 }
2391 }
2392 }
2393 }
2394
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002395 if (InConferenceMode()) {
2396 // To be on par with the video, voe_channel() is not used for receiving in
2397 // a conference call.
2398 if (receive_channels_.empty() && default_receive_ssrc_ == 0 && playout_) {
2399 // This is the first stream in a multi-user meeting. We can now
2400 // disable playback of the default stream. This is because the default
2401 // stream will probably have received some initial packets before
2402 // the new stream was added. This will mean that the CN state from
2403 // the default channel will be mixed in with the other streams
2404 // throughout the whole meeting, which might be disturbing.
2405 LOG(LS_INFO) << "Disabling playback on the default voice channel";
2406 SetPlayout(voe_channel(), false);
2407 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002408 }
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002409 SetNack(channel, nack_enabled_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002410
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002411 return SetPlayout(channel, playout_);
2412}
2413
2414bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002415 talk_base::CritScope lock(&receive_channels_cs_);
2416 ChannelMap::iterator it = receive_channels_.find(ssrc);
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002417 if (it == receive_channels_.end()) {
2418 LOG(LS_WARNING) << "Trying to remove a stream with ssrc " << ssrc
2419 << " which doesn't exist.";
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002420 return false;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002421 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002422
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002423 if (ssrc == default_receive_ssrc_) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002424 ASSERT(IsDefaultChannel(it->second.channel));
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002425 // Recycle the default channel that was used for the recv stream.
2426 if (playout_)
2427 SetPlayout(voe_channel(), false);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002428
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002429 if (it->second.renderer)
2430 it->second.renderer->RemoveChannel(voe_channel());
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002431
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002432 default_receive_ssrc_ = 0;
2433 receive_channels_.erase(it);
2434 return true;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002435 }
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002436
2437 // Non default channel.
2438 // Notify the renderer that channel is going away.
2439 if (it->second.renderer)
2440 it->second.renderer->RemoveChannel(it->second.channel);
2441
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002442 LOG(LS_INFO) << "Removing audio stream " << ssrc
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002443 << " with VoiceEngine channel #" << it->second.channel << ".";
2444 if (!DeleteChannel(it->second.channel)) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002445 // Erase the entry anyhow.
2446 receive_channels_.erase(it);
2447 return false;
2448 }
2449
2450 receive_channels_.erase(it);
2451 bool enable_default_channel_playout = false;
2452 if (receive_channels_.empty()) {
2453 // The last stream was removed. We can now enable the default
2454 // channel for new channels to be played out immediately without
2455 // waiting for AddStream messages.
2456 // We do this for both conference mode and non-conference mode.
2457 // TODO(oja): Does the default channel still have its CN state?
2458 enable_default_channel_playout = true;
2459 }
2460 if (!InConferenceMode() && receive_channels_.size() == 1 &&
2461 default_receive_ssrc_ != 0) {
2462 // Only the default channel is active, enable the playout on default
2463 // channel.
2464 enable_default_channel_playout = true;
2465 }
2466 if (enable_default_channel_playout && playout_) {
2467 LOG(LS_INFO) << "Enabling playback on the default voice channel";
2468 SetPlayout(voe_channel(), true);
2469 }
2470
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002471 return true;
2472}
2473
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002474bool WebRtcVoiceMediaChannel::SetRemoteRenderer(uint32 ssrc,
2475 AudioRenderer* renderer) {
2476 ChannelMap::iterator it = receive_channels_.find(ssrc);
2477 if (it == receive_channels_.end()) {
2478 if (renderer) {
2479 // Return an error if trying to set a valid renderer with an invalid ssrc.
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002480 LOG(LS_ERROR) << "SetRemoteRenderer failed with ssrc " << ssrc;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002481 return false;
2482 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002483
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002484 // The channel likely has gone away, do nothing.
2485 return true;
2486 }
2487
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002488 AudioRenderer* remote_renderer = it->second.renderer;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002489 if (renderer) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002490 ASSERT(remote_renderer == NULL || remote_renderer == renderer);
2491 if (!remote_renderer) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002492 renderer->AddChannel(it->second.channel);
2493 }
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002494 } else if (remote_renderer) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002495 // |renderer| == NULL, remove the channel from the renderer.
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002496 remote_renderer->RemoveChannel(it->second.channel);
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002497 }
2498
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002499 // Assign the new value to the struct.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002500 it->second.renderer = renderer;
2501 return true;
2502}
2503
2504bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32 ssrc,
2505 AudioRenderer* renderer) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002506 ChannelMap::iterator it = send_channels_.find(ssrc);
2507 if (it == send_channels_.end()) {
2508 if (renderer) {
2509 // Return an error if trying to set a valid renderer with an invalid ssrc.
2510 LOG(LS_ERROR) << "SetLocalRenderer failed with ssrc " << ssrc;
2511 return false;
2512 }
2513
2514 // The channel likely has gone away, do nothing.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002515 return true;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002516 }
2517
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002518 AudioRenderer* local_renderer = it->second.renderer;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002519 if (renderer) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002520 ASSERT(local_renderer == NULL || local_renderer == renderer);
2521 if (!local_renderer)
2522 renderer->AddChannel(it->second.channel);
2523 } else if (local_renderer) {
2524 local_renderer->RemoveChannel(it->second.channel);
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002525 }
2526
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002527 // Assign the new value to the struct.
2528 it->second.renderer = renderer;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002529 return true;
2530}
2531
2532bool WebRtcVoiceMediaChannel::GetActiveStreams(
2533 AudioInfo::StreamList* actives) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002534 // In conference mode, the default channel should not be in
2535 // |receive_channels_|.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002536 actives->clear();
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002537 for (ChannelMap::iterator it = receive_channels_.begin();
2538 it != receive_channels_.end(); ++it) {
2539 int level = GetOutputLevel(it->second.channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002540 if (level > 0) {
2541 actives->push_back(std::make_pair(it->first, level));
2542 }
2543 }
2544 return true;
2545}
2546
2547int WebRtcVoiceMediaChannel::GetOutputLevel() {
2548 // Return the highest output level of all streams.
2549 int highest = GetOutputLevel(voe_channel());
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002550 for (ChannelMap::iterator it = receive_channels_.begin();
2551 it != receive_channels_.end(); ++it) {
2552 int level = GetOutputLevel(it->second.channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002553 highest = talk_base::_max(level, highest);
2554 }
2555 return highest;
2556}
2557
2558int WebRtcVoiceMediaChannel::GetTimeSinceLastTyping() {
2559 int ret;
2560 if (engine()->voe()->processing()->TimeSinceLastTyping(ret) == -1) {
2561 // In case of error, log the info and continue
2562 LOG_RTCERR0(TimeSinceLastTyping);
2563 ret = -1;
2564 } else {
2565 ret *= 1000; // We return ms, webrtc returns seconds.
2566 }
2567 return ret;
2568}
2569
2570void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window,
2571 int cost_per_typing, int reporting_threshold, int penalty_decay,
2572 int type_event_delay) {
2573 if (engine()->voe()->processing()->SetTypingDetectionParameters(
2574 time_window, cost_per_typing,
2575 reporting_threshold, penalty_decay, type_event_delay) == -1) {
2576 // In case of error, log the info and continue
2577 LOG_RTCERR5(SetTypingDetectionParameters, time_window,
2578 cost_per_typing, reporting_threshold, penalty_decay,
2579 type_event_delay);
2580 }
2581}
2582
2583bool WebRtcVoiceMediaChannel::SetOutputScaling(
2584 uint32 ssrc, double left, double right) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002585 talk_base::CritScope lock(&receive_channels_cs_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002586 // Collect the channels to scale the output volume.
2587 std::vector<int> channels;
2588 if (0 == ssrc) { // Collect all channels, including the default one.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002589 // Default channel is not in receive_channels_ if it is not being used for
2590 // playout.
2591 if (default_receive_ssrc_ == 0)
2592 channels.push_back(voe_channel());
2593 for (ChannelMap::const_iterator it = receive_channels_.begin();
2594 it != receive_channels_.end(); ++it) {
2595 channels.push_back(it->second.channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002596 }
2597 } else { // Collect only the channel of the specified ssrc.
2598 int channel = GetReceiveChannelNum(ssrc);
2599 if (-1 == channel) {
2600 LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
2601 return false;
2602 }
2603 channels.push_back(channel);
2604 }
2605
2606 // Scale the output volume for the collected channels: the larger of |left|
2607 // and |right| is used as the volume scaling, and the normalized values set the pan.
2608 float scale = static_cast<float>(talk_base::_max(left, right));
2609 if (scale > 0.0001f) {
2610 left /= scale;
2611 right /= scale;
2612 }
2613 for (std::vector<int>::const_iterator it = channels.begin();
2614 it != channels.end(); ++it) {
2615 if (-1 == engine()->voe()->volume()->SetChannelOutputVolumeScaling(
2616 *it, scale)) {
2617 LOG_RTCERR2(SetChannelOutputVolumeScaling, *it, scale);
2618 return false;
2619 }
2620 if (-1 == engine()->voe()->volume()->SetOutputVolumePan(
2621 *it, static_cast<float>(left), static_cast<float>(right))) {
2622 LOG_RTCERR3(SetOutputVolumePan, *it, left, right);
2623 // Do not return on failure; SetOutputVolumePan is not available on all
2624 // platforms.
2625 }
2626 LOG(LS_INFO) << "SetOutputScaling to left=" << left * scale
2627 << " right=" << right * scale
2628 << " for channel " << *it << " and ssrc " << ssrc;
2629 }
2630 return true;
2631}
2632
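// Usage sketch for SetOutputScaling() above (illustrative only): pan a
// stream's playout fully to the left at half volume; passing ssrc 0 applies
// the scaling to all receive channels, including the default one.
//
//   channel->SetOutputScaling(ssrc, 0.5, 0.0);
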
2633bool WebRtcVoiceMediaChannel::GetOutputScaling(
2634 uint32 ssrc, double* left, double* right) {
2635 if (!left || !right) return false;
2636
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00002637 talk_base::CritScope lock(&receive_channels_cs_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002638 // Determine which channel based on ssrc.
2639 int channel = (0 == ssrc) ? voe_channel() : GetReceiveChannelNum(ssrc);
2640 if (channel == -1) {
2641 LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
2642 return false;
2643 }
2644
2645 float scaling;
2646 if (-1 == engine()->voe()->volume()->GetChannelOutputVolumeScaling(
2647 channel, scaling)) {
2648 LOG_RTCERR2(GetChannelOutputVolumeScaling, channel, scaling);
2649 return false;
2650 }
2651
2652 float left_pan;
2653 float right_pan;
2654 if (-1 == engine()->voe()->volume()->GetOutputVolumePan(
2655 channel, left_pan, right_pan)) {
2656 LOG_RTCERR3(GetOutputVolumePan, channel, left_pan, right_pan);
2657 // If GetOutputVolumePan fails, we use the default left and right pan.
2658 left_pan = 1.0f;
2659 right_pan = 1.0f;
2660 }
2661
2662 *left = scaling * left_pan;
2663 *right = scaling * right_pan;
2664 return true;
2665}
2666
2667bool WebRtcVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
2668 ringback_tone_.reset(new WebRtcSoundclipStream(buf, len));
2669 return true;
2670}
2671
2672bool WebRtcVoiceMediaChannel::PlayRingbackTone(uint32 ssrc,
2673 bool play, bool loop) {
2674 if (!ringback_tone_) {
2675 return false;
2676 }
2677
2678 // The voe file api is not available in chrome.
2679 if (!engine()->voe()->file()) {
2680 return false;
2681 }
2682
2683 // Determine which VoiceEngine channel to play on.
2684 int channel = (ssrc == 0) ? voe_channel() : GetReceiveChannelNum(ssrc);
2685 if (channel == -1) {
2686 return false;
2687 }
2688
2689 // Make sure the ringtone is cued properly, and play it out.
2690 if (play) {
2691 ringback_tone_->set_loop(loop);
2692 ringback_tone_->Rewind();
2693 if (engine()->voe()->file()->StartPlayingFileLocally(channel,
2694 ringback_tone_.get()) == -1) {
2695 LOG_RTCERR2(StartPlayingFileLocally, channel, ringback_tone_.get());
2696 LOG(LS_ERROR) << "Unable to start ringback tone";
2697 return false;
2698 }
2699 ringback_channels_.insert(channel);
2700 LOG(LS_INFO) << "Started ringback on channel " << channel;
2701 } else {
2702 if (engine()->voe()->file()->IsPlayingFileLocally(channel) == 1 &&
2703 engine()->voe()->file()->StopPlayingFileLocally(channel) == -1) {
2704 LOG_RTCERR1(StopPlayingFileLocally, channel);
2705 return false;
2706 }
2707 LOG(LS_INFO) << "Stopped ringback on channel " << channel;
2708 ringback_channels_.erase(channel);
2709 }
2710
2711 return true;
2712}
2713
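// Usage sketch for the ringback helpers above (illustrative only; |tone_buf|
// and |tone_len| are assumed to hold a ringback clip supplied by the
// application):
//
//   channel->SetRingbackTone(tone_buf, tone_len);
//   channel->PlayRingbackTone(0, true, true);    // Start, looped, default channel.
//   ...
//   channel->PlayRingbackTone(0, false, false);  // Stop.
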
2714bool WebRtcVoiceMediaChannel::CanInsertDtmf() {
2715 return dtmf_allowed_;
2716}
2717
2718bool WebRtcVoiceMediaChannel::InsertDtmf(uint32 ssrc, int event,
2719 int duration, int flags) {
2720 if (!dtmf_allowed_) {
2721 return false;
2722 }
2723
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002724 // Send the event.
2725 if (flags & cricket::DF_SEND) {
wu@webrtc.orgcadf9042013-08-30 21:24:16 +00002726 int channel = -1;
2727 if (ssrc == 0) {
2728 bool default_channel_is_inuse = false;
2729 for (ChannelMap::const_iterator iter = send_channels_.begin();
2730 iter != send_channels_.end(); ++iter) {
2731 if (IsDefaultChannel(iter->second.channel)) {
2732 default_channel_is_inuse = true;
2733 break;
2734 }
2735 }
2736 if (default_channel_is_inuse) {
2737 channel = voe_channel();
2738 } else if (!send_channels_.empty()) {
2739 channel = send_channels_.begin()->second.channel;
2740 }
2741 } else {
2742 channel = GetSendChannelNum(ssrc);
2743 }
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002744 if (channel == -1) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002745 LOG(LS_WARNING) << "InsertDtmf - The specified ssrc "
2746 << ssrc << " is not in use.";
2747 return false;
2748 }
2749 // Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002750 if (engine()->voe()->dtmf()->SendTelephoneEvent(
2751 channel, event, true, duration) == -1) {
2752 LOG_RTCERR4(SendTelephoneEvent, channel, event, true, duration);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002753 return false;
2754 }
2755 }
2756
2757 // Play the event.
2758 if (flags & cricket::DF_PLAY) {
2759 // Play DTMF tone locally.
2760 if (engine()->voe()->dtmf()->PlayDtmfTone(event, duration) == -1) {
2761 LOG_RTCERR2(PlayDtmfTone, event, duration);
2762 return false;
2763 }
2764 }
2765
2766 return true;
2767}
2768
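// Usage sketch for InsertDtmf() above (illustrative only; event 5 and the
// 160 ms duration are placeholders): send a DTMF event and also play it
// locally, assuming the negotiated send codecs included telephone-event so
// that dtmf_allowed_ is true.
//
//   if (channel->CanInsertDtmf()) {
//     channel->InsertDtmf(0, 5, 160, cricket::DF_SEND | cricket::DF_PLAY);
//   }
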
2769void WebRtcVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
2770 // Pick which channel to send this packet to. If this packet doesn't match
2771 // any multiplexed streams, just send it to the default channel. Otherwise,
2772 // send it to the specific decoder instance for that stream.
2773 int which_channel = GetReceiveChannelNum(
2774 ParseSsrc(packet->data(), packet->length(), false));
2775 if (which_channel == -1) {
2776 which_channel = voe_channel();
2777 }
2778
2779 // Stop any ringback that might be playing on the channel.
2780 // It's possible the ringback has already stopped, in which case we'll just
2781 // use the opportunity to remove the channel from ringback_channels_.
2782 if (engine()->voe()->file()) {
2783 const std::set<int>::iterator it = ringback_channels_.find(which_channel);
2784 if (it != ringback_channels_.end()) {
2785 if (engine()->voe()->file()->IsPlayingFileLocally(
2786 which_channel) == 1) {
2787 engine()->voe()->file()->StopPlayingFileLocally(which_channel);
2788 LOG(LS_INFO) << "Stopped ringback on channel " << which_channel
2789 << " due to incoming media";
2790 }
2791 ringback_channels_.erase(which_channel);
2792 }
2793 }
2794
2795 // Pass it off to the decoder.
henrike@webrtc.org28654cb2013-07-22 21:07:49 +00002796 engine()->voe()->network()->ReceivedRTPPacket(
2797 which_channel,
2798 packet->data(),
2799 static_cast<unsigned int>(packet->length()));
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002800}
2801
2802void WebRtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002803 // Sending channels need all RTCP packets with feedback information.
2804 // Even sender reports can contain attached report blocks.
2805 // Receiving channels need sender reports in order to create
2806 // correct receiver reports.
2807 int type = 0;
2808 if (!GetRtcpType(packet->data(), packet->length(), &type)) {
2809 LOG(LS_WARNING) << "Failed to parse type from received RTCP packet";
2810 return;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002811 }
2812
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002813 // If it is a sender report, find the channel that is listening.
2814 bool has_sent_to_default_channel = false;
2815 if (type == kRtcpTypeSR) {
2816 int which_channel = GetReceiveChannelNum(
2817 ParseSsrc(packet->data(), packet->length(), true));
2818 if (which_channel != -1) {
2819 engine()->voe()->network()->ReceivedRTCPPacket(
2820 which_channel,
2821 packet->data(),
2822 static_cast<unsigned int>(packet->length()));
2823
2824 if (IsDefaultChannel(which_channel))
2825 has_sent_to_default_channel = true;
2826 }
2827 }
2828
2829 // An SR may contain RRs, and any RR entry may correspond to any one of the
2830 // send channels. So all RTCP packets must be forwarded to all send channels.
2831 // VoE will filter out the RRs internally.
2832 for (ChannelMap::iterator iter = send_channels_.begin();
2833 iter != send_channels_.end(); ++iter) {
2834 // Make sure not sending the same packet to default channel more than once.
2835 if (IsDefaultChannel(iter->second.channel) && has_sent_to_default_channel)
2836 continue;
2837
2838 engine()->voe()->network()->ReceivedRTCPPacket(
2839 iter->second.channel,
2840 packet->data(),
2841 static_cast<unsigned int>(packet->length()));
2842 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002843}
2844
2845bool WebRtcVoiceMediaChannel::MuteStream(uint32 ssrc, bool muted) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002846 int channel = (ssrc == 0) ? voe_channel() : GetSendChannelNum(ssrc);
2847 if (channel == -1) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002848 LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
2849 return false;
2850 }
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002851 if (engine()->voe()->volume()->SetInputMute(channel, muted) == -1) {
2852 LOG_RTCERR2(SetInputMute, channel, muted);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002853 return false;
2854 }
2855 return true;
2856}
2857
2858bool WebRtcVoiceMediaChannel::SetSendBandwidth(bool autobw, int bps) {
2859 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendBandwidth.";
2860
wu@webrtc.org1d1ffc92013-10-16 18:12:02 +00002861 send_bw_setting_ = true;
2862 send_autobw_ = autobw;
2863 send_bw_bps_ = bps;
2864
2865 return SetSendBandwidthInternal(send_autobw_, send_bw_bps_);
2866}
2867
2868bool WebRtcVoiceMediaChannel::SetSendBandwidthInternal(bool autobw, int bps) {
2869 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendBandwidthInternal.";
2870
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002871 if (!send_codec_) {
wu@webrtc.org1d1ffc92013-10-16 18:12:02 +00002872 LOG(LS_INFO) << "The send codec has not been set up yet. "
2873 << "The send bandwidth setting will be applied later.";
2874 return true;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002875 }
2876
2877 // Bandwidth is auto by default.
2878 if (autobw || bps <= 0)
2879 return true;
2880
2881 webrtc::CodecInst codec = *send_codec_;
2882 bool is_multi_rate = IsCodecMultiRate(codec);
2883
2884 if (is_multi_rate) {
2885 // If codec is multi-rate then just set the bitrate.
2886 codec.rate = bps;
2887 if (!SetSendCodec(codec)) {
2888 LOG(LS_INFO) << "Failed to set codec " << codec.plname
2889 << " to bitrate " << bps << " bps.";
2890 return false;
2891 }
2892 return true;
2893 } else {
2894 // If codec is not multi-rate and |bps| is less than the fixed bitrate
2895 // then fail. If codec is not multi-rate and |bps| exceeds or equals the
2896 // fixed bitrate then ignore.
2897 if (bps < codec.rate) {
2898 LOG(LS_INFO) << "Failed to set codec " << codec.plname
2899 << " to bitrate " << bps << " bps"
2900 << ", requires at least " << codec.rate << " bps.";
2901 return false;
2902 }
2903 return true;
2904 }
2905}
2906
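// Usage sketch for SetSendBandwidth() above (illustrative only): cap the send
// bitrate at 32000 bps. For a multi-rate codec the codec rate is adjusted;
// for a fixed-rate codec the call only succeeds if |bps| is at least the
// codec's fixed rate.
//
//   channel->SetSendBandwidth(false, 32000);
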
2907bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002908 bool echo_metrics_on = false;
2909 // These can take on valid negative values, so use the lowest possible level
2910 // as default rather than -1.
2911 int echo_return_loss = -100;
2912 int echo_return_loss_enhancement = -100;
2913 // These can also be negative, but in practice -1 is only used to signal
2914 // insufficient data, since the resolution is limited to multiples of 4 ms.
2915 int echo_delay_median_ms = -1;
2916 int echo_delay_std_ms = -1;
2917 if (engine()->voe()->processing()->GetEcMetricsStatus(
2918 echo_metrics_on) != -1 && echo_metrics_on) {
2919 // TODO(ajm): we may want to use VoECallReport::GetEchoMetricsSummary
2920 // here, but it appears to be unsuitable currently. Revisit after this is
2921 // investigated: http://b/issue?id=5666755
2922 int erl, erle, rerl, anlp;
2923 if (engine()->voe()->processing()->GetEchoMetrics(
2924 erl, erle, rerl, anlp) != -1) {
2925 echo_return_loss = erl;
2926 echo_return_loss_enhancement = erle;
2927 }
2928
2929 int median, std;
2930 if (engine()->voe()->processing()->GetEcDelayMetrics(median, std) != -1) {
2931 echo_delay_median_ms = median;
2932 echo_delay_std_ms = std;
2933 }
2934 }
2935
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002936 webrtc::CallStatistics cs;
2937 unsigned int ssrc;
2938 webrtc::CodecInst codec;
2939 unsigned int level;
2940
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002941 for (ChannelMap::const_iterator channel_iter = send_channels_.begin();
2942 channel_iter != send_channels_.end(); ++channel_iter) {
2943 const int channel = channel_iter->second.channel;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002944
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002945 // Fill in the sender info, based on what we know, and what the
2946 // remote side told us it got from its RTCP report.
2947 VoiceSenderInfo sinfo;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002948
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002949 if (engine()->voe()->rtp()->GetRTCPStatistics(channel, cs) == -1 ||
2950 engine()->voe()->rtp()->GetLocalSSRC(channel, ssrc) == -1) {
2951 continue;
2952 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002953
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002954 sinfo.ssrc = ssrc;
2955 sinfo.codec_name = send_codec_.get() ? send_codec_->plname : "";
2956 sinfo.bytes_sent = cs.bytesSent;
2957 sinfo.packets_sent = cs.packetsSent;
2958    // RTT isn't known until an RTCP report is received. Until then, VoiceEngine
2959    // returns 0, which we map to -1 here.
2960 sinfo.rtt_ms = (cs.rttMs > 0) ? cs.rttMs : -1;
2961
2962    // Get data from the last remote RTCP report. Use default values if no data
2963    // is available.
2964 sinfo.fraction_lost = -1.0;
2965 sinfo.jitter_ms = -1;
2966 sinfo.packets_lost = -1;
2967 sinfo.ext_seqnum = -1;
2968 std::vector<webrtc::ReportBlock> receive_blocks;
2969 if (engine()->voe()->rtp()->GetRemoteRTCPReportBlocks(
2970 channel, &receive_blocks) != -1 &&
2971 engine()->voe()->codec()->GetSendCodec(channel, codec) != -1) {
2972 std::vector<webrtc::ReportBlock>::iterator iter;
2973 for (iter = receive_blocks.begin(); iter != receive_blocks.end();
2974 ++iter) {
2975        // Look up the report for our send SSRC only.
2976 if (iter->source_SSRC == sinfo.ssrc) {
2977 // Convert Q8 to floating point.
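          // (fraction_lost in an RTCP report block is an 8-bit fixed-point
          // fraction per RFC 3550, so dividing by 256 yields a value in
          // [0, 1).)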
2978 sinfo.fraction_lost = static_cast<float>(iter->fraction_lost) / 256;
2979 // Convert samples to milliseconds.
2980 if (codec.plfreq / 1000 > 0) {
2981 sinfo.jitter_ms = iter->interarrival_jitter / (codec.plfreq / 1000);
2982 }
2983 sinfo.packets_lost = iter->cumulative_num_packets_lost;
2984 sinfo.ext_seqnum = iter->extended_highest_sequence_number;
2985 break;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002986 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00002987 }
2988 }
wu@webrtc.org9dba5252013-08-05 20:36:57 +00002989
2990 // Local speech level.
2991 sinfo.audio_level = (engine()->voe()->volume()->
2992 GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
2993
2994    // TODO(xians): We report the same APM (echo) metrics for all the send
2995    // channels here because there is no good way to know which send channel
2996    // is using the APM. The correct fix is to allow each send channel to have
2997    // its own APM so that we can attribute the correct APM metrics to each
2998    // send channel. See issue crbug/264611.
2999 sinfo.echo_return_loss = echo_return_loss;
3000 sinfo.echo_return_loss_enhancement = echo_return_loss_enhancement;
3001 sinfo.echo_delay_median_ms = echo_delay_median_ms;
3002 sinfo.echo_delay_std_ms = echo_delay_std_ms;
mallinath@webrtc.orga27be8e2013-09-27 23:04:10 +00003003 // TODO(ajm): Re-enable this metric once we have a reliable implementation.
3004 sinfo.aec_quality_min = -1;
wu@webrtc.org967bfff2013-09-19 05:49:50 +00003005 sinfo.typing_noise_detected = typing_noise_detected_;
wu@webrtc.org9dba5252013-08-05 20:36:57 +00003006
3007 info->senders.push_back(sinfo);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003008 }
3009
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00003010  // Build the list of receivers, one for each receiving channel, or just the
3011  // default channel in a 1:1 call.
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003012 std::vector<int> channels;
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00003013 for (ChannelMap::const_iterator it = receive_channels_.begin();
3014 it != receive_channels_.end(); ++it) {
3015 channels.push_back(it->second.channel);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003016 }
3017 if (channels.empty()) {
3018 channels.push_back(voe_channel());
3019 }
3020
3021 // Get the SSRC and stats for each receiver, based on our own calculations.
3022 for (std::vector<int>::const_iterator it = channels.begin();
3023 it != channels.end(); ++it) {
3024 memset(&cs, 0, sizeof(cs));
3025 if (engine()->voe()->rtp()->GetRemoteSSRC(*it, ssrc) != -1 &&
3026 engine()->voe()->rtp()->GetRTCPStatistics(*it, cs) != -1 &&
3027 engine()->voe()->codec()->GetRecCodec(*it, codec) != -1) {
3028 VoiceReceiverInfo rinfo;
3029 rinfo.ssrc = ssrc;
3030 rinfo.bytes_rcvd = cs.bytesReceived;
3031 rinfo.packets_rcvd = cs.packetsReceived;
3032 // The next four fields are from the most recently sent RTCP report.
3033 // Convert Q8 to floating point.
3034 rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
3035 rinfo.packets_lost = cs.cumulativeLost;
3036 rinfo.ext_seqnum = cs.extendedMax;
3037 // Convert samples to milliseconds.
3038 if (codec.plfreq / 1000 > 0) {
3039 rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
3040 }
3041
3042 // Get jitter buffer and total delay (alg + jitter + playout) stats.
3043 webrtc::NetworkStatistics ns;
3044 if (engine()->voe()->neteq() &&
3045 engine()->voe()->neteq()->GetNetworkStatistics(
3046 *it, ns) != -1) {
3047 rinfo.jitter_buffer_ms = ns.currentBufferSize;
3048 rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
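          // currentExpandRate is reported in Q14 fixed point; dividing by
          // 1 << 14 converts it to the fraction of recent output that NetEq
          // generated by expansion (packet-loss concealment).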
3049 rinfo.expand_rate =
henrike@webrtc.org28654cb2013-07-22 21:07:49 +00003050 static_cast<float>(ns.currentExpandRate) / (1 << 14);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003051 }
3052 if (engine()->voe()->sync()) {
3053 int playout_buffer_delay_ms = 0;
3054 engine()->voe()->sync()->GetDelayEstimate(
3055 *it, &rinfo.delay_estimate_ms, &playout_buffer_delay_ms);
3056 }
3057
3058 // Get speech level.
3059 rinfo.audio_level = (engine()->voe()->volume()->
3060 GetSpeechOutputLevelFullRange(*it, level) != -1) ? level : -1;
3061 info->receivers.push_back(rinfo);
3062 }
3063 }
3064
3065 return true;
3066}
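
// Illustrative usage (not part of the original source): callers poll these
// stats periodically through the VoiceMediaChannel interface, e.g.:
//
//   VoiceMediaInfo info;
//   if (voice_channel->GetStats(&info)) {  // |voice_channel| is hypothetical.
//     for (size_t i = 0; i < info.senders.size(); ++i) {
//       LOG(LS_INFO) << "ssrc " << info.senders[i].ssrc << " sent "
//                    << info.senders[i].bytes_sent << " bytes, rtt "
//                    << info.senders[i].rtt_ms << " ms";
//     }
//   }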
3067
3068void WebRtcVoiceMediaChannel::GetLastMediaError(
3069 uint32* ssrc, VoiceMediaChannel::Error* error) {
3070 ASSERT(ssrc != NULL);
3071 ASSERT(error != NULL);
3072 FindSsrc(voe_channel(), ssrc);
3073 *error = WebRtcErrorToChannelError(GetLastEngineError());
3074}
3075
3076bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00003077 talk_base::CritScope lock(&receive_channels_cs_);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003078 ASSERT(ssrc != NULL);
wu@webrtc.org9dba5252013-08-05 20:36:57 +00003079 if (channel_num == -1 && send_ != SEND_NOTHING) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003080    // Sometimes the VoiceEngine core will report an error with channel_num = -1,
3081    // which means the error is not limited to a specific channel. Signal it
3082    // using ssrc = 0; this is only done while this channel is sending (see the
3083    // check above).
3084 *ssrc = 0;
3085 return true;
3086 } else {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00003087 // Check whether this is a sending channel.
3088 for (ChannelMap::const_iterator it = send_channels_.begin();
3089 it != send_channels_.end(); ++it) {
3090 if (it->second.channel == channel_num) {
3091 // This is a sending channel.
3092 uint32 local_ssrc = 0;
3093 if (engine()->voe()->rtp()->GetLocalSSRC(
3094 channel_num, local_ssrc) != -1) {
3095 *ssrc = local_ssrc;
3096 }
3097 return true;
3098 }
3099 }
3100
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003101 // Check whether this is a receiving channel.
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00003102 for (ChannelMap::const_iterator it = receive_channels_.begin();
3103 it != receive_channels_.end(); ++it) {
3104 if (it->second.channel == channel_num) {
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003105 *ssrc = it->first;
3106 return true;
3107 }
3108 }
3109 }
3110 return false;
3111}
3112
3113void WebRtcVoiceMediaChannel::OnError(uint32 ssrc, int error) {
wu@webrtc.org967bfff2013-09-19 05:49:50 +00003114 if (error == VE_TYPING_NOISE_WARNING) {
3115 typing_noise_detected_ = true;
3116 } else if (error == VE_TYPING_NOISE_OFF_WARNING) {
3117 typing_noise_detected_ = false;
3118 }
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003119 SignalMediaError(ssrc, WebRtcErrorToChannelError(error));
3120}
3121
3122int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
3123 unsigned int ulevel;
3124 int ret =
3125 engine()->voe()->volume()->GetSpeechOutputLevel(channel, ulevel);
3126 return (ret == 0) ? static_cast<int>(ulevel) : -1;
3127}
3128
3129int WebRtcVoiceMediaChannel::GetReceiveChannelNum(uint32 ssrc) {
henrike@webrtc.org1e09a712013-07-26 19:17:59 +00003130 ChannelMap::iterator it = receive_channels_.find(ssrc);
3131 if (it != receive_channels_.end())
3132 return it->second.channel;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003133 return (ssrc == default_receive_ssrc_) ? voe_channel() : -1;
3134}
3135
3136int WebRtcVoiceMediaChannel::GetSendChannelNum(uint32 ssrc) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00003137 ChannelMap::iterator it = send_channels_.find(ssrc);
3138 if (it != send_channels_.end())
3139 return it->second.channel;
3140
3141 return -1;
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003142}
3143
3144bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
3145 const std::vector<AudioCodec>& all_codecs, webrtc::CodecInst* send_codec) {
3146 // Get the RED encodings from the parameter with no name. This may
3147 // change based on what is discussed on the Jingle list.
3148  // The encoding parameter is of the form "a/b"; we only support the case
3149  // where a == b. Verify this and parse out the value into red_pt.
3150 // If the parameter value is absent (as it will be until we wire up the
3151 // signaling of this message), use the second codec specified (i.e. the
3152 // one after "red") as the encoding parameter.
3153 int red_pt = -1;
3154 std::string red_params;
3155 CodecParameterMap::const_iterator it = red_codec.params.find("");
3156 if (it != red_codec.params.end()) {
3157 red_params = it->second;
3158 std::vector<std::string> red_pts;
3159 if (talk_base::split(red_params, '/', &red_pts) != 2 ||
3160 red_pts[0] != red_pts[1] ||
3161 !talk_base::FromString(red_pts[0], &red_pt)) {
3162 LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
3163 return false;
3164 }
3165 } else if (red_codec.params.empty()) {
3166 LOG(LS_WARNING) << "RED params not present, using defaults";
3167 if (all_codecs.size() > 1) {
3168 red_pt = all_codecs[1].id;
3169 }
3170 }
3171
3172  // Try to find red_pt in |all_codecs|.
3173 std::vector<AudioCodec>::const_iterator codec;
3174 for (codec = all_codecs.begin(); codec != all_codecs.end(); ++codec) {
3175 if (codec->id == red_pt)
3176 break;
3177 }
3178
3179 // If we find the right codec, that will be the codec we pass to
3180 // SetSendCodec, with the desired payload type.
3181  if (codec == all_codecs.end() ||
3182      !engine()->FindWebRtcCodec(*codec, send_codec)) {
3183    LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
3184    return false;
3185  }
3187
3188 return true;
3189}
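
// Illustrative usage (not part of the original source), assuming libjingle's
// AudioCodec(id, name, clockrate, bitrate, channels, preference) constructor;
// |channel| and |all_codecs| below are hypothetical:
//
//   AudioCodec red(127, "red", 8000, 0, 1, 0);
//   red.params[""] = "111/111";  // Wrap payload type 111 (primary == redundant).
//   webrtc::CodecInst inst;
//   bool ok = channel->GetRedSendCodec(red, all_codecs, &inst);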
3190
3191bool WebRtcVoiceMediaChannel::EnableRtcp(int channel) {
3192 if (engine()->voe()->rtp()->SetRTCPStatus(channel, true) == -1) {
wu@webrtc.org9dba5252013-08-05 20:36:57 +00003193 LOG_RTCERR2(SetRTCPStatus, channel, 1);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003194 return false;
3195 }
3196  // TODO(juberti): Enable VQMon and RTCP XR reports, once we know what
3197  // we want to do with them.
3198 // engine()->voe().EnableVQMon(voe_channel(), true);
3199 // engine()->voe().EnableRTCP_XR(voe_channel(), true);
3200 return true;
3201}
3202
3203bool WebRtcVoiceMediaChannel::ResetRecvCodecs(int channel) {
3204 int ncodecs = engine()->voe()->codec()->NumOfCodecs();
3205 for (int i = 0; i < ncodecs; ++i) {
3206 webrtc::CodecInst voe_codec;
3207 if (engine()->voe()->codec()->GetCodec(i, voe_codec) != -1) {
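      // Setting the payload type to -1 de-registers the codec from the
      // receive side of this channel (per the VoECodec::SetRecPayloadType
      // contract).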
3208 voe_codec.pltype = -1;
3209 if (engine()->voe()->codec()->SetRecPayloadType(
3210 channel, voe_codec) == -1) {
3211 LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
3212 return false;
3213 }
3214 }
3215 }
3216 return true;
3217}
3218
3219bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
3220 if (playout) {
3221 LOG(LS_INFO) << "Starting playout for channel #" << channel;
3222 if (engine()->voe()->base()->StartPlayout(channel) == -1) {
3223 LOG_RTCERR1(StartPlayout, channel);
3224 return false;
3225 }
3226 } else {
3227 LOG(LS_INFO) << "Stopping playout for channel #" << channel;
3228 engine()->voe()->base()->StopPlayout(channel);
3229 }
3230 return true;
3231}
3232
3233uint32 WebRtcVoiceMediaChannel::ParseSsrc(const void* data, size_t len,
3234 bool rtcp) {
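  // In an RTP packet the SSRC follows the 1-byte V|P|X|CC and M|PT fields,
  // the 2-byte sequence number and the 4-byte timestamp, i.e. it starts at
  // byte offset 8. In an RTCP packet the sender SSRC follows the 4-byte
  // header word, i.e. it starts at byte offset 4 (RFC 3550).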
3235 size_t ssrc_pos = (!rtcp) ? 8 : 4;
3236 uint32 ssrc = 0;
3237 if (len >= (ssrc_pos + sizeof(ssrc))) {
3238 ssrc = talk_base::GetBE32(static_cast<const char*>(data) + ssrc_pos);
3239 }
3240 return ssrc;
3241}
3242
3243// Convert VoiceEngine error code into VoiceMediaChannel::Error enum.
3244VoiceMediaChannel::Error
3245 WebRtcVoiceMediaChannel::WebRtcErrorToChannelError(int err_code) {
3246 switch (err_code) {
3247 case 0:
3248 return ERROR_NONE;
3249 case VE_CANNOT_START_RECORDING:
3250 case VE_MIC_VOL_ERROR:
3251 case VE_GET_MIC_VOL_ERROR:
3252 case VE_CANNOT_ACCESS_MIC_VOL:
3253 return ERROR_REC_DEVICE_OPEN_FAILED;
3254 case VE_SATURATION_WARNING:
3255 return ERROR_REC_DEVICE_SATURATION;
3256 case VE_REC_DEVICE_REMOVED:
3257 return ERROR_REC_DEVICE_REMOVED;
3258 case VE_RUNTIME_REC_WARNING:
3259 case VE_RUNTIME_REC_ERROR:
3260 return ERROR_REC_RUNTIME_ERROR;
3261 case VE_CANNOT_START_PLAYOUT:
3262 case VE_SPEAKER_VOL_ERROR:
3263 case VE_GET_SPEAKER_VOL_ERROR:
3264 case VE_CANNOT_ACCESS_SPEAKER_VOL:
3265 return ERROR_PLAY_DEVICE_OPEN_FAILED;
3266 case VE_RUNTIME_PLAY_WARNING:
3267 case VE_RUNTIME_PLAY_ERROR:
3268 return ERROR_PLAY_RUNTIME_ERROR;
3269 case VE_TYPING_NOISE_WARNING:
3270 return ERROR_REC_TYPING_NOISE_DETECTED;
3271 default:
3272 return VoiceMediaChannel::ERROR_OTHER;
3273 }
3274}
3275
3276int WebRtcSoundclipStream::Read(void* buf, int len) {
3277 size_t res = 0;
3278 mem_.Read(buf, len, &res, NULL);
henrike@webrtc.org28654cb2013-07-22 21:07:49 +00003279 return static_cast<int>(res);
henrike@webrtc.org28e20752013-07-10 00:45:36 +00003280}
3281
3282int WebRtcSoundclipStream::Rewind() {
3283 mem_.Rewind();
3284  // When not looping, return -1 so VoiceEngine stops instead of looping.
3285 return (loop_) ? 0 : -1;
3286}
3287
3288} // namespace cricket
3289
3290#endif // HAVE_WEBRTC_VOICE