/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "output_mixer.h"

#include "audio_processing.h"
#include "audio_frame_operations.h"
#include "critical_section_wrapper.h"
#include "file_wrapper.h"
#include "trace.h"
#include "statistics.h"
#include "voe_external_media.h"

namespace webrtc {

namespace voe {

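// Mixed-stream callback from the AudioConferenceMixer module: keeps a local
// copy of the newly mixed frame so it can be fetched later via GetMixedAudio().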
void
OutputMixer::NewMixedAudio(const WebRtc_Word32 id,
                           const AudioFrame& generalAudioFrame,
                           const AudioFrame** uniqueAudioFrames,
                           const WebRtc_UWord32 size)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);

    _audioFrame = generalAudioFrame;
    _audioFrame._id = id;
}

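// The mixer status callbacks below are informational only; they emit trace
// output and do not modify any state.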
void OutputMixer::MixedParticipants(
    const WebRtc_Word32 id,
    const ParticipantStatistics* participantStatistics,
    const WebRtc_UWord32 size)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::MixedParticipants(id=%d, size=%u)", id, size);
}

void OutputMixer::VADPositiveParticipants(
    const WebRtc_Word32 id,
    const ParticipantStatistics* participantStatistics,
    const WebRtc_UWord32 size)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::VADPositiveParticipants(id=%d, size=%u)",
                 id, size);
}

void OutputMixer::MixedAudioLevel(const WebRtc_Word32 id,
                                  const WebRtc_UWord32 level)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::MixedAudioLevel(id=%d, level=%u)", id, level);
}

void OutputMixer::PlayNotification(const WebRtc_Word32 id,
                                   const WebRtc_UWord32 durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::PlayNotification(id=%d, durationMs=%u)",
                 id, durationMs);
    // Not implemented yet.
}

void OutputMixer::RecordNotification(const WebRtc_Word32 id,
                                     const WebRtc_UWord32 durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordNotification(id=%d, durationMs=%u)",
                 id, durationMs);

    // Not implemented yet.
}

void OutputMixer::PlayFileEnded(const WebRtc_Word32 id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::PlayFileEnded(id=%d)", id);

    // Not needed.
}

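// Called by the output file recorder when recording has ended; clears the
// recording flag under the file lock.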
void OutputMixer::RecordFileEnded(const WebRtc_Word32 id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordFileEnded(id=%d)", id);
    assert(id == _instanceId);

    CriticalSectionScoped cs(_fileCritSect);
    _outputFileRecording = false;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordFileEnded() => "
                 "output file recorder module is shut down");
}

WebRtc_Word32
OutputMixer::Create(OutputMixer*& mixer, const WebRtc_UWord32 instanceId)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
                 "OutputMixer::Create(instanceId=%d)", instanceId);
    mixer = new OutputMixer(instanceId);
    if (mixer == NULL)
    {
        WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
                     "OutputMixer::Create() unable to allocate memory for "
                     "mixer");
        return -1;
    }
    return 0;
}

OutputMixer::OutputMixer(const WebRtc_UWord32 instanceId) :
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _mixerModule(*AudioConferenceMixer::Create(instanceId)),
    _audioLevel(),
    _dtmfGenerator(instanceId),
    _instanceId(instanceId),
    _externalMediaCallbackPtr(NULL),
    _externalMedia(false),
    _panLeft(1.0f),
    _panRight(1.0f),
    _mixingFrequencyHz(8000),
    _outputFileRecorderPtr(NULL),
    _outputFileRecording(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::OutputMixer() - ctor");

    if ((_mixerModule.RegisterMixedStreamCallback(*this) == -1) ||
        (_mixerModule.RegisterMixerStatusCallback(*this, 100) == -1))
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                     "OutputMixer::OutputMixer() failed to register mixer "
                     "callbacks");
    }

    _dtmfGenerator.Init();
}

void
OutputMixer::Destroy(OutputMixer*& mixer)
{
    if (mixer)
    {
        delete mixer;
        mixer = NULL;
    }
}

OutputMixer::~OutputMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::~OutputMixer() - dtor");
    if (_externalMedia)
    {
        DeRegisterExternalMediaProcessing();
    }
    {
        CriticalSectionScoped cs(_fileCritSect);
        if (_outputFileRecorderPtr)
        {
            _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
            _outputFileRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
            _outputFileRecorderPtr = NULL;
        }
    }
    _mixerModule.UnRegisterMixerStatusCallback();
    _mixerModule.UnRegisterMixedStreamCallback();
    delete &_mixerModule;
    delete &_callbackCritSect;
    delete &_fileCritSect;
}

WebRtc_Word32
OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetEngineInformation()");
    _engineStatisticsPtr = &engineStatistics;
    return 0;
}

WebRtc_Word32
OutputMixer::SetAudioProcessingModule(
    AudioProcessing* audioProcessingModule)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetAudioProcessingModule("
                 "audioProcessingModule=0x%x)", audioProcessingModule);
    _audioProcessingModulePtr = audioProcessingModule;
    return 0;
}

int OutputMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess& proccess_object)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RegisterExternalMediaProcessing()");

    CriticalSectionScoped cs(_callbackCritSect);
    _externalMediaCallbackPtr = &proccess_object;
    _externalMedia = true;

    return 0;
}

int OutputMixer::DeRegisterExternalMediaProcessing()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::DeRegisterExternalMediaProcessing()");

    CriticalSectionScoped cs(_callbackCritSect);
    _externalMedia = false;
    _externalMediaCallbackPtr = NULL;

    return 0;
}

int OutputMixer::PlayDtmfTone(WebRtc_UWord8 eventCode, int lengthMs,
                              int attenuationDb)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "OutputMixer::PlayDtmfTone()");
    if (_dtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_STILL_PLAYING_PREV_DTMF,
                                           kTraceError,
                                           "OutputMixer::PlayDtmfTone()");
        return -1;
    }
    return 0;
}

int OutputMixer::StartPlayingDtmfTone(WebRtc_UWord8 eventCode,
                                      int attenuationDb)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "OutputMixer::StartPlayingDtmfTone()");
    if (_dtmfGenerator.StartTone(eventCode, attenuationDb) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STILL_PLAYING_PREV_DTMF,
            kTraceError,
            "OutputMixer::StartPlayingDtmfTone()");
        return -1;
    }
    return 0;
}

int OutputMixer::StopPlayingDtmfTone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "OutputMixer::StopPlayingDtmfTone()");
    return (_dtmfGenerator.StopTone());
}

WebRtc_Word32
OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
                                 const bool mixable)
{
    return _mixerModule.SetMixabilityStatus(participant, mixable);
}

WebRtc_Word32
OutputMixer::SetAnonymousMixabilityStatus(MixerParticipant& participant,
                                          const bool mixable)
{
    return _mixerModule.SetAnonymousMixabilityStatus(participant, mixable);
}

WebRtc_Word32
OutputMixer::MixActiveChannels()
{
    return _mixerModule.Process();
}

int
OutputMixer::GetSpeechOutputLevel(WebRtc_UWord32& level)
{
    WebRtc_Word8 currentLevel = _audioLevel.Level();
    level = static_cast<WebRtc_UWord32>(currentLevel);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetSpeechOutputLevel() => level=%u", level);
    return 0;
}

int
OutputMixer::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level)
{
    WebRtc_Word16 currentLevel = _audioLevel.LevelFullRange();
    level = static_cast<WebRtc_UWord32>(currentLevel);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetSpeechOutputLevelFullRange() => level=%u", level);
    return 0;
}

int
OutputMixer::SetOutputVolumePan(float left, float right)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetOutputVolumePan()");
    _panLeft = left;
    _panRight = right;
    return 0;
}

int
OutputMixer::GetOutputVolumePan(float& left, float& right)
{
    left = _panLeft;
    right = _panRight;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetOutputVolumePan() => left=%2.1f, right=%2.1f",
                 left, right);
    return 0;
}

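// Starts recording the mixed playout signal to a file. The file format is
// chosen from the supplied codec: L16/PCMU/PCMA go into a WAV container,
// a NULL codec defaults to 16 kHz PCM, and anything else is written as a
// compressed file.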
int OutputMixer::StartRecordingPlayout(const char* fileName,
                                       const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StartRecordingPlayout(fileName=%s)", fileName);

    if (_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "StartRecordingPlayout() is already recording");
        return 0;
    }

    FileFormats format;
    const WebRtc_UWord32 notificationTime(0);
    CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingPlayout() invalid compression");
        return -1;
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    }
    else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    }
    else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(_fileCritSect);

    // Destroy the old instance.
    if (_outputFileRecorderPtr)
    {
        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
    }

    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
        _instanceId,
        (const FileFormats)format);
    if (_outputFileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingPlayout() fileRecorder format is not correct");
        return -1;
    }

    if (_outputFileRecorderPtr->StartRecordingAudioFile(
            fileName,
            (const CodecInst&)*codecInst,
            notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _outputFileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
        return -1;
    }
    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
    _outputFileRecording = true;

    return 0;
}

int OutputMixer::StartRecordingPlayout(OutStream* stream,
                                       const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StartRecordingPlayout()");

    if (_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "StartRecordingPlayout() is already recording");
        return 0;
    }

    FileFormats format;
    const WebRtc_UWord32 notificationTime(0);
    CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingPlayout() invalid compression");
        return -1;
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    }
    else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    }
    else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(_fileCritSect);

    // Destroy the old instance.
    if (_outputFileRecorderPtr)
    {
        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
    }

    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
        _instanceId,
        (const FileFormats)format);
    if (_outputFileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingPlayout() fileRecorder format is not correct");
        return -1;
    }

    if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream,
                                                        *codecInst,
                                                        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _outputFileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
        return -1;
    }

    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
    _outputFileRecording = true;

    return 0;
}

int OutputMixer::StopRecordingPlayout()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StopRecordingPlayout()");

    if (!_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                     "StopRecordingPlayout() file is not recording");
        return -1;
    }

    CriticalSectionScoped cs(_fileCritSect);

    if (_outputFileRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }
    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
    _outputFileRecorderPtr = NULL;
    _outputFileRecording = false;

    return 0;
}

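// Delivers the latest mixed frame at the desired sample rate and channel
// count: records it to file if enabled, resamples it to desiredFreqHz, and
// upmixes mono to stereo when two channels are requested.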
WebRtc_Word32
OutputMixer::GetMixedAudio(const WebRtc_Word32 desiredFreqHz,
                           const WebRtc_UWord8 channels,
                           AudioFrame& audioFrame)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::GetMixedAudio(desiredFreqHz=%d, channels=%d)",
                 desiredFreqHz, channels);

    audioFrame = _audioFrame;

    // --- Record playout if enabled
    {
        CriticalSectionScoped cs(_fileCritSect);
        if (_outputFileRecording)
        {
            assert(audioFrame._audioChannel == 1);

            if (_outputFileRecorderPtr)
            {
                _outputFileRecorderPtr->RecordAudioToFile(audioFrame);
            }
        }
    }

    int outLen(0);

    if (audioFrame._audioChannel == 1)
    {
        if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
                                     desiredFreqHz,
                                     kResamplerSynchronous) != 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                         "OutputMixer::GetMixedAudio() unable to resample - 1");
            return -1;
        }
    }
    else
    {
        if (_resampler.ResetIfNeeded(audioFrame._frequencyInHz,
                                     desiredFreqHz,
                                     kResamplerSynchronousStereo) != 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                         "OutputMixer::GetMixedAudio() unable to resample - 2");
            return -1;
        }
    }
    if (_resampler.Push(
        _audioFrame._payloadData,
        _audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
        audioFrame._payloadData,
        AudioFrame::kMaxAudioFrameSizeSamples,
        outLen) == 0)
    {
        // Ensure that the output from the resampler matches the audio-frame
        // format. Example: 10 ms of stereo output at 48 kHz => outLen = 960 =>
        // _payloadDataLengthInSamples becomes 480 (samples per channel).
        audioFrame._payloadDataLengthInSamples =
            (outLen / _audioFrame._audioChannel);
        audioFrame._frequencyInHz = desiredFreqHz;
    }
    else
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                     "OutputMixer::GetMixedAudio() resampling failed");
        return -1;
    }

    if ((channels == 2) && (audioFrame._audioChannel == 1))
    {
        AudioFrameOperations::MonoToStereo(audioFrame);
    }

    return 0;
}

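// Post-processing of the combined (mixed) signal: inserts any pending in-band
// DTMF tone, applies left/right panning, feeds the signal to the
// AudioProcessing module as far-end reference, invokes external media
// processing if registered, and updates the output audio level.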
WebRtc_Word32
OutputMixer::DoOperationsOnCombinedSignal()
{
    if (_audioFrame._frequencyInHz != _mixingFrequencyHz)
    {
        WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                     "OutputMixer::DoOperationsOnCombinedSignal() => "
                     "mixing frequency = %d", _audioFrame._frequencyInHz);
        _mixingFrequencyHz = _audioFrame._frequencyInHz;
    }

    // --- Insert inband Dtmf tone
    if (_dtmfGenerator.IsAddingTone())
    {
        InsertInbandDtmfTone();
    }

    // Scale left and/or right channel(s) if balance is active
    if (_panLeft != 1.0 || _panRight != 1.0)
    {
        if (_audioFrame._audioChannel == 1)
        {
            AudioFrameOperations::MonoToStereo(_audioFrame);
        }
        else
        {
            // Pure stereo mode (we are receiving a stereo signal).
        }

        assert(_audioFrame._audioChannel == 2);
        AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
    }

    // --- Far-end Voice Quality Enhancement (AudioProcessing Module)

    APMAnalyzeReverseStream();

    // --- External media processing

    if (_externalMedia)
    {
        CriticalSectionScoped cs(_callbackCritSect);
        const bool isStereo = (_audioFrame._audioChannel == 2);
        if (_externalMediaCallbackPtr)
        {
            _externalMediaCallbackPtr->Process(
                -1,
                kPlaybackAllChannelsMixed,
                (WebRtc_Word16*)_audioFrame._payloadData,
                _audioFrame._payloadDataLengthInSamples,
                _audioFrame._frequencyInHz,
                isStereo);
        }
    }

    // --- Measure audio level (0-9) for the combined signal
    _audioLevel.ComputeLevel(_audioFrame);

    return 0;
}

// ----------------------------------------------------------------------------
// Private methods
// ----------------------------------------------------------------------------

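// Resamples the mixed frame to the AudioProcessing module's sample rate
// (the sending side determines the APM frequency), downmixes stereo to mono,
// and passes the result to AnalyzeReverseStream() as the far-end reference.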
int
OutputMixer::APMAnalyzeReverseStream()
{
    int outLen(0);
    AudioFrame audioFrame = _audioFrame;

    // Convert from the mixing frequency to the APM frequency.
    // The sending side determines the APM frequency.

    if (audioFrame._audioChannel == 1)
    {
        _apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
                                    _audioProcessingModulePtr->sample_rate_hz(),
                                    kResamplerSynchronous);
    }
    else
    {
        _apmResampler.ResetIfNeeded(_audioFrame._frequencyInHz,
                                    _audioProcessingModulePtr->sample_rate_hz(),
                                    kResamplerSynchronousStereo);
    }
    if (_apmResampler.Push(
        _audioFrame._payloadData,
        _audioFrame._payloadDataLengthInSamples*_audioFrame._audioChannel,
        audioFrame._payloadData,
        AudioFrame::kMaxAudioFrameSizeSamples,
        outLen) == 0)
    {
        audioFrame._payloadDataLengthInSamples =
            (outLen / _audioFrame._audioChannel);
        audioFrame._frequencyInHz = _audioProcessingModulePtr->sample_rate_hz();
    }

    if (audioFrame._audioChannel == 2)
    {
        AudioFrameOperations::StereoToMono(audioFrame);
    }

    // Perform far-end APM analysis.

    if (_audioProcessingModulePtr->AnalyzeReverseStream(&audioFrame) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "AudioProcessingModule::AnalyzeReverseStream() => error");
    }

    return 0;
}

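// Replaces the mixed audio with the next 10 ms block of the in-band DTMF tone.
// For stereo frames the tone is written to the left channel only.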
int
OutputMixer::InsertInbandDtmfTone()
{
    WebRtc_UWord16 sampleRate(0);
    _dtmfGenerator.GetSampleRate(sampleRate);
    if (sampleRate != _audioFrame._frequencyInHz)
    {
        // Update the sample rate of the Dtmf tone since the mixing frequency
        // has changed.
        _dtmfGenerator.SetSampleRate(
            (WebRtc_UWord16)(_audioFrame._frequencyInHz));
        // Reset the tone to be added, taking the new sample rate into account.
        _dtmfGenerator.ResetTone();
    }

    WebRtc_Word16 toneBuffer[320];
    WebRtc_UWord16 toneSamples(0);
    if (_dtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "OutputMixer::InsertInbandDtmfTone() inserting Dtmf "
                     "tone failed");
        return -1;
    }

    // Replace the mixed audio with the Dtmf tone.
    if (_audioFrame._audioChannel == 1)
    {
        // Mono
        memcpy(_audioFrame._payloadData, toneBuffer,
               sizeof(WebRtc_Word16) * toneSamples);
    }
    else
    {
        // Stereo
        for (int i = 0; i < _audioFrame._payloadDataLengthInSamples; i++)
        {
            _audioFrame._payloadData[2 * i] = toneBuffer[i];
            _audioFrame._payloadData[2 * i + 1] = 0;
        }
    }
    assert(_audioFrame._payloadDataLengthInSamples == toneSamples);

    return 0;
}

}  // namespace voe

}  // namespace webrtc