/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/transmit_mixer.h"

#include "webrtc/base/format_macros.h"
#include "webrtc/modules/utility/interface/audio_frame_operations.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/channel_manager.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
#include "webrtc/voice_engine/statistics.h"
#include "webrtc/voice_engine/utility.h"
#include "webrtc/voice_engine/voe_base_impl.h"

#define WEBRTC_ABS(a) (((a) < 0) ? -(a) : (a))

namespace webrtc {
namespace voe {

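// Descriptive note (not from the original authors): this observer callback is
// driven by |_monitorModule|, which is registered with the shared process
// thread in SetEngineInformation(). It forwards any typing-noise and
// saturation warnings that were flagged on the capture path to the registered
// VoiceEngineObserver, so the callbacks run outside the audio callback chain.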
// TODO(ajm): The thread safety of this is dubious...
void
TransmitMixer::OnPeriodicProcess()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::OnPeriodicProcess()");

#if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
    if (_typingNoiseWarningPending)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            if (_typingNoiseDetected) {
                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                             "TransmitMixer::OnPeriodicProcess() => "
                             "CallbackOnError(VE_TYPING_NOISE_WARNING)");
                _voiceEngineObserverPtr->CallbackOnError(
                    -1,
                    VE_TYPING_NOISE_WARNING);
            } else {
                WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                             "TransmitMixer::OnPeriodicProcess() => "
                             "CallbackOnError(VE_TYPING_NOISE_OFF_WARNING)");
                _voiceEngineObserverPtr->CallbackOnError(
                    -1,
                    VE_TYPING_NOISE_OFF_WARNING);
            }
        }
        _typingNoiseWarningPending = false;
    }
#endif

    bool saturationWarning = false;
    {
        // Modify |_saturationWarning| under lock to avoid conflict with write op
        // in ProcessAudio and also ensure that we don't hold the lock during the
        // callback.
        CriticalSectionScoped cs(&_critSect);
        saturationWarning = _saturationWarning;
        if (_saturationWarning)
            _saturationWarning = false;
    }

    if (saturationWarning)
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_voiceEngineObserverPtr)
        {
            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::OnPeriodicProcess() =>"
                         " CallbackOnError(VE_SATURATION_WARNING)");
            _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
        }
    }
}

void TransmitMixer::PlayNotification(int32_t id,
                                     uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}

void TransmitMixer::RecordNotification(int32_t id,
                                       uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}

void TransmitMixer::PlayFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded(id=%d)", id);

    assert(id == _filePlayerId);

    CriticalSectionScoped cs(&_critSect);

    _filePlaying = false;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PlayFileEnded() => "
                 "file player module is shut down");
}

void
TransmitMixer::RecordFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RecordFileEnded(id=%d)", id);

    if (id == _fileRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileRecorder module"
                     " is shut down");
    } else if (id == _fileCallRecorderId)
    {
        CriticalSectionScoped cs(&_critSect);
        _fileCallRecording = false;
        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordFileEnded() => fileCallRecorder"
                     " module is shut down");
    }
}

int32_t
TransmitMixer::Create(TransmitMixer*& mixer, uint32_t instanceId)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                 "TransmitMixer::Create(instanceId=%d)", instanceId);
    mixer = new TransmitMixer(instanceId);
    if (mixer == NULL)
    {
        WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
                     "TransmitMixer::Create() unable to allocate memory"
                     " for mixer");
        return -1;
    }
    return 0;
}

void
TransmitMixer::Destroy(TransmitMixer*& mixer)
{
    if (mixer)
    {
        delete mixer;
        mixer = NULL;
    }
}

TransmitMixer::TransmitMixer(uint32_t instanceId) :
    _engineStatisticsPtr(NULL),
    _channelManagerPtr(NULL),
    audioproc_(NULL),
    _voiceEngineObserverPtr(NULL),
    _processThreadPtr(NULL),
    _filePlayerPtr(NULL),
    _fileRecorderPtr(NULL),
    _fileCallRecorderPtr(NULL),
    // Avoid ID collisions with other channels by offsetting with 1024 - 1026;
    // we will never use anywhere near 1024 channels.
    _filePlayerId(instanceId + 1024),
    _fileRecorderId(instanceId + 1025),
    _fileCallRecorderId(instanceId + 1026),
    _filePlaying(false),
    _fileRecording(false),
    _fileCallRecording(false),
    _audioLevel(),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    _typingNoiseWarningPending(false),
    _typingNoiseDetected(false),
#endif
    _saturationWarning(false),
    _instanceId(instanceId),
    _mixFileWithMicrophone(false),
    _captureLevel(0),
    external_postproc_ptr_(NULL),
    external_preproc_ptr_(NULL),
    _mute(false),
    _remainingMuteMicTimeMs(0),
    stereo_codec_(false),
    swap_stereo_channels_(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::TransmitMixer() - ctor");
}

TransmitMixer::~TransmitMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::~TransmitMixer() - dtor");
    _monitorModule.DeRegisterObserver();
    if (_processThreadPtr)
    {
        _processThreadPtr->DeRegisterModule(&_monitorModule);
    }
    DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
    DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
    {
        CriticalSectionScoped cs(&_critSect);
        if (_fileRecorderPtr)
        {
            _fileRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
            _fileRecorderPtr = NULL;
        }
        if (_fileCallRecorderPtr)
        {
            _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
            _fileCallRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
            _fileCallRecorderPtr = NULL;
        }
        if (_filePlayerPtr)
        {
            _filePlayerPtr->RegisterModuleFileCallback(NULL);
            _filePlayerPtr->StopPlayingFile();
            FilePlayer::DestroyFilePlayer(_filePlayerPtr);
            _filePlayerPtr = NULL;
        }
    }
    delete &_critSect;
    delete &_callbackCritSect;
}

int32_t
TransmitMixer::SetEngineInformation(ProcessThread& processThread,
                                    Statistics& engineStatistics,
                                    ChannelManager& channelManager)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetEngineInformation()");

    _processThreadPtr = &processThread;
    _engineStatisticsPtr = &engineStatistics;
    _channelManagerPtr = &channelManager;

    _processThreadPtr->RegisterModule(&_monitorModule);
    _monitorModule.RegisterObserver(*this);

    return 0;
}

int32_t
TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::RegisterVoiceEngineObserver()");
    CriticalSectionScoped cs(&_callbackCritSect);

    if (_voiceEngineObserverPtr)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceError,
            "RegisterVoiceEngineObserver() observer already enabled");
        return -1;
    }
    _voiceEngineObserverPtr = &observer;
    return 0;
}

int32_t
TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetAudioProcessingModule("
                 "audioProcessingModule=0x%x)",
                 audioProcessingModule);
    audioproc_ = audioProcessingModule;
    return 0;
}

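// Descriptive note (added): scans all channels that are currently sending and
// reports the highest send-codec sample rate and channel count. PrepareDemux()
// (via GenerateAudioFrame()) uses this to choose the capture processing format.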
void TransmitMixer::GetSendCodecInfo(int* max_sample_rate, int* max_channels) {
  *max_sample_rate = 8000;
  *max_channels = 1;
  for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
       it.Increment()) {
    Channel* channel = it.GetChannel();
    if (channel->Sending()) {
      CodecInst codec;
      channel->GetSendCodec(codec);
      *max_sample_rate = std::max(*max_sample_rate, codec.plfreq);
      *max_channels = std::max(*max_channels, codec.channels);
    }
  }
}

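// Descriptive note (added): entry point for 10 ms of captured audio. The
// pipeline below is:
//   1. Resample/remix the device frame into |_audioFrame| (GenerateAudioFrame).
//   2. Run the optional external preprocessing callback.
//   3. Run the AudioProcessing module (ProcessAudio) and, if enabled,
//      typing detection.
//   4. Apply muting, optional file mixing, and file recording.
//   5. Run the optional external postprocessing callback and measure the
//      audio level.
// The resulting frame is later handed to each sending channel in
// DemuxAndMix()/EncodeAndSend().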
int32_t
TransmitMixer::PrepareDemux(const void* audioSamples,
                            size_t nSamples,
                            uint8_t nChannels,
                            uint32_t samplesPerSec,
                            uint16_t totalDelayMS,
                            int32_t clockDrift,
                            uint16_t currentMicLevel,
                            bool keyPressed)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PrepareDemux(nSamples=%" PRIuS ", "
                 "nChannels=%u, samplesPerSec=%u, totalDelayMS=%u, "
                 "clockDrift=%d, currentMicLevel=%u)",
                 nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift,
                 currentMicLevel);

    // --- Resample input audio and create/store the initial audio frame
    GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
                       nSamples,
                       nChannels,
                       samplesPerSec);

    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_preproc_ptr_) {
            external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
                                           _audioFrame.data_,
                                           _audioFrame.samples_per_channel_,
                                           _audioFrame.sample_rate_hz_,
                                           _audioFrame.num_channels_ == 2);
        }
    }

    // --- Near-end audio processing.
    ProcessAudio(totalDelayMS, clockDrift, currentMicLevel, keyPressed);

    if (swap_stereo_channels_ && stereo_codec_)
        // Only bother swapping if we're using a stereo codec.
        AudioFrameOperations::SwapStereoChannels(&_audioFrame);

    // --- Annoying typing detection (utilizes the APM/VAD decision)
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    TypingDetection(keyPressed);
#endif

    // --- Mute during DTMF tone if direct feedback is enabled
    if (_remainingMuteMicTimeMs > 0)
    {
        AudioFrameOperations::Mute(_audioFrame);
        _remainingMuteMicTimeMs -= 10;
        if (_remainingMuteMicTimeMs < 0)
        {
            _remainingMuteMicTimeMs = 0;
        }
    }

    // --- Mute signal
    if (_mute)
    {
        AudioFrameOperations::Mute(_audioFrame);
    }

    // --- Mix with file (does not affect the mixing frequency)
    if (_filePlaying)
    {
        MixOrReplaceAudioWithFile(_audioFrame.sample_rate_hz_);
    }

    // --- Record to file
    bool file_recording = false;
    {
        CriticalSectionScoped cs(&_critSect);
        file_recording = _fileRecording;
    }
    if (file_recording)
    {
        RecordAudioToFile(_audioFrame.sample_rate_hz_);
    }

    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (external_postproc_ptr_) {
            external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
                                            _audioFrame.data_,
                                            _audioFrame.samples_per_channel_,
                                            _audioFrame.sample_rate_hz_,
                                            _audioFrame.num_channels_ == 2);
        }
    }

    // --- Measure audio level of speech after all processing.
    _audioLevel.ComputeLevel(_audioFrame);
    return 0;
}

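// Descriptive note (added): the two DemuxAndMix() overloads copy the prepared
// |_audioFrame| into every sending channel (all channels, or the given subset);
// each channel then runs its own encoding pipeline on its copy.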
int32_t
TransmitMixer::DemuxAndMix()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::DemuxAndMix()");

    for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
         it.Increment())
    {
        Channel* channelPtr = it.GetChannel();
        if (channelPtr->Sending())
        {
            // Demultiplex makes a copy of its input.
            channelPtr->Demultiplex(_audioFrame);
            channelPtr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
        }
    }
    return 0;
}

void TransmitMixer::DemuxAndMix(const int voe_channels[],
                                int number_of_voe_channels) {
  for (int i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr) {
      if (channel_ptr->Sending()) {
        // Demultiplex makes a copy of its input.
        channel_ptr->Demultiplex(_audioFrame);
        channel_ptr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
      }
    }
  }
}

int32_t
TransmitMixer::EncodeAndSend()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::EncodeAndSend()");

    for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
         it.Increment())
    {
        Channel* channelPtr = it.GetChannel();
        if (channelPtr->Sending())
        {
            channelPtr->EncodeAndSend();
        }
    }
    return 0;
}

void TransmitMixer::EncodeAndSend(const int voe_channels[],
                                  int number_of_voe_channels) {
  for (int i = 0; i < number_of_voe_channels; ++i) {
    voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
    voe::Channel* channel_ptr = ch.channel();
    if (channel_ptr && channel_ptr->Sending())
      channel_ptr->EncodeAndSend();
  }
}

uint32_t TransmitMixer::CaptureLevel() const
{
    return _captureLevel;
}

void
TransmitMixer::UpdateMuteMicrophoneTime(uint32_t lengthMs)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)",
                 lengthMs);
    _remainingMuteMicTimeMs = lengthMs;
}

int32_t
TransmitMixer::StopSend()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopSend()");
    _audioLevel.Clear();
    return 0;
}

int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
                                                bool loop,
                                                FileFormats format,
                                                int startPosition,
                                                float volumeScaling,
                                                int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartPlayingFileAsMicrophone("
                 "fileNameUTF8[]=%s, loop=%d, format=%d, volumeScaling=%5.3f,"
                 " startPosition=%d, stopPosition=%d)", fileName, loop,
                 format, volumeScaling, startPosition, stopPosition);

    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartPlayingFileAsMicrophone() filePlayer format is not correct");
        return -1;
    }

    const uint32_t notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        fileName,
        loop,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }

    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}

int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
                                                FileFormats format,
                                                int startPosition,
                                                float volumeScaling,
                                                int stopPosition,
                                                const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
                 format, volumeScaling, startPosition, stopPosition);

    if (stream == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFileAsMicrophone() NULL as input stream");
        return -1;
    }

    if (_filePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is already playing");
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_filePlayerPtr)
    {
        _filePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
    }

    // Dynamically create the instance
    _filePlayerPtr
        = FilePlayer::CreateFilePlayer(_filePlayerId,
                                       (const FileFormats) format);

    if (_filePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceWarning,
            "StartPlayingFileAsMicrophone() filePlayer format is not correct");
        return -1;
    }

    const uint32_t notificationTime(0);

    if (_filePlayerPtr->StartPlayingFile(
        (InStream&) *stream,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*) codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _filePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
        _filePlayerPtr = NULL;
        return -1;
    }
    _filePlayerPtr->RegisterModuleFileCallback(this);
    _filePlaying = true;

    return 0;
}

int TransmitMixer::StopPlayingFileAsMicrophone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopPlayingFileAsMicrophone()");

    if (!_filePlaying)
    {
        return 0;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_filePlayerPtr->StopPlayingFile() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_CANNOT_STOP_PLAYOUT, kTraceError,
            "StopPlayingFile() could not stop playing file");
        return -1;
    }

    _filePlayerPtr->RegisterModuleFileCallback(NULL);
    FilePlayer::DestroyFilePlayer(_filePlayerPtr);
    _filePlayerPtr = NULL;
    _filePlaying = false;

    return 0;
}

int TransmitMixer::IsPlayingFileAsMicrophone() const
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::IsPlayingFileAsMicrophone()");
    return _filePlaying;
}

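// Descriptive note (added): the StartRecording*() functions below record the
// processed microphone (or call) signal. If no codec is supplied, 16 kHz
// linear PCM is used; L16/PCMU/PCMA are written as WAV, anything else goes
// through the compressed-file format.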
int TransmitMixer::StartRecordingMicrophone(const char* fileName,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
                 fileName);

    CriticalSectionScoped cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL &&
        (codecInst->channels < 0 || codecInst->channels > 2))
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format is not correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }
    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}

int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone()");

    CriticalSectionScoped cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (_fileRecorderPtr)
    {
        _fileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
    }

    _fileRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileRecorderId,
                                         (const FileFormats) format);
    if (_fileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format is not correct");
        return -1;
    }

    if (_fileRecorderPtr->StartRecordingAudioFile(*stream,
                                                  *codecInst,
                                                  notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
        _fileRecorderPtr = NULL;
        return -1;
    }

    _fileRecorderPtr->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}

int TransmitMixer::StopRecordingMicrophone()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingMicrophone()");

    CriticalSectionScoped cs(&_critSect);

    if (!_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StopRecordingMicrophone() is not recording");
        return 0;
    }

    if (_fileRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }
    _fileRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
    _fileRecorderPtr = NULL;
    _fileRecording = false;

    return 0;
}

int TransmitMixer::StartRecordingCall(const char* fileName,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr
        = FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                           (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format is not correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(
        fileName,
        (const CodecInst&) *codecInst,
        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }
    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}

int TransmitMixer::StartRecordingCall(OutStream* stream,
                                      const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingCall()");

    if (_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingCall() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingCall() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
               (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_critSect);

    // Destroy the old instance
    if (_fileCallRecorderPtr)
    {
        _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
    }

    _fileCallRecorderPtr =
        FileRecorder::CreateFileRecorder(_fileCallRecorderId,
                                         (const FileFormats) format);
    if (_fileCallRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingCall() fileRecorder format is not correct");
        return -1;
    }

    if (_fileCallRecorderPtr->StartRecordingAudioFile(*stream,
                                                      *codecInst,
                                                      notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _fileCallRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
        _fileCallRecorderPtr = NULL;
        return -1;
    }

    _fileCallRecorderPtr->RegisterModuleFileCallback(this);
    _fileCallRecording = true;

    return 0;
}

int TransmitMixer::StopRecordingCall()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopRecordingCall()");

    if (!_fileCallRecording)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "StopRecordingCall() file is not recording");
        return -1;
    }

    CriticalSectionScoped cs(&_critSect);

    if (_fileCallRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }

    _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
    _fileCallRecorderPtr = NULL;
    _fileCallRecording = false;

    return 0;
}

void
TransmitMixer::SetMixWithMicStatus(bool mix)
{
    _mixFileWithMicrophone = mix;
}

int TransmitMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess* object,
    ProcessingTypes type) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::RegisterExternalMediaProcessing()");

  CriticalSectionScoped cs(&_callbackCritSect);
  if (!object) {
    return -1;
  }

  // Store the callback object according to the processing type.
  if (type == kRecordingAllChannelsMixed) {
    external_postproc_ptr_ = object;
  } else if (type == kRecordingPreprocessing) {
    external_preproc_ptr_ = object;
  } else {
    return -1;
  }
  return 0;
}

int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
  WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::DeRegisterExternalMediaProcessing()");

  CriticalSectionScoped cs(&_callbackCritSect);
  if (type == kRecordingAllChannelsMixed) {
    external_postproc_ptr_ = NULL;
  } else if (type == kRecordingPreprocessing) {
    external_preproc_ptr_ = NULL;
  } else {
    return -1;
  }
  return 0;
}

int
TransmitMixer::SetMute(bool enable)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::SetMute(enable=%d)", enable);
    _mute = enable;
    return 0;
}

bool
TransmitMixer::Mute() const
{
    return _mute;
}

int8_t TransmitMixer::AudioLevel() const
{
    // Speech + file level [0,9]
    return _audioLevel.Level();
}

int16_t TransmitMixer::AudioLevelFullRange() const
{
    // Speech + file level [0,32767]
    return _audioLevel.LevelFullRange();
}

bool TransmitMixer::IsRecordingCall()
{
    return _fileCallRecording;
}

bool TransmitMixer::IsRecordingMic()
{
    CriticalSectionScoped cs(&_critSect);
    return _fileRecording;
}

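// Descriptive note (added): converts the raw capture buffer into |_audioFrame|
// in the processing format: the lowest APM-native sample rate that is at least
// the minimum of the input rate and the maximum send-codec rate (capped when
// AECM is enabled), using at most as many channels as the send codec.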
void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
                                       size_t samples_per_channel,
                                       int num_channels,
                                       int sample_rate_hz) {
  int codec_rate;
  int num_codec_channels;
  GetSendCodecInfo(&codec_rate, &num_codec_channels);
  stereo_codec_ = num_codec_channels == 2;

  // We want to process at the lowest rate possible without losing information.
  // Choose the lowest native rate at least equal to the input and codec rates.
  const int min_processing_rate = std::min(sample_rate_hz, codec_rate);
  for (size_t i = 0; i < AudioProcessing::kNumNativeSampleRates; ++i) {
    _audioFrame.sample_rate_hz_ = AudioProcessing::kNativeSampleRatesHz[i];
    if (_audioFrame.sample_rate_hz_ >= min_processing_rate) {
      break;
    }
  }
  if (audioproc_->echo_control_mobile()->is_enabled()) {
    // AECM only supports 8 and 16 kHz.
    _audioFrame.sample_rate_hz_ = std::min(
        _audioFrame.sample_rate_hz_, AudioProcessing::kMaxAECMSampleRateHz);
  }
  _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels);
  RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz,
                   &resampler_, &_audioFrame);
}

int32_t TransmitMixer::RecordAudioToFile(
    uint32_t mixingFrequency)
{
    CriticalSectionScoped cs(&_critSect);
    if (_fileRecorderPtr == NULL)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordAudioToFile() filerecorder does not"
                     " exist");
        return -1;
    }

    if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::RecordAudioToFile() file recording"
                     " failed");
        return -1;
    }

    return 0;
}

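// Descriptive note (added): pulls 10 ms of audio from the file player and
// either mixes it into |_audioFrame| (when SetMixWithMicStatus(true) has been
// called) or replaces the microphone signal with it. The file stream is
// currently always mono.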
int32_t TransmitMixer::MixOrReplaceAudioWithFile(
    int mixingFrequency)
{
    rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);

    size_t fileSamples(0);
    {
        CriticalSectionScoped cs(&_critSect);
        if (_filePlayerPtr == NULL)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile()"
                         " fileplayer does not exist");
            return -1;
        }

        if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
                                                 fileSamples,
                                                 mixingFrequency) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile() file"
                         " mixing failed");
            return -1;
        }
    }

    assert(_audioFrame.samples_per_channel_ == fileSamples);

    if (_mixFileWithMicrophone)
    {
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        MixWithSat(_audioFrame.data_,
                   _audioFrame.num_channels_,
                   fileBuffer.get(),
                   1,
                   fileSamples);
    } else
    {
        // Replace ACM audio with file.
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        _audioFrame.UpdateFrame(-1,
                                0xFFFFFFFF,
                                fileBuffer.get(),
                                fileSamples,
                                mixingFrequency,
                                AudioFrame::kNormalSpeech,
                                AudioFrame::kVadUnknown,
                                1);
    }
    return 0;
}

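// Descriptive note (added): feeds the capture frame through the
// AudioProcessing module. It reports the render/capture delay, analog mic
// level and (when AEC drift compensation is enabled) clock drift, runs
// ProcessStream(), then reads back the new AGC capture level and the
// saturation flag that OnPeriodicProcess() later reports to the observer.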
void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
                                 int current_mic_level, bool key_pressed) {
  if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
    // A redundant warning is reported in AudioDevice, which we've throttled
    // to avoid flooding the logs. Relegate this one to LS_VERBOSE to avoid
    // repeating the problem here.
    LOG_FERR1(LS_VERBOSE, set_stream_delay_ms, delay_ms);
  }

  GainControl* agc = audioproc_->gain_control();
  if (agc->set_stream_analog_level(current_mic_level) != 0) {
    LOG_FERR1(LS_ERROR, set_stream_analog_level, current_mic_level);
    assert(false);
  }

  EchoCancellation* aec = audioproc_->echo_cancellation();
  if (aec->is_drift_compensation_enabled()) {
    aec->set_stream_drift_samples(clock_drift);
  }

  audioproc_->set_stream_key_pressed(key_pressed);

  int err = audioproc_->ProcessStream(&_audioFrame);
  if (err != 0) {
    LOG(LS_ERROR) << "ProcessStream() error: " << err;
    assert(false);
  }

  // Store new capture level. Only updated when analog AGC is enabled.
  _captureLevel = agc->stream_analog_level();

  CriticalSectionScoped cs(&_critSect);
  // Triggers a callback in OnPeriodicProcess().
  _saturationWarning |= agc->stream_is_saturated();
}

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
void TransmitMixer::TypingDetection(bool keyPressed)
{
    // We let the VAD determine if we're using this feature or not.
    if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) {
        return;
    }

    bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
    if (_typingDetection.Process(keyPressed, vadActive)) {
        _typingNoiseWarningPending = true;
        _typingNoiseDetected = true;
    } else {
        // If there is already a warning pending, do not change the state.
        // Otherwise set a warning pending if the last callback was for noise
        // detected.
        if (!_typingNoiseWarningPending && _typingNoiseDetected) {
            _typingNoiseWarningPending = true;
            _typingNoiseDetected = false;
        }
    }
}
#endif

int TransmitMixer::GetMixingFrequency()
{
    assert(_audioFrame.sample_rate_hz_ != 0);
    return _audioFrame.sample_rate_hz_;
}

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::TimeSinceLastTyping(int& seconds)
{
    // We check in VoEAudioProcessingImpl that this is only called when
    // typing detection is active.
    seconds = _typingDetection.TimeSinceLastDetectionInSeconds();
    return 0;
}
#endif

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
                                                int costPerTyping,
                                                int reportingThreshold,
                                                int penaltyDecay,
                                                int typeEventDelay)
{
    _typingDetection.SetParameters(timeWindow,
                                   costPerTyping,
                                   reportingThreshold,
                                   penaltyDecay,
                                   typeEventDelay,
                                   0);
    return 0;
}
#endif

void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
  swap_stereo_channels_ = enable;
}

bool TransmitMixer::IsStereoChannelSwappingEnabled() {
  return swap_stereo_channels_;
}

}  // namespace voe
}  // namespace webrtc