1/*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11#include "webrtc/voice_engine/transmit_mixer.h"
12
13#include "webrtc/base/format_macros.h"
14#include "webrtc/modules/utility/interface/audio_frame_operations.h"
15#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
16#include "webrtc/system_wrappers/interface/event_wrapper.h"
17#include "webrtc/system_wrappers/interface/logging.h"
18#include "webrtc/system_wrappers/interface/trace.h"
19#include "webrtc/voice_engine/channel.h"
20#include "webrtc/voice_engine/channel_manager.h"
21#include "webrtc/voice_engine/include/voe_external_media.h"
22#include "webrtc/voice_engine/statistics.h"
23#include "webrtc/voice_engine/utility.h"
24#include "webrtc/voice_engine/voe_base_impl.h"
25
26#define WEBRTC_ABS(a) (((a) < 0) ? -(a) : (a))
27
28namespace webrtc {
29namespace voe {
30
31// TODO(ajm): The thread safety of this is dubious...
32void
33TransmitMixer::OnPeriodicProcess()
34{
35 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
36 "TransmitMixer::OnPeriodicProcess()");
37
38#if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
39 if (_typingNoiseWarningPending)
40 {
41 CriticalSectionScoped cs(&_callbackCritSect);
42 if (_voiceEngineObserverPtr)
43 {
44 if (_typingNoiseDetected) {
45 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
46 "TransmitMixer::OnPeriodicProcess() => "
47 "CallbackOnError(VE_TYPING_NOISE_WARNING)");
48 _voiceEngineObserverPtr->CallbackOnError(
49 -1,
50 VE_TYPING_NOISE_WARNING);
51 } else {
52 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
53 "TransmitMixer::OnPeriodicProcess() => "
54 "CallbackOnError(VE_TYPING_NOISE_OFF_WARNING)");
55 _voiceEngineObserverPtr->CallbackOnError(
56 -1,
57 VE_TYPING_NOISE_OFF_WARNING);
58 }
59 }
60 _typingNoiseWarningPending = false;
61 }
62#endif
63
64 bool saturationWarning = false;
65 {
66 // Modify |_saturationWarning| under lock to avoid conflict with write op
67 // in ProcessAudio and also ensure that we don't hold the lock during the
68 // callback.
69 CriticalSectionScoped cs(&_critSect);
70 saturationWarning = _saturationWarning;
71 if (_saturationWarning)
72 _saturationWarning = false;
73 }
74
75 if (saturationWarning)
76 {
77 CriticalSectionScoped cs(&_callbackCritSect);
78 if (_voiceEngineObserverPtr)
79 {
80 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
81 "TransmitMixer::OnPeriodicProcess() =>"
82 " CallbackOnError(VE_SATURATION_WARNING)");
83 _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
84 }
85 }
86}
87
88
89void TransmitMixer::PlayNotification(int32_t id,
90 uint32_t durationMs)
91{
92 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
93 "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
94 id, durationMs);
95
96 // Not implemented yet
97}
98
99void TransmitMixer::RecordNotification(int32_t id,
100 uint32_t durationMs)
101{
102 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
103 "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
104 id, durationMs);
105
106 // Not implemented yet
107}
108
109void TransmitMixer::PlayFileEnded(int32_t id)
110{
111 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
112 "TransmitMixer::PlayFileEnded(id=%d)", id);
113
114 assert(id == _filePlayerId);
115
116 CriticalSectionScoped cs(&_critSect);
117
118 _filePlaying = false;
119 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
120 "TransmitMixer::PlayFileEnded() =>"
121 "file player module is shutdown");
122}
123
124void
125TransmitMixer::RecordFileEnded(int32_t id)
126{
127 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
128 "TransmitMixer::RecordFileEnded(id=%d)", id);
129
130 if (id == _fileRecorderId)
131 {
132 CriticalSectionScoped cs(&_critSect);
133 _fileRecording = false;
134 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
135 "TransmitMixer::RecordFileEnded() => fileRecorder module"
136 "is shutdown");
137 } else if (id == _fileCallRecorderId)
138 {
139 CriticalSectionScoped cs(&_critSect);
140 _fileCallRecording = false;
141 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
142 "TransmitMixer::RecordFileEnded() => fileCallRecorder"
143 "module is shutdown");
144 }
145}
146
147int32_t
148TransmitMixer::Create(TransmitMixer*& mixer, uint32_t instanceId)
149{
150 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
151 "TransmitMixer::Create(instanceId=%d)", instanceId);
152 mixer = new TransmitMixer(instanceId);
153 if (mixer == NULL)
154 {
155 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
156 "TransmitMixer::Create() unable to allocate memory"
157 "for mixer");
158 return -1;
159 }
160 return 0;
161}
162
163void
164TransmitMixer::Destroy(TransmitMixer*& mixer)
165{
166 if (mixer)
167 {
168 delete mixer;
169 mixer = NULL;
170 }
171}
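
// Rough lifecycle sketch. The calls below mirror the setters defined in this
// file; the surrounding objects (process thread, statistics, channel manager,
// APM, observer) are owned by the enclosing VoiceEngine and are illustrative
// names only:
//
//   TransmitMixer* mixer = NULL;
//   if (TransmitMixer::Create(mixer, instance_id) != 0)
//       return;  // allocation failed
//   mixer->SetEngineInformation(process_thread, engine_stats, channel_manager);
//   mixer->SetAudioProcessingModule(audioproc);
//   mixer->RegisterVoiceEngineObserver(observer);
//   ...
//   TransmitMixer::Destroy(mixer);  // deletes the mixer and NULLs the pointer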
172
173TransmitMixer::TransmitMixer(uint32_t instanceId) :
174 _engineStatisticsPtr(NULL),
175 _channelManagerPtr(NULL),
176 audioproc_(NULL),
177 _voiceEngineObserverPtr(NULL),
178 _processThreadPtr(NULL),
179 _filePlayerPtr(NULL),
180 _fileRecorderPtr(NULL),
181 _fileCallRecorderPtr(NULL),
182 // Avoid conflicts with other channels by adding 1024 - 1026;
183 // we won't use as many as 1024 channels.
184 _filePlayerId(instanceId + 1024),
185 _fileRecorderId(instanceId + 1025),
186 _fileCallRecorderId(instanceId + 1026),
187 _filePlaying(false),
188 _fileRecording(false),
189 _fileCallRecording(false),
190 _audioLevel(),
191 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
192 _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
193#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
194 _typingNoiseWarningPending(false),
195 _typingNoiseDetected(false),
196#endif
197 _saturationWarning(false),
198 _instanceId(instanceId),
199 _mixFileWithMicrophone(false),
200 _captureLevel(0),
201 external_postproc_ptr_(NULL),
202 external_preproc_ptr_(NULL),
203 _mute(false),
204 _remainingMuteMicTimeMs(0),
205 stereo_codec_(false),
206 swap_stereo_channels_(false)
207{
208 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
209 "TransmitMixer::TransmitMixer() - ctor");
210}
211
212TransmitMixer::~TransmitMixer()
213{
214 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
215 "TransmitMixer::~TransmitMixer() - dtor");
216 _monitorModule.DeRegisterObserver();
217 if (_processThreadPtr)
218 {
219 _processThreadPtr->DeRegisterModule(&_monitorModule);
220 }
221 DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
222 DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
223 {
224 CriticalSectionScoped cs(&_critSect);
225 if (_fileRecorderPtr)
226 {
227 _fileRecorderPtr->RegisterModuleFileCallback(NULL);
228 _fileRecorderPtr->StopRecording();
229 FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
230 _fileRecorderPtr = NULL;
231 }
232 if (_fileCallRecorderPtr)
233 {
234 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
235 _fileCallRecorderPtr->StopRecording();
236 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
237 _fileCallRecorderPtr = NULL;
238 }
239 if (_filePlayerPtr)
240 {
241 _filePlayerPtr->RegisterModuleFileCallback(NULL);
242 _filePlayerPtr->StopPlayingFile();
243 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
244 _filePlayerPtr = NULL;
245 }
246 }
247 delete &_critSect;
248 delete &_callbackCritSect;
249}
250
251int32_t
252TransmitMixer::SetEngineInformation(ProcessThread& processThread,
253 Statistics& engineStatistics,
254 ChannelManager& channelManager)
255{
256 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
257 "TransmitMixer::SetEngineInformation()");
258
259 _processThreadPtr = &processThread;
260 _engineStatisticsPtr = &engineStatistics;
261 _channelManagerPtr = &channelManager;
262
263 _processThreadPtr->RegisterModule(&_monitorModule);
264 _monitorModule.RegisterObserver(*this);
265
266 return 0;
267}
268
269int32_t
270TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
271{
272 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
273 "TransmitMixer::RegisterVoiceEngineObserver()");
274 CriticalSectionScoped cs(&_callbackCritSect);
275
276 if (_voiceEngineObserverPtr)
277 {
278 _engineStatisticsPtr->SetLastError(
279 VE_INVALID_OPERATION, kTraceError,
280 "RegisterVoiceEngineObserver() observer already enabled");
281 return -1;
282 }
283 _voiceEngineObserverPtr = &observer;
284 return 0;
285}
286
287int32_t
288TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
289{
290 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
291 "TransmitMixer::SetAudioProcessingModule("
292 "audioProcessingModule=0x%x)",
293 audioProcessingModule);
294 audioproc_ = audioProcessingModule;
295 return 0;
296}
297
298void TransmitMixer::GetSendCodecInfo(int* max_sample_rate, int* max_channels) {
299 *max_sample_rate = 8000;
300 *max_channels = 1;
301 for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
302 it.Increment()) {
303 Channel* channel = it.GetChannel();
304 if (channel->Sending()) {
305 CodecInst codec;
306 channel->GetSendCodec(codec);
307 *max_sample_rate = std::max(*max_sample_rate, codec.plfreq);
308 *max_channels = std::max(*max_channels, codec.channels);
309 }
310 }
311}
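
// Example of the reduction above: with one sending channel using a 48 kHz
// stereo codec and another using 8 kHz mono PCMU, this yields
// *max_sample_rate = 48000 and *max_channels = 2; with no sending channels
// the defaults of 8000 Hz / mono are returned.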
312
313int32_t
314TransmitMixer::PrepareDemux(const void* audioSamples,
315 size_t nSamples,
316 uint8_t nChannels,
317 uint32_t samplesPerSec,
318 uint16_t totalDelayMS,
319 int32_t clockDrift,
320 uint16_t currentMicLevel,
321 bool keyPressed)
322{
323 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
324 "TransmitMixer::PrepareDemux(nSamples=%" PRIuS ", "
325 "nChannels=%u, samplesPerSec=%u, totalDelayMS=%u, "
326 "clockDrift=%d, currentMicLevel=%u)",
327 nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift,
328 currentMicLevel);
329
330 // --- Resample input audio and create/store the initial audio frame
331 GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
332 nSamples,
333 nChannels,
334 samplesPerSec);
335
336 {
337 CriticalSectionScoped cs(&_callbackCritSect);
338 if (external_preproc_ptr_) {
339 external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
340 _audioFrame.data_,
341 _audioFrame.samples_per_channel_,
342 _audioFrame.sample_rate_hz_,
343 _audioFrame.num_channels_ == 2);
344 }
345 }
346
347 // --- Near-end audio processing.
348 ProcessAudio(totalDelayMS, clockDrift, currentMicLevel, keyPressed);
349
350 if (swap_stereo_channels_ && stereo_codec_)
351 // Only bother swapping if we're using a stereo codec.
352 AudioFrameOperations::SwapStereoChannels(&_audioFrame);
353
354 // --- Annoying typing detection (utilizes the APM/VAD decision)
355#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
356 TypingDetection(keyPressed);
357#endif
358
359 // --- Mute during DTMF tone if direct feedback is enabled
360 if (_remainingMuteMicTimeMs > 0)
361 {
362 AudioFrameOperations::Mute(_audioFrame);
363 _remainingMuteMicTimeMs -= 10;
364 if (_remainingMuteMicTimeMs < 0)
365 {
366 _remainingMuteMicTimeMs = 0;
367 }
368 }
369
370 // --- Mute signal
371 if (_mute)
372 {
373 AudioFrameOperations::Mute(_audioFrame);
374 }
375
376 // --- Mix with file (does not affect the mixing frequency)
377 if (_filePlaying)
378 {
379 MixOrReplaceAudioWithFile(_audioFrame.sample_rate_hz_);
380 }
381
382 // --- Record to file
383 bool file_recording = false;
384 {
385 CriticalSectionScoped cs(&_critSect);
386 file_recording = _fileRecording;
387 }
388 if (file_recording)
389 {
390 RecordAudioToFile(_audioFrame.sample_rate_hz_);
391 }
392
393 {
394 CriticalSectionScoped cs(&_callbackCritSect);
395 if (external_postproc_ptr_) {
396 external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
397 _audioFrame.data_,
398 _audioFrame.samples_per_channel_,
399 _audioFrame.sample_rate_hz_,
400 _audioFrame.num_channels_ == 2);
401 }
402 }
403
404 // --- Measure audio level of speech after all processing.
405 _audioLevel.ComputeLevel(_audioFrame);
406 return 0;
407}
408
409int32_t
410TransmitMixer::DemuxAndMix()
411{
412 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
413 "TransmitMixer::DemuxAndMix()");
414
415 for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
416 it.Increment())
417 {
418 Channel* channelPtr = it.GetChannel();
419 if (channelPtr->Sending())
420 {
421 // Demultiplex makes a copy of its input.
422 channelPtr->Demultiplex(_audioFrame);
423 channelPtr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
424 }
425 }
426 return 0;
427}
428
429void TransmitMixer::DemuxAndMix(const int voe_channels[],
430 int number_of_voe_channels) {
431 for (int i = 0; i < number_of_voe_channels; ++i) {
432 voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
433 voe::Channel* channel_ptr = ch.channel();
434 if (channel_ptr) {
435 if (channel_ptr->Sending()) {
436 // Demultiplex makes a copy of its input.
437 channel_ptr->Demultiplex(_audioFrame);
438 channel_ptr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
439 }
440 }
441 }
442}
443
444int32_t
445TransmitMixer::EncodeAndSend()
446{
447 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
448 "TransmitMixer::EncodeAndSend()");
449
450 for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
451 it.Increment())
452 {
453 Channel* channelPtr = it.GetChannel();
454 if (channelPtr->Sending())
455 {
456 channelPtr->EncodeAndSend();
457 }
458 }
459 return 0;
460}
461
462void TransmitMixer::EncodeAndSend(const int voe_channels[],
463 int number_of_voe_channels) {
464 for (int i = 0; i < number_of_voe_channels; ++i) {
465 voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
466 voe::Channel* channel_ptr = ch.channel();
467 if (channel_ptr && channel_ptr->Sending())
468 channel_ptr->EncodeAndSend();
469 }
470}
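
// Capture-side call order, roughly once per 10 ms frame. The driver is the
// audio device capture callback (routed through VoEBaseImpl in this codebase);
// the sketch below is illustrative only:
//
//   mixer->PrepareDemux(audio, samples, channels, rate_hz, delay_ms,
//                       clock_drift, mic_level, key_pressed);
//   mixer->DemuxAndMix();    // copy the processed frame to each sending channel
//   mixer->EncodeAndSend();  // each sending channel encodes and packetizes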
471
472uint32_t TransmitMixer::CaptureLevel() const
473{
474 return _captureLevel;
475}
476
477void
478TransmitMixer::UpdateMuteMicrophoneTime(uint32_t lengthMs)
479{
480 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
481 "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)",
482 lengthMs);
483 _remainingMuteMicTimeMs = lengthMs;
484}
485
486int32_t
487TransmitMixer::StopSend()
488{
489 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
490 "TransmitMixer::StopSend()");
491 _audioLevel.Clear();
492 return 0;
493}
494
495int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
496 bool loop,
497 FileFormats format,
498 int startPosition,
499 float volumeScaling,
500 int stopPosition,
501 const CodecInst* codecInst)
502{
503 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
504 "TransmitMixer::StartPlayingFileAsMicrophone("
505 "fileNameUTF8[]=%s,loop=%d, format=%d, volumeScaling=%5.3f,"
506 " startPosition=%d, stopPosition=%d)", fileName, loop,
507 format, volumeScaling, startPosition, stopPosition);
508
509 if (_filePlaying)
510 {
511 _engineStatisticsPtr->SetLastError(
512 VE_ALREADY_PLAYING, kTraceWarning,
513 "StartPlayingFileAsMicrophone() is already playing");
514 return 0;
515 }
516
517 CriticalSectionScoped cs(&_critSect);
518
519 // Destroy the old instance
520 if (_filePlayerPtr)
521 {
522 _filePlayerPtr->RegisterModuleFileCallback(NULL);
523 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
524 _filePlayerPtr = NULL;
525 }
526
527 // Dynamically create the instance
528 _filePlayerPtr
529 = FilePlayer::CreateFilePlayer(_filePlayerId,
530 (const FileFormats) format);
531
532 if (_filePlayerPtr == NULL)
533 {
534 _engineStatisticsPtr->SetLastError(
535 VE_INVALID_ARGUMENT, kTraceError,
536 "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
537 return -1;
538 }
539
540 const uint32_t notificationTime(0);
541
542 if (_filePlayerPtr->StartPlayingFile(
543 fileName,
544 loop,
545 startPosition,
546 volumeScaling,
547 notificationTime,
548 stopPosition,
549 (const CodecInst*) codecInst) != 0)
550 {
551 _engineStatisticsPtr->SetLastError(
552 VE_BAD_FILE, kTraceError,
553 "StartPlayingFile() failed to start file playout");
554 _filePlayerPtr->StopPlayingFile();
555 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
556 _filePlayerPtr = NULL;
557 return -1;
558 }
559
560 _filePlayerPtr->RegisterModuleFileCallback(this);
561 _filePlaying = true;
562
563 return 0;
564}
565
566int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
567 FileFormats format,
568 int startPosition,
569 float volumeScaling,
570 int stopPosition,
571 const CodecInst* codecInst)
572{
573 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
574 "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
575 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
576 format, volumeScaling, startPosition, stopPosition);
577
578 if (stream == NULL)
579 {
580 _engineStatisticsPtr->SetLastError(
581 VE_BAD_FILE, kTraceError,
582 "StartPlayingFileAsMicrophone() NULL as input stream");
583 return -1;
584 }
585
586 if (_filePlaying)
587 {
588 _engineStatisticsPtr->SetLastError(
589 VE_ALREADY_PLAYING, kTraceWarning,
590 "StartPlayingFileAsMicrophone() is already playing");
591 return 0;
592 }
593
594 CriticalSectionScoped cs(&_critSect);
595
596 // Destroy the old instance
597 if (_filePlayerPtr)
598 {
599 _filePlayerPtr->RegisterModuleFileCallback(NULL);
600 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
601 _filePlayerPtr = NULL;
602 }
603
604 // Dynamically create the instance
605 _filePlayerPtr
606 = FilePlayer::CreateFilePlayer(_filePlayerId,
607 (const FileFormats) format);
608
609 if (_filePlayerPtr == NULL)
610 {
611 _engineStatisticsPtr->SetLastError(
612 VE_INVALID_ARGUMENT, kTraceWarning,
613 "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
614 return -1;
615 }
616
617 const uint32_t notificationTime(0);
618
619 if (_filePlayerPtr->StartPlayingFile(
620 (InStream&) *stream,
621 startPosition,
622 volumeScaling,
623 notificationTime,
624 stopPosition,
625 (const CodecInst*) codecInst) != 0)
626 {
627 _engineStatisticsPtr->SetLastError(
628 VE_BAD_FILE, kTraceError,
629 "StartPlayingFile() failed to start file playout");
630 _filePlayerPtr->StopPlayingFile();
631 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
632 _filePlayerPtr = NULL;
633 return -1;
634 }
635 _filePlayerPtr->RegisterModuleFileCallback(this);
636 _filePlaying = true;
637
638 return 0;
639}
640
641int TransmitMixer::StopPlayingFileAsMicrophone()
642{
643 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
644 "TransmitMixer::StopPlayingFileAsMicrophone()");
645
646 if (!_filePlaying)
647 {
648 _engineStatisticsPtr->SetLastError(
649 VE_INVALID_OPERATION, kTraceWarning,
650 "StopPlayingFileAsMicrophone() isnot playing");
651 return 0;
652 }
653
654 CriticalSectionScoped cs(&_critSect);
655
656 if (_filePlayerPtr->StopPlayingFile() != 0)
657 {
658 _engineStatisticsPtr->SetLastError(
659 VE_CANNOT_STOP_PLAYOUT, kTraceError,
660 "StopPlayingFile() couldnot stop playing file");
661 return -1;
662 }
663
664 _filePlayerPtr->RegisterModuleFileCallback(NULL);
665 FilePlayer::DestroyFilePlayer(_filePlayerPtr);
666 _filePlayerPtr = NULL;
667 _filePlaying = false;
668
669 return 0;
670}
671
672int TransmitMixer::IsPlayingFileAsMicrophone() const
673{
674 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
675 "TransmitMixer::IsPlayingFileAsMicrophone()");
676 return _filePlaying;
677}
678
679int TransmitMixer::StartRecordingMicrophone(const char* fileName,
680 const CodecInst* codecInst)
681{
682 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
683 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
684 fileName);
685
686 CriticalSectionScoped cs(&_critSect);
687
688 if (_fileRecording)
689 {
690 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
691 "StartRecordingMicrophone() is already recording");
692 return 0;
693 }
694
695 FileFormats format;
696 const uint32_t notificationTime(0); // Not supported in VoE
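// The fallback codec below is interpreted as {pltype, plname, plfreq,
// pacsize, channels, rate}, i.e. 20 ms packets (320 samples) of 16 kHz mono
// L16, assuming the field order of CodecInst in common_types.h.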
697 CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
698
699 if (codecInst != NULL &&
700 (codecInst->channels < 0 || codecInst->channels > 2))
701 {
702 _engineStatisticsPtr->SetLastError(
703 VE_BAD_ARGUMENT, kTraceError,
704 "StartRecordingMicrophone() invalid compression");
705 return (-1);
706 }
707 if (codecInst == NULL)
708 {
709 format = kFileFormatPcm16kHzFile;
710 codecInst = &dummyCodec;
711 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
712 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
713 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
714 {
715 format = kFileFormatWavFile;
716 } else
717 {
718 format = kFileFormatCompressedFile;
719 }
720
721 // Destroy the old instance
722 if (_fileRecorderPtr)
723 {
724 _fileRecorderPtr->RegisterModuleFileCallback(NULL);
725 FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
726 _fileRecorderPtr = NULL;
727 }
728
729 _fileRecorderPtr =
730 FileRecorder::CreateFileRecorder(_fileRecorderId,
731 (const FileFormats) format);
732 if (_fileRecorderPtr == NULL)
733 {
734 _engineStatisticsPtr->SetLastError(
735 VE_INVALID_ARGUMENT, kTraceError,
736 "StartRecordingMicrophone() fileRecorder format isnot correct");
737 return -1;
738 }
739
740 if (_fileRecorderPtr->StartRecordingAudioFile(
741 fileName,
742 (const CodecInst&) *codecInst,
743 notificationTime) != 0)
744 {
745 _engineStatisticsPtr->SetLastError(
746 VE_BAD_FILE, kTraceError,
747 "StartRecordingAudioFile() failed to start file recording");
748 _fileRecorderPtr->StopRecording();
749 FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
750 _fileRecorderPtr = NULL;
751 return -1;
752 }
753 _fileRecorderPtr->RegisterModuleFileCallback(this);
754 _fileRecording = true;
755
756 return 0;
757}
758
759int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
760 const CodecInst* codecInst)
761{
762 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
763 "TransmitMixer::StartRecordingMicrophone()");
764
765 CriticalSectionScoped cs(&_critSect);
766
767 if (_fileRecording)
768 {
769 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
770 "StartRecordingMicrophone() is already recording");
771 return 0;
772 }
773
774 FileFormats format;
775 const uint32_t notificationTime(0); // Not supported in VoE
776 CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
777
778 if (codecInst != NULL && codecInst->channels != 1)
779 {
780 _engineStatisticsPtr->SetLastError(
781 VE_BAD_ARGUMENT, kTraceError,
782 "StartRecordingMicrophone() invalid compression");
783 return (-1);
784 }
785 if (codecInst == NULL)
786 {
787 format = kFileFormatPcm16kHzFile;
788 codecInst = &dummyCodec;
789 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
790 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
791 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
792 {
793 format = kFileFormatWavFile;
794 } else
795 {
796 format = kFileFormatCompressedFile;
797 }
798
799 // Destroy the old instance
800 if (_fileRecorderPtr)
801 {
802 _fileRecorderPtr->RegisterModuleFileCallback(NULL);
803 FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
804 _fileRecorderPtr = NULL;
805 }
806
807 _fileRecorderPtr =
808 FileRecorder::CreateFileRecorder(_fileRecorderId,
809 (const FileFormats) format);
810 if (_fileRecorderPtr == NULL)
811 {
812 _engineStatisticsPtr->SetLastError(
813 VE_INVALID_ARGUMENT, kTraceError,
814 "StartRecordingMicrophone() fileRecorder format isnot correct");
815 return -1;
816 }
817
818 if (_fileRecorderPtr->StartRecordingAudioFile(*stream,
819 *codecInst,
820 notificationTime) != 0)
821 {
822 _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
823 "StartRecordingAudioFile() failed to start file recording");
824 _fileRecorderPtr->StopRecording();
825 FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
826 _fileRecorderPtr = NULL;
827 return -1;
828 }
829
830 _fileRecorderPtr->RegisterModuleFileCallback(this);
831 _fileRecording = true;
832
833 return 0;
834}
835
836
837int TransmitMixer::StopRecordingMicrophone()
838{
839 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
840 "TransmitMixer::StopRecordingMicrophone()");
841
842 CriticalSectionScoped cs(&_critSect);
843
844 if (!_fileRecording)
845 {
846 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
847 "StopRecordingMicrophone() isnot recording");
848 return 0;
849 }
850
851 if (_fileRecorderPtr->StopRecording() != 0)
852 {
853 _engineStatisticsPtr->SetLastError(
854 VE_STOP_RECORDING_FAILED, kTraceError,
855 "StopRecording(), could not stop recording");
856 return -1;
857 }
858 _fileRecorderPtr->RegisterModuleFileCallback(NULL);
859 FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
860 _fileRecorderPtr = NULL;
861 _fileRecording = false;
862
863 return 0;
864}
865
866int TransmitMixer::StartRecordingCall(const char* fileName,
867 const CodecInst* codecInst)
868{
869 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
870 "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);
871
872 if (_fileCallRecording)
873 {
874 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
875 "StartRecordingCall() is already recording");
876 return 0;
877 }
878
879 FileFormats format;
880 const uint32_t notificationTime(0); // Not supported in VoE
881 CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
882
883 if (codecInst != NULL && codecInst->channels != 1)
884 {
885 _engineStatisticsPtr->SetLastError(
886 VE_BAD_ARGUMENT, kTraceError,
887 "StartRecordingCall() invalid compression");
888 return (-1);
889 }
890 if (codecInst == NULL)
891 {
892 format = kFileFormatPcm16kHzFile;
893 codecInst = &dummyCodec;
894 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
895 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
896 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
897 {
898 format = kFileFormatWavFile;
899 } else
900 {
901 format = kFileFormatCompressedFile;
902 }
903
904 CriticalSectionScoped cs(&_critSect);
905
906 // Destroy the old instance
907 if (_fileCallRecorderPtr)
908 {
909 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
910 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
911 _fileCallRecorderPtr = NULL;
912 }
913
914 _fileCallRecorderPtr
915 = FileRecorder::CreateFileRecorder(_fileCallRecorderId,
916 (const FileFormats) format);
917 if (_fileCallRecorderPtr == NULL)
918 {
919 _engineStatisticsPtr->SetLastError(
920 VE_INVALID_ARGUMENT, kTraceError,
921 "StartRecordingCall() fileRecorder format isnot correct");
922 return -1;
923 }
924
925 if (_fileCallRecorderPtr->StartRecordingAudioFile(
926 fileName,
927 (const CodecInst&) *codecInst,
928 notificationTime) != 0)
929 {
930 _engineStatisticsPtr->SetLastError(
931 VE_BAD_FILE, kTraceError,
932 "StartRecordingAudioFile() failed to start file recording");
933 _fileCallRecorderPtr->StopRecording();
934 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
935 _fileCallRecorderPtr = NULL;
936 return -1;
937 }
938 _fileCallRecorderPtr->RegisterModuleFileCallback(this);
939 _fileCallRecording = true;
940
941 return 0;
942}
943
944int TransmitMixer::StartRecordingCall(OutStream* stream,
945 const CodecInst* codecInst)
946{
947 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
948 "TransmitMixer::StartRecordingCall()");
949
950 if (_fileCallRecording)
951 {
952 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
953 "StartRecordingCall() is already recording");
954 return 0;
955 }
956
957 FileFormats format;
958 const uint32_t notificationTime(0); // Not supported in VoE
959 CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
960
961 if (codecInst != NULL && codecInst->channels != 1)
962 {
963 _engineStatisticsPtr->SetLastError(
964 VE_BAD_ARGUMENT, kTraceError,
965 "StartRecordingCall() invalid compression");
966 return (-1);
967 }
968 if (codecInst == NULL)
969 {
970 format = kFileFormatPcm16kHzFile;
971 codecInst = &dummyCodec;
972 } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
973 (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
974 (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
975 {
976 format = kFileFormatWavFile;
977 } else
978 {
979 format = kFileFormatCompressedFile;
980 }
981
982 CriticalSectionScoped cs(&_critSect);
983
984 // Destroy the old instance
985 if (_fileCallRecorderPtr)
986 {
987 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
988 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
989 _fileCallRecorderPtr = NULL;
990 }
991
992 _fileCallRecorderPtr =
993 FileRecorder::CreateFileRecorder(_fileCallRecorderId,
994 (const FileFormats) format);
995 if (_fileCallRecorderPtr == NULL)
996 {
997 _engineStatisticsPtr->SetLastError(
998 VE_INVALID_ARGUMENT, kTraceError,
999 "StartRecordingCall() fileRecorder format isnot correct");
1000 return -1;
1001 }
1002
1003 if (_fileCallRecorderPtr->StartRecordingAudioFile(*stream,
1004 *codecInst,
1005 notificationTime) != 0)
1006 {
1007 _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
1008 "StartRecordingAudioFile() failed to start file recording");
1009 _fileCallRecorderPtr->StopRecording();
1010 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
1011 _fileCallRecorderPtr = NULL;
1012 return -1;
1013 }
1014
1015 _fileCallRecorderPtr->RegisterModuleFileCallback(this);
1016 _fileCallRecording = true;
1017
1018 return 0;
1019}
1020
1021int TransmitMixer::StopRecordingCall()
1022{
1023 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1024 "TransmitMixer::StopRecordingCall()");
1025
1026 if (!_fileCallRecording)
1027 {
1028 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
1029 "StopRecordingCall() file isnot recording");
1030 return -1;
1031 }
1032
1033 CriticalSectionScoped cs(&_critSect);
1034
1035 if (_fileCallRecorderPtr->StopRecording() != 0)
1036 {
1037 _engineStatisticsPtr->SetLastError(
1038 VE_STOP_RECORDING_FAILED, kTraceError,
1039 "StopRecording(), could not stop recording");
1040 return -1;
1041 }
1042
1043 _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
1044 FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
1045 _fileCallRecorderPtr = NULL;
1046 _fileCallRecording = false;
1047
1048 return 0;
1049}
1050
1051void
1052TransmitMixer::SetMixWithMicStatus(bool mix)
1053{
1054 _mixFileWithMicrophone = mix;
1055}
1056
1057int TransmitMixer::RegisterExternalMediaProcessing(
1058 VoEMediaProcess* object,
1059 ProcessingTypes type) {
1060 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1061 "TransmitMixer::RegisterExternalMediaProcessing()");
1062
1063 CriticalSectionScoped cs(&_callbackCritSect);
1064 if (!object) {
1065 return -1;
1066 }
1067
1068 // Store the callback object according to the processing type.
1069 if (type == kRecordingAllChannelsMixed) {
1070 external_postproc_ptr_ = object;
1071 } else if (type == kRecordingPreprocessing) {
1072 external_preproc_ptr_ = object;
1073 } else {
1074 return -1;
1075 }
1076 return 0;
1077}
1078
1079int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
1080 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1081 "TransmitMixer::DeRegisterExternalMediaProcessing()");
1082
1083 CriticalSectionScoped cs(&_callbackCritSect);
1084 if (type == kRecordingAllChannelsMixed) {
1085 external_postproc_ptr_ = NULL;
1086 } else if (type == kRecordingPreprocessing) {
1087 external_preproc_ptr_ = NULL;
1088 } else {
1089 return -1;
1090 }
1091 return 0;
1092}
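
// Caller-side sketch: a VoEMediaProcess implementation receives each 10 ms
// frame in place. The parameter list below mirrors the Process() calls made
// from PrepareDemux() above and is illustrative; the exact signature is
// declared in include/voe_external_media.h:
//
//   class MicTap : public VoEMediaProcess {
//     void Process(int channel, ProcessingTypes type, int16_t audio10ms[],
//                  size_t length, int samplingFreq, bool isStereo) {
//       // e.g. apply a custom gain to, or tap, the mixed recording here.
//     }
//   };
//   ...
//   mixer->RegisterExternalMediaProcessing(&mic_tap, kRecordingAllChannelsMixed);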
1093
1094int
1095TransmitMixer::SetMute(bool enable)
1096{
1097 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1098 "TransmitMixer::SetMute(enable=%d)", enable);
1099 _mute = enable;
1100 return 0;
1101}
1102
1103bool
1104TransmitMixer::Mute() const
1105{
1106 return _mute;
1107}
1108
1109int8_t TransmitMixer::AudioLevel() const
1110{
1111 // Speech + file level [0,9]
1112 return _audioLevel.Level();
1113}
1114
1115int16_t TransmitMixer::AudioLevelFullRange() const
1116{
1117 // Speech + file level [0,32767]
1118 return _audioLevel.LevelFullRange();
1119}
1120
1121bool TransmitMixer::IsRecordingCall()
1122{
1123 return _fileCallRecording;
1124}
1125
1126bool TransmitMixer::IsRecordingMic()
1127{
1128 CriticalSectionScoped cs(&_critSect);
1129 return _fileRecording;
1130}
1131
1132void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
1133 size_t samples_per_channel,
1134 int num_channels,
1135 int sample_rate_hz) {
1136 int codec_rate;
1137 int num_codec_channels;
1138 GetSendCodecInfo(&codec_rate, &num_codec_channels);
1139 // TODO(ajm): This currently restricts the sample rate to 32 kHz.
1140 // See: https://code.google.com/p/webrtc/issues/detail?id=3146
1141 // When 48 kHz is supported natively by AudioProcessing, this will have
1142 // to be changed to handle 44.1 kHz.
1143 int max_sample_rate_hz = kAudioProcMaxNativeSampleRateHz;
1144 if (audioproc_->echo_control_mobile()->is_enabled()) {
1145 // AECM only supports 8 and 16 kHz.
1146 max_sample_rate_hz = 16000;
1147 }
1148 codec_rate = std::min(codec_rate, max_sample_rate_hz);
1149 stereo_codec_ = num_codec_channels == 2;
1150
1151 if (!mono_buffer_.get()) {
1152 // Temporary space for DownConvertToCodecFormat.
1153 mono_buffer_.reset(new int16_t[kMaxMonoDataSizeSamples]);
1154 }
1155 DownConvertToCodecFormat(audio,
1156 samples_per_channel,
1157 num_channels,
1158 sample_rate_hz,
1159 num_codec_channels,
1160 codec_rate,
1161 mono_buffer_.get(),
1162 &resampler_,
1163 &_audioFrame);
1164}
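
// Example of the clamping above: capturing for a 48 kHz codec still yields a
// frame at kAudioProcMaxNativeSampleRateHz (32 kHz at the time of the TODO
// above), or 16 kHz when AECM is enabled, since DownConvertToCodecFormat()
// resamples to the clamped codec rate.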
1165
1166int32_t TransmitMixer::RecordAudioToFile(
1167 uint32_t mixingFrequency)
1168{
1169 CriticalSectionScoped cs(&_critSect);
1170 if (_fileRecorderPtr == NULL)
1171 {
1172 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1173 "TransmitMixer::RecordAudioToFile() filerecorder doesnot"
1174 "exist");
1175 return -1;
1176 }
1177
1178 if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
1179 {
1180 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1181 "TransmitMixer::RecordAudioToFile() file recording"
1182 "failed");
1183 return -1;
1184 }
1185
1186 return 0;
1187}
1188
1189int32_t TransmitMixer::MixOrReplaceAudioWithFile(
1190 int mixingFrequency)
1191{
1192 rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
1193
1194 size_t fileSamples(0);
1195 {
1196 CriticalSectionScoped cs(&_critSect);
1197 if (_filePlayerPtr == NULL)
1198 {
1199 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1200 VoEId(_instanceId, -1),
1201 "TransmitMixer::MixOrReplaceAudioWithFile()"
1202 "fileplayer doesnot exist");
1203 return -1;
1204 }
1205
1206 if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
1207 fileSamples,
1208 mixingFrequency) == -1)
1209 {
1210 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1211 "TransmitMixer::MixOrReplaceAudioWithFile() file"
1212 " mixing failed");
1213 return -1;
1214 }
1215 }
1216
1217 assert(_audioFrame.samples_per_channel_ == fileSamples);
1218
1219 if (_mixFileWithMicrophone)
1220 {
1221 // Currently file stream is always mono.
1222 // TODO(xians): Change the code when FilePlayer supports real stereo.
1223 MixWithSat(_audioFrame.data_,
1224 _audioFrame.num_channels_,
1225 fileBuffer.get(),
1226 1,
1227 fileSamples);
1228 } else
1229 {
1230 // Replace ACM audio with file.
1231 // Currently file stream is always mono.
1232 // TODO(xians): Change the code when FilePlayer supports real stereo.
1233 _audioFrame.UpdateFrame(-1,
1234 0xFFFFFFFF,
1235 fileBuffer.get(),
1236 fileSamples,
1237 mixingFrequency,
1238 AudioFrame::kNormalSpeech,
1239 AudioFrame::kVadUnknown,
1240 1);
1241 }
1242 return 0;
1243}
1244
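// ProcessAudio() runs the near-end APM pass on |_audioFrame|. Per frame, the
// capture delay, analog mic level, drift and key-press state are handed to
// the APM before ProcessStream(), and the (possibly AGC-adjusted) analog
// level is read back afterwards into |_captureLevel|.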
1245void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
1246 int current_mic_level, bool key_pressed) {
1247 if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
1248 // A redundant warning is reported in AudioDevice, which we've throttled
1249 // to avoid flooding the logs. Relegate this one to LS_VERBOSE to avoid
1250 // repeating the problem here.
1251 LOG_FERR1(LS_VERBOSE, set_stream_delay_ms, delay_ms);
1252 }
1253
1254 GainControl* agc = audioproc_->gain_control();
1255 if (agc->set_stream_analog_level(current_mic_level) != 0) {
1256 LOG_FERR1(LS_ERROR, set_stream_analog_level, current_mic_level);
1257 assert(false);
1258 }
1259
1260 EchoCancellation* aec = audioproc_->echo_cancellation();
1261 if (aec->is_drift_compensation_enabled()) {
1262 aec->set_stream_drift_samples(clock_drift);
1263 }
1264
1265 audioproc_->set_stream_key_pressed(key_pressed);
1266
1267 int err = audioproc_->ProcessStream(&_audioFrame);
1268 if (err != 0) {
1269 LOG(LS_ERROR) << "ProcessStream() error: " << err;
1270 assert(false);
1271 }
1272
1273 // Store new capture level. Only updated when analog AGC is enabled.
1274 _captureLevel = agc->stream_analog_level();
1275
1276 CriticalSectionScoped cs(&_critSect);
1277 // Triggers a callback in OnPeriodicProcess().
1278 _saturationWarning |= agc->stream_is_saturated();
1279}
1280
1281#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
1282void TransmitMixer::TypingDetection(bool keyPressed)
1283{
1284 // We let the VAD determine if we're using this feature or not.
1285 if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) {
1286 return;
1287 }
1288
1289 bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
1290 if (_typingDetection.Process(keyPressed, vadActive)) {
1291 _typingNoiseWarningPending = true;
1292 _typingNoiseDetected = true;
1293 } else {
1294 // If there is already a warning pending, do not change the state.
1295 // Otherwise set a warning pending if last callback was for noise detected.
1296 if (!_typingNoiseWarningPending && _typingNoiseDetected) {
1297 _typingNoiseWarningPending = true;
1298 _typingNoiseDetected = false;
1299 }
1300 }
1301}
1302#endif
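
// Note on the two flags above: while typing noise is being detected each
// frame re-arms VE_TYPING_NOISE_WARNING; the first non-typing frame after a
// detection arms VE_TYPING_NOISE_OFF_WARNING once. Both are delivered
// asynchronously from OnPeriodicProcess() on the module process thread.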
1303
1304int TransmitMixer::GetMixingFrequency()
1305{
1306 assert(_audioFrame.sample_rate_hz_ != 0);
1307 return _audioFrame.sample_rate_hz_;
1308}
1309
1310#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
1311int TransmitMixer::TimeSinceLastTyping(int &seconds)
1312{
1313 // We check in VoEAudioProcessingImpl that this is only called when
1314 // typing detection is active.
1315 seconds = _typingDetection.TimeSinceLastDetectionInSeconds();
1316 return 0;
1317}
1318#endif
1319
1320#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
1321int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
1322 int costPerTyping,
1323 int reportingThreshold,
1324 int penaltyDecay,
1325 int typeEventDelay)
1326{
1327 _typingDetection.SetParameters(timeWindow,
1328 costPerTyping,
1329 reportingThreshold,
1330 penaltyDecay,
1331 typeEventDelay,
1332 0);
1333 return 0;
1334}
1335#endif
1336
1337void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
1338 swap_stereo_channels_ = enable;
1339}
1340
1341bool TransmitMixer::IsStereoChannelSwappingEnabled() {
1342 return swap_stereo_channels_;
1343}
1344
1345} // namespace voe
1346} // namespace webrtc