/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "voice_detection_impl.h"

#include <cassert>

#include "critical_section_wrapper.h"
#include "webrtc_vad.h"

#include "audio_processing_impl.h"
#include "audio_buffer.h"

namespace webrtc {

typedef VadInst Handle;

namespace {
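// Maps the public Likelihood setting to the VAD mode passed to
// WebRtcVad_set_mode() in ConfigureHandle(). Note that the numeric ordering
// is inverted relative to the enum: kHighLikelihood maps to mode 0.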
int MapSetting(VoiceDetection::Likelihood likelihood) {
  switch (likelihood) {
    case VoiceDetection::kVeryLowLikelihood:
      return 3;
    case VoiceDetection::kLowLikelihood:
      return 2;
    case VoiceDetection::kModerateLikelihood:
      return 1;
    case VoiceDetection::kHighLikelihood:
      return 0;
  }
  // TODO(mflodman): This return is needed for gcc to compile; an assert can't
  // be added here because ApmTest triggers this path.
  return -1;
}
}  // namespace

VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessingImpl* apm)
  : ProcessingComponent(apm),
    apm_(apm),
    stream_has_voice_(false),
    using_external_vad_(false),
    likelihood_(kLowLikelihood),
    frame_size_ms_(10),
    frame_size_samples_(0) {}

VoiceDetectionImpl::~VoiceDetectionImpl() {}

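// Runs voice activity detection on the capture stream and stores the decision
// in stream_has_voice_, mirroring it into the AudioBuffer's activity flag.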
int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
  if (!is_component_enabled()) {
    return apm_->kNoError;
  }

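  // A decision supplied externally via set_stream_has_voice() takes precedence
  // over the internal VAD for this frame only; the flag is cleared here, so it
  // must be set again before each call.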
  if (using_external_vad_) {
    using_external_vad_ = false;
    return apm_->kNoError;
  }
  assert(audio->samples_per_split_channel() <= 160);

  WebRtc_Word16* mixed_data = audio->low_pass_split_data(0);
  if (audio->num_channels() > 1) {
    audio->CopyAndMixLowPass(1);
    mixed_data = audio->mixed_low_pass_data(0);
  }

  // TODO(ajm): concatenate data in frame buffer here.

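  // The VAD reports 1 for active voice and 0 for non-active voice; any other
  // return value is treated as an error.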
  int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
                                  apm_->split_sample_rate_hz(),
                                  mixed_data,
                                  frame_size_samples_);
  if (vad_ret == 0) {
    stream_has_voice_ = false;
    audio->set_activity(AudioFrame::kVadPassive);
  } else if (vad_ret == 1) {
    stream_has_voice_ = true;
    audio->set_activity(AudioFrame::kVadActive);
  } else {
    return apm_->kUnspecifiedError;
  }

  return apm_->kNoError;
}

int VoiceDetectionImpl::Enable(bool enable) {
  CriticalSectionScoped crit_scoped(apm_->crit());
  return EnableComponent(enable);
}

bool VoiceDetectionImpl::is_enabled() const {
  return is_component_enabled();
}

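// Allows the client to inject an externally computed VAD decision for the
// current frame; ProcessCaptureAudio() then skips the internal VAD once.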
int VoiceDetectionImpl::set_stream_has_voice(bool has_voice) {
  using_external_vad_ = true;
  stream_has_voice_ = has_voice;
  return apm_->kNoError;
}

bool VoiceDetectionImpl::stream_has_voice() const {
  // TODO(ajm): enable this assertion?
  //assert(using_external_vad_ || is_component_enabled());
  return stream_has_voice_;
}

int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
  CriticalSectionScoped crit_scoped(apm_->crit());
  if (MapSetting(likelihood) == -1) {
    return apm_->kBadParameterError;
  }

  likelihood_ = likelihood;
  return Configure();
}

VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
  return likelihood_;
}

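// Only 10 ms frames are currently supported (see the assert and TODO below);
// 20 ms and 30 ms pass the parameter check in anticipation of future support.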
int VoiceDetectionImpl::set_frame_size_ms(int size) {
  CriticalSectionScoped crit_scoped(apm_->crit());
  assert(size == 10);  // TODO(ajm): remove when supported.
  if (size != 10 &&
      size != 20 &&
      size != 30) {
    return apm_->kBadParameterError;
  }

  frame_size_ms_ = size;

  return Initialize();
}

int VoiceDetectionImpl::frame_size_ms() const {
  return frame_size_ms_;
}

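// Recomputes the frame size in samples from the configured frame size in
// milliseconds and the split-band sample rate of the audio processing module.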
int VoiceDetectionImpl::Initialize() {
  int err = ProcessingComponent::Initialize();
  if (err != apm_->kNoError || !is_component_enabled()) {
    return err;
  }

  using_external_vad_ = false;
  frame_size_samples_ = frame_size_ms_ * (apm_->split_sample_rate_hz() / 1000);
  // TODO(ajm): initialize frame buffer here.

  return apm_->kNoError;
}

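// Handle management callbacks used by ProcessingComponent; each handle wraps
// a single WebRtcVad instance.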
void* VoiceDetectionImpl::CreateHandle() const {
  Handle* handle = NULL;
  if (WebRtcVad_Create(&handle) != apm_->kNoError) {
    handle = NULL;
  } else {
    assert(handle != NULL);
  }

  return handle;
}

int VoiceDetectionImpl::DestroyHandle(void* handle) const {
  return WebRtcVad_Free(static_cast<Handle*>(handle));
}

int VoiceDetectionImpl::InitializeHandle(void* handle) const {
  return WebRtcVad_Init(static_cast<Handle*>(handle));
}

int VoiceDetectionImpl::ConfigureHandle(void* handle) const {
  return WebRtcVad_set_mode(static_cast<Handle*>(handle),
                            MapSetting(likelihood_));
}

int VoiceDetectionImpl::num_handles_required() const {
  return 1;
}

int VoiceDetectionImpl::GetHandleError(void* handle) const {
  // The VAD has no get_error() function.
  assert(handle != NULL);
  return apm_->kUnspecifiedError;
}
}  // namespace webrtc