blob: 97ce5297708c99adbaca07573d5be135b963da9e [file] [log] [blame]
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +00001/*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020011#include "modules/audio_coding/neteq/expand.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000012
13#include <assert.h>
pbos@webrtc.org12dc1a32013-08-05 16:22:53 +000014#include <string.h> // memset
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000015
16#include <algorithm> // min, max
Yves Gerey665174f2018-06-19 15:03:05 +020017#include <limits> // numeric_limits<T>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000018
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020019#include "common_audio/signal_processing/include/signal_processing_library.h"
Yves Gerey988cc082018-10-23 12:03:01 +020020#include "modules/audio_coding/neteq/audio_multi_vector.h"
Mirko Bonadei92ea95e2017-09-15 06:47:31 +020021#include "modules/audio_coding/neteq/background_noise.h"
22#include "modules/audio_coding/neteq/cross_correlation.h"
23#include "modules/audio_coding/neteq/dsp_helper.h"
24#include "modules/audio_coding/neteq/random_vector.h"
25#include "modules/audio_coding/neteq/statistics_calculator.h"
26#include "modules/audio_coding/neteq/sync_buffer.h"
Karl Wiberge40468b2017-11-22 10:42:26 +010027#include "rtc_base/numerics/safe_conversions.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000028
29namespace webrtc {
30
// Constructs an Expand object wired to the surrounding NetEq components.
// The pointer arguments are stored as raw pointers (not owned); they are
// presumably required to outlive this object -- confirm against the caller.
// |fs| is the sample rate in Hz (must be 8000, 16000, 32000 or 48000) and
// |num_channels| the number of audio channels (must be > 0).
Expand::Expand(BackgroundNoise* background_noise,
               SyncBuffer* sync_buffer,
               RandomVector* random_vector,
               StatisticsCalculator* statistics,
               int fs,
               size_t num_channels)
    : random_vector_(random_vector),
      sync_buffer_(sync_buffer),
      first_expand_(true),
      fs_hz_(fs),
      num_channels_(num_channels),
      consecutive_expands_(0),
      background_noise_(background_noise),
      statistics_(statistics),
      overlap_length_(5 * fs / 8000),  // 5 samples per 8 kHz, i.e. 0.625 ms.
      lag_index_direction_(0),
      current_lag_index_(0),
      stop_muting_(false),
      expand_duration_samples_(0),
      channel_parameters_(new ChannelParameters[num_channels_]) {
  assert(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000);
  assert(fs <= static_cast<int>(kMaxSampleRate));  // Should not be possible.
  assert(num_channels_ > 0);
  // Clear the three expansion lags before Reset() initializes the rest.
  memset(expand_lags_, 0, sizeof(expand_lags_));
  Reset();
}
57
// Defaulted out-of-line; the destructor needs no custom cleanup.
Expand::~Expand() = default;
59
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000060void Expand::Reset() {
61 first_expand_ = true;
62 consecutive_expands_ = 0;
63 max_lag_ = 0;
64 for (size_t ix = 0; ix < num_channels_; ++ix) {
65 channel_parameters_[ix].expand_vector0.Clear();
66 channel_parameters_[ix].expand_vector1.Clear();
67 }
68}
69
// Produces one block of expanded (packet-loss-concealment) audio into
// |output|. The signal is built per channel from three parts that are mixed
// together: a "voiced" part repeated from the stored expansion vectors, an
// "unvoiced" part made from AR-filtered random noise, and background noise.
// Returns 0 (no error paths are visible in this function).
int Expand::Process(AudioMultiVector* output) {
  int16_t random_vector[kMaxSampleRate / 8000 * 120 + 30];
  int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
  static const int kTempDataSize = 3600;
  int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
  int16_t* voiced_vector_storage = temp_data;
  int16_t* voiced_vector = &voiced_vector_storage[overlap_length_];
  static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
  int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
  // |unvoiced_vector| leaves room before it for kUnvoicedLpcOrder samples of
  // AR filter state; |noise_vector| likewise for kNoiseLpcOrder samples.
  int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
  int16_t* noise_vector = unvoiced_array_memory + kNoiseLpcOrder;

  int fs_mult = fs_hz_ / 8000;

  if (first_expand_) {
    // Perform initial setup if this is the first expansion since last reset.
    AnalyzeSignal(random_vector);
    first_expand_ = false;
    expand_duration_samples_ = 0;
  } else {
    // This is not the first expansion, parameters are already estimated.
    // Extract a noise segment.
    size_t rand_length = max_lag_;
    // This only applies to SWB where length could be larger than 256.
    assert(rand_length <= kMaxSampleRate / 8000 * 120 + 30);
    GenerateRandomVector(2, rand_length, random_vector);
  }

  // Generate signal.
  UpdateLagIndex();

  // Voiced part.
  // Generate a weighted vector with the current lag.
  size_t expansion_vector_length = max_lag_ + overlap_length_;
  size_t current_lag = expand_lags_[current_lag_index_];
  // Copy lag+overlap data.
  size_t expansion_vector_position =
      expansion_vector_length - current_lag - overlap_length_;
  size_t temp_length = current_lag + overlap_length_;
  for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
    ChannelParameters& parameters = channel_parameters_[channel_ix];
    // |current_lag_index_| selects how expand_vector0/1 are blended; it
    // cycles through 0..2 via UpdateLagIndex().
    if (current_lag_index_ == 0) {
      // Use only expand_vector0.
      assert(expansion_vector_position + temp_length <=
             parameters.expand_vector0.Size());
      parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
                                       voiced_vector_storage);
    } else if (current_lag_index_ == 1) {
      std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
      parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
                                       temp_0.get());
      std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
      parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
                                       temp_1.get());
      // Mix 3/4 of expand_vector0 with 1/4 of expand_vector1.
      WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 3, temp_1.get(), 1, 2,
                                            voiced_vector_storage, temp_length);
    } else if (current_lag_index_ == 2) {
      // Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
      assert(expansion_vector_position + temp_length <=
             parameters.expand_vector0.Size());
      assert(expansion_vector_position + temp_length <=
             parameters.expand_vector1.Size());

      std::unique_ptr<int16_t[]> temp_0(new int16_t[temp_length]);
      parameters.expand_vector0.CopyTo(temp_length, expansion_vector_position,
                                       temp_0.get());
      std::unique_ptr<int16_t[]> temp_1(new int16_t[temp_length]);
      parameters.expand_vector1.CopyTo(temp_length, expansion_vector_position,
                                       temp_1.get());
      WebRtcSpl_ScaleAndAddVectorsWithRound(temp_0.get(), 1, temp_1.get(), 1, 1,
                                            voiced_vector_storage, temp_length);
    }

    // Get tapering window parameters. Values are in Q15.
    int16_t muting_window, muting_window_increment;
    int16_t unmuting_window, unmuting_window_increment;
    if (fs_hz_ == 8000) {
      muting_window = DspHelper::kMuteFactorStart8kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement8kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart8kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement8kHz;
    } else if (fs_hz_ == 16000) {
      muting_window = DspHelper::kMuteFactorStart16kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement16kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart16kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement16kHz;
    } else if (fs_hz_ == 32000) {
      muting_window = DspHelper::kMuteFactorStart32kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement32kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart32kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement32kHz;
    } else {  // fs_ == 48000
      muting_window = DspHelper::kMuteFactorStart48kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement48kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart48kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement48kHz;
    }

    // Smooth the expanded if it has not been muted to a low amplitude and
    // |current_voice_mix_factor| is larger than 0.5.
    if ((parameters.mute_factor > 819) &&
        (parameters.current_voice_mix_factor > 8192)) {
      size_t start_ix = sync_buffer_->Size() - overlap_length_;
      for (size_t i = 0; i < overlap_length_; i++) {
        // Do overlap add between new vector and overlap.
        // Both windows are Q15; the products are rounded (+16384) and
        // shifted back down by 15.
        (*sync_buffer_)[channel_ix][start_ix + i] =
            (((*sync_buffer_)[channel_ix][start_ix + i] * muting_window) +
             (((parameters.mute_factor * voiced_vector_storage[i]) >> 14) *
              unmuting_window) +
             16384) >>
            15;
        muting_window += muting_window_increment;
        unmuting_window += unmuting_window_increment;
      }
    } else if (parameters.mute_factor == 0) {
      // The expanded signal will consist of only comfort noise if
      // mute_factor = 0. Set the output length to 15 ms for best noise
      // production.
      // TODO(hlundin): This has been disabled since the length of
      // parameters.expand_vector0 and parameters.expand_vector1 no longer
      // match with expand_lags_, causing invalid reads and writes. Is it a good
      // idea to enable this again, and solve the vector size problem?
      // max_lag_ = fs_mult * 120;
      // expand_lags_[0] = fs_mult * 120;
      // expand_lags_[1] = fs_mult * 120;
      // expand_lags_[2] = fs_mult * 120;
    }

    // Unvoiced part.
    // Filter |scaled_random_vector| through |ar_filter_|.
    // Seed the filter memory (the kUnvoicedLpcOrder samples just before
    // |unvoiced_vector|) from the saved per-channel state.
    memcpy(unvoiced_vector - kUnvoicedLpcOrder, parameters.ar_filter_state,
           sizeof(int16_t) * kUnvoicedLpcOrder);
    int32_t add_constant = 0;
    if (parameters.ar_gain_scale > 0) {
      // Rounding constant for the right-shift by |ar_gain_scale| below.
      add_constant = 1 << (parameters.ar_gain_scale - 1);
    }
    WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
                                    parameters.ar_gain, add_constant,
                                    parameters.ar_gain_scale, current_lag);
    WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
                              parameters.ar_filter, kUnvoicedLpcOrder + 1,
                              current_lag);
    // Save the tail of the filtered signal as state for the next call.
    memcpy(parameters.ar_filter_state,
           &(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
           sizeof(int16_t) * kUnvoicedLpcOrder);

    // Combine voiced and unvoiced contributions.

    // Set a suitable cross-fading slope.
    // For lag =
    //   <= 31 * fs_mult            => go from 1 to 0 in about 8 ms;
    //  (>= 31 .. <= 63) * fs_mult  => go from 1 to 0 in about 16 ms;
    //   >= 64 * fs_mult            => go from 1 to 0 in about 32 ms.
    // temp_shift = getbits(max_lag_) - 5.
    int temp_shift =
        (31 - WebRtcSpl_NormW32(rtc::dchecked_cast<int32_t>(max_lag_))) - 5;
    int16_t mix_factor_increment = 256 >> temp_shift;
    if (stop_muting_) {
      mix_factor_increment = 0;
    }

    // Create combined signal by shifting in more and more of unvoiced part.
    temp_shift = 8 - temp_shift;  // = getbits(mix_factor_increment).
    // NOTE: this |temp_length| deliberately shadows the outer one declared
    // before the channel loop; here it is the cross-fade length in samples.
    size_t temp_length =
        (parameters.current_voice_mix_factor - parameters.voice_mix_factor) >>
        temp_shift;
    temp_length = std::min(temp_length, current_lag);
    DspHelper::CrossFade(voiced_vector, unvoiced_vector, temp_length,
                         &parameters.current_voice_mix_factor,
                         mix_factor_increment, temp_data);

    // End of cross-fading period was reached before end of expanded signal
    // path. Mix the rest with a fixed mixing factor.
    if (temp_length < current_lag) {
      if (mix_factor_increment != 0) {
        parameters.current_voice_mix_factor = parameters.voice_mix_factor;
      }
      int16_t temp_scale = 16384 - parameters.current_voice_mix_factor;
      WebRtcSpl_ScaleAndAddVectorsWithRound(
          voiced_vector + temp_length, parameters.current_voice_mix_factor,
          unvoiced_vector + temp_length, temp_scale, 14,
          temp_data + temp_length, current_lag - temp_length);
    }

    // Select muting slope depending on how many consecutive expands we have
    // done.
    if (consecutive_expands_ == 3) {
      // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms.
      // mute_slope = 0.0010 / fs_mult in Q20.
      parameters.mute_slope = std::max(parameters.mute_slope, 1049 / fs_mult);
    }
    if (consecutive_expands_ == 7) {
      // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms.
      // mute_slope = 0.0020 / fs_mult in Q20.
      parameters.mute_slope = std::max(parameters.mute_slope, 2097 / fs_mult);
    }

    // Mute segment according to slope value.
    if ((consecutive_expands_ != 0) || !parameters.onset) {
      // Mute to the previous level, then continue with the muting.
      WebRtcSpl_AffineTransformVector(
          temp_data, temp_data, parameters.mute_factor, 8192, 14, current_lag);

      if (!stop_muting_) {
        DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);

        // Shift by 6 to go from Q20 to Q14.
        // TODO(hlundin): Adding 8192 before shifting 6 steps seems wrong.
        // Legacy.
        int16_t gain = static_cast<int16_t>(
            16384 - (((current_lag * parameters.mute_slope) + 8192) >> 6));
        gain = ((gain * parameters.mute_factor) + 8192) >> 14;

        // Guard against getting stuck with very small (but sometimes audible)
        // gain.
        if ((consecutive_expands_ > 3) && (gain >= parameters.mute_factor)) {
          parameters.mute_factor = 0;
        } else {
          parameters.mute_factor = gain;
        }
      }
    }

    // Background noise part.
    GenerateBackgroundNoise(
        random_vector, channel_ix, channel_parameters_[channel_ix].mute_slope,
        TooManyExpands(), current_lag, unvoiced_array_memory);

    // Add background noise to the combined voiced-unvoiced signal.
    for (size_t i = 0; i < current_lag; i++) {
      temp_data[i] = temp_data[i] + noise_vector[i];
    }
    if (channel_ix == 0) {
      // First channel determines the output size; the rest must match.
      output->AssertSize(current_lag);
    } else {
      assert(output->Size() == current_lag);
    }
    (*output)[channel_ix].OverwriteAt(temp_data, current_lag, 0);
  }

  // Increase call number and cap it.
  consecutive_expands_ = consecutive_expands_ >= kMaxConsecutiveExpands
                             ? kMaxConsecutiveExpands
                             : consecutive_expands_ + 1;
  expand_duration_samples_ += output->Size();
  // Clamp the duration counter at 2 seconds.
  expand_duration_samples_ = std::min(expand_duration_samples_,
                                      rtc::dchecked_cast<size_t>(fs_hz_ * 2));
  return 0;
}
321
322void Expand::SetParametersForNormalAfterExpand() {
323 current_lag_index_ = 0;
324 lag_index_direction_ = 0;
325 stop_muting_ = true; // Do not mute signal any more.
Henrik Lundinbef77e22015-08-18 14:58:09 +0200326 statistics_->LogDelayedPacketOutageEvent(
kwibergd3edd772017-03-01 18:52:48 -0800327 rtc::dchecked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000328}
329
330void Expand::SetParametersForMergeAfterExpand() {
Yves Gerey665174f2018-06-19 15:03:05 +0200331 current_lag_index_ = -1; /* out of the 3 possible ones */
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000332 lag_index_direction_ = 1; /* make sure we get the "optimal" lag */
333 stop_muting_ = true;
334}
335
henrik.lundinf3995f72016-05-10 05:54:35 -0700336bool Expand::Muted() const {
337 if (first_expand_ || stop_muting_)
338 return false;
339 RTC_DCHECK(channel_parameters_);
340 for (size_t ch = 0; ch < num_channels_; ++ch) {
341 if (channel_parameters_[ch].mute_factor != 0)
342 return false;
343 }
344 return true;
345}
346
// Returns the overlap-add length in samples (5 samples per 8 kHz of sample
// rate; see the constructor initializer).
size_t Expand::overlap_length() const {
  return overlap_length_;
}
350
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000351void Expand::InitializeForAnExpandPeriod() {
352 lag_index_direction_ = 1;
353 current_lag_index_ = -1;
354 stop_muting_ = false;
355 random_vector_->set_seed_increment(1);
356 consecutive_expands_ = 0;
357 for (size_t ix = 0; ix < num_channels_; ++ix) {
358 channel_parameters_[ix].current_voice_mix_factor = 16384; // 1.0 in Q14.
Yves Gerey665174f2018-06-19 15:03:05 +0200359 channel_parameters_[ix].mute_factor = 16384; // 1.0 in Q14.
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000360 // Start with 0 gain for background noise.
361 background_noise_->SetMuteFactor(ix, 0);
362 }
363}
364
// Returns true once at least kMaxConsecutiveExpands consecutive Process()
// calls have produced expansion (the counter is capped at that value).
bool Expand::TooManyExpands() {
  return consecutive_expands_ >= kMaxConsecutiveExpands;
}
368
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000369void Expand::AnalyzeSignal(int16_t* random_vector) {
370 int32_t auto_correlation[kUnvoicedLpcOrder + 1];
371 int16_t reflection_coeff[kUnvoicedLpcOrder];
372 int16_t correlation_vector[kMaxSampleRate / 8000 * 102];
Peter Kastingdce40cf2015-08-24 14:52:23 -0700373 size_t best_correlation_index[kNumCorrelationCandidates];
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000374 int16_t best_correlation[kNumCorrelationCandidates];
Peter Kastingdce40cf2015-08-24 14:52:23 -0700375 size_t best_distortion_index[kNumCorrelationCandidates];
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000376 int16_t best_distortion[kNumCorrelationCandidates];
377 int32_t correlation_vector2[(99 * kMaxSampleRate / 8000) + 1];
378 int32_t best_distortion_w32[kNumCorrelationCandidates];
Peter Kastingdce40cf2015-08-24 14:52:23 -0700379 static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000380 int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
381 int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
382
383 int fs_mult = fs_hz_ / 8000;
384
385 // Pre-calculate common multiplications with fs_mult.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700386 size_t fs_mult_4 = static_cast<size_t>(fs_mult * 4);
387 size_t fs_mult_20 = static_cast<size_t>(fs_mult * 20);
388 size_t fs_mult_120 = static_cast<size_t>(fs_mult * 120);
389 size_t fs_mult_dist_len = fs_mult * kDistortionLength;
390 size_t fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000391
Peter Kastingdce40cf2015-08-24 14:52:23 -0700392 const size_t signal_length = static_cast<size_t>(256 * fs_mult);
minyue-webrtc79553cb2016-05-10 19:55:56 +0200393
394 const size_t audio_history_position = sync_buffer_->Size() - signal_length;
395 std::unique_ptr<int16_t[]> audio_history(new int16_t[signal_length]);
396 (*sync_buffer_)[0].CopyTo(signal_length, audio_history_position,
397 audio_history.get());
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000398
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000399 // Initialize.
400 InitializeForAnExpandPeriod();
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000401
402 // Calculate correlation in downsampled domain (4 kHz sample rate).
Peter Kastingdce40cf2015-08-24 14:52:23 -0700403 size_t correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
turaj@webrtc.org58cd3162013-10-31 15:15:55 +0000404 // If it is decided to break bit-exactness |correlation_length| should be
405 // initialized to the return value of Correlation().
minyue-webrtc79553cb2016-05-10 19:55:56 +0200406 Correlation(audio_history.get(), signal_length, correlation_vector);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000407
408 // Find peaks in correlation vector.
409 DspHelper::PeakDetection(correlation_vector, correlation_length,
410 kNumCorrelationCandidates, fs_mult,
411 best_correlation_index, best_correlation);
412
413 // Adjust peak locations; cross-correlation lags start at 2.5 ms
414 // (20 * fs_mult samples).
415 best_correlation_index[0] += fs_mult_20;
416 best_correlation_index[1] += fs_mult_20;
417 best_correlation_index[2] += fs_mult_20;
418
419 // Calculate distortion around the |kNumCorrelationCandidates| best lags.
420 int distortion_scale = 0;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700421 for (size_t i = 0; i < kNumCorrelationCandidates; i++) {
Yves Gerey665174f2018-06-19 15:03:05 +0200422 size_t min_index =
423 std::max(fs_mult_20, best_correlation_index[i] - fs_mult_4);
424 size_t max_index =
425 std::min(fs_mult_120 - 1, best_correlation_index[i] + fs_mult_4);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000426 best_distortion_index[i] = DspHelper::MinDistortion(
427 &(audio_history[signal_length - fs_mult_dist_len]), min_index,
428 max_index, fs_mult_dist_len, &best_distortion_w32[i]);
429 distortion_scale = std::max(16 - WebRtcSpl_NormW32(best_distortion_w32[i]),
430 distortion_scale);
431 }
432 // Shift the distortion values to fit in 16 bits.
433 WebRtcSpl_VectorBitShiftW32ToW16(best_distortion, kNumCorrelationCandidates,
434 best_distortion_w32, distortion_scale);
435
436 // Find the maximizing index |i| of the cost function
437 // f[i] = best_correlation[i] / best_distortion[i].
turaj@webrtc.org58cd3162013-10-31 15:15:55 +0000438 int32_t best_ratio = std::numeric_limits<int32_t>::min();
Peter Kastingdce40cf2015-08-24 14:52:23 -0700439 size_t best_index = std::numeric_limits<size_t>::max();
440 for (size_t i = 0; i < kNumCorrelationCandidates; ++i) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000441 int32_t ratio;
442 if (best_distortion[i] > 0) {
ivoc4843dd12017-01-09 08:31:42 -0800443 ratio = (best_correlation[i] * (1 << 16)) / best_distortion[i];
turaj@webrtc.org7126b382013-07-31 16:05:09 +0000444 } else if (best_correlation[i] == 0) {
445 ratio = 0; // No correlation set result to zero.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000446 } else {
turaj@webrtc.org7126b382013-07-31 16:05:09 +0000447 ratio = std::numeric_limits<int32_t>::max(); // Denominator is zero.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000448 }
449 if (ratio > best_ratio) {
450 best_index = i;
451 best_ratio = ratio;
452 }
453 }
454
Peter Kastingdce40cf2015-08-24 14:52:23 -0700455 size_t distortion_lag = best_distortion_index[best_index];
456 size_t correlation_lag = best_correlation_index[best_index];
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000457 max_lag_ = std::max(distortion_lag, correlation_lag);
458
459 // Calculate the exact best correlation in the range between
460 // |correlation_lag| and |distortion_lag|.
Yves Gerey665174f2018-06-19 15:03:05 +0200461 correlation_length = std::max(std::min(distortion_lag + 10, fs_mult_120),
462 static_cast<size_t>(60 * fs_mult));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000463
Peter Kastingdce40cf2015-08-24 14:52:23 -0700464 size_t start_index = std::min(distortion_lag, correlation_lag);
465 size_t correlation_lags = static_cast<size_t>(
Yves Gerey665174f2018-06-19 15:03:05 +0200466 WEBRTC_SPL_ABS_W16((distortion_lag - correlation_lag)) + 1);
Peter Kastingdce40cf2015-08-24 14:52:23 -0700467 assert(correlation_lags <= static_cast<size_t>(99 * fs_mult + 1));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000468
469 for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
470 ChannelParameters& parameters = channel_parameters_[channel_ix];
minyue8c229622016-04-28 02:16:48 -0700471 // Calculate suitable scaling.
472 int16_t signal_max = WebRtcSpl_MaxAbsValueW16(
Yves Gerey665174f2018-06-19 15:03:05 +0200473 &audio_history[signal_length - correlation_length - start_index -
474 correlation_lags],
475 correlation_length + start_index + correlation_lags - 1);
476 int correlation_scale =
477 (31 - WebRtcSpl_NormW32(signal_max * signal_max)) +
minyue8c229622016-04-28 02:16:48 -0700478 (31 - WebRtcSpl_NormW32(static_cast<int32_t>(correlation_length))) - 31;
479 correlation_scale = std::max(0, correlation_scale);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000480
481 // Calculate the correlation, store in |correlation_vector2|.
minyue8c229622016-04-28 02:16:48 -0700482 WebRtcSpl_CrossCorrelation(
483 correlation_vector2,
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000484 &(audio_history[signal_length - correlation_length]),
485 &(audio_history[signal_length - correlation_length - start_index]),
minyue8c229622016-04-28 02:16:48 -0700486 correlation_length, correlation_lags, correlation_scale, -1);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000487
488 // Find maximizing index.
Peter Kasting1380e262015-08-28 17:31:03 -0700489 best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000490 int32_t max_correlation = correlation_vector2[best_index];
491 // Compensate index with start offset.
492 best_index = best_index + start_index;
493
494 // Calculate energies.
495 int32_t energy1 = WebRtcSpl_DotProductWithScale(
496 &(audio_history[signal_length - correlation_length]),
497 &(audio_history[signal_length - correlation_length]),
498 correlation_length, correlation_scale);
499 int32_t energy2 = WebRtcSpl_DotProductWithScale(
500 &(audio_history[signal_length - correlation_length - best_index]),
501 &(audio_history[signal_length - correlation_length - best_index]),
502 correlation_length, correlation_scale);
503
504 // Calculate the correlation coefficient between the two portions of the
505 // signal.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700506 int32_t corr_coefficient;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000507 if ((energy1 > 0) && (energy2 > 0)) {
508 int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0);
509 int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
510 // Make sure total scaling is even (to simplify scale factor after sqrt).
511 if ((energy1_scale + energy2_scale) & 1) {
512 // If sum is odd, add 1 to make it even.
513 energy1_scale += 1;
514 }
Peter Kasting36b7cc32015-06-11 19:57:18 -0700515 int32_t scaled_energy1 = energy1 >> energy1_scale;
516 int32_t scaled_energy2 = energy2 >> energy2_scale;
517 int16_t sqrt_energy_product = static_cast<int16_t>(
518 WebRtcSpl_SqrtFloor(scaled_energy1 * scaled_energy2));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000519 // Calculate max_correlation / sqrt(energy1 * energy2) in Q14.
520 int cc_shift = 14 - (energy1_scale + energy2_scale) / 2;
521 max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift);
Yves Gerey665174f2018-06-19 15:03:05 +0200522 corr_coefficient =
523 WebRtcSpl_DivW32W16(max_correlation, sqrt_energy_product);
Peter Kasting36b7cc32015-06-11 19:57:18 -0700524 // Cap at 1.0 in Q14.
525 corr_coefficient = std::min(16384, corr_coefficient);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000526 } else {
527 corr_coefficient = 0;
528 }
529
530 // Extract the two vectors expand_vector0 and expand_vector1 from
531 // |audio_history|.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700532 size_t expansion_length = max_lag_ + overlap_length_;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000533 const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
534 const int16_t* vector2 = vector1 - distortion_lag;
535 // Normalize the second vector to the same energy as the first.
536 energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length,
537 correlation_scale);
538 energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length,
539 correlation_scale);
540 // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0,
Henrik Lundine84e96e2016-01-12 16:36:13 +0100541 // i.e., energy1 / energy2 is within 0.25 - 4.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000542 int16_t amplitude_ratio;
543 if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) {
544 // Energy constraint fulfilled. Use both vectors and scale them
545 // accordingly.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700546 int32_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
547 int32_t scaled_energy1 = scaled_energy2 - 13;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000548 // Calculate scaled_energy1 / scaled_energy2 in Q13.
Yves Gerey665174f2018-06-19 15:03:05 +0200549 int32_t energy_ratio =
550 WebRtcSpl_DivW32W16(WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
551 static_cast<int16_t>(energy2 >> scaled_energy2));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000552 // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
Peter Kastingdce40cf2015-08-24 14:52:23 -0700553 amplitude_ratio =
554 static_cast<int16_t>(WebRtcSpl_SqrtFloor(energy_ratio << 13));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000555 // Copy the two vectors and give them the same energy.
556 parameters.expand_vector0.Clear();
557 parameters.expand_vector0.PushBack(vector1, expansion_length);
558 parameters.expand_vector1.Clear();
Peter Kastingdce40cf2015-08-24 14:52:23 -0700559 if (parameters.expand_vector1.Size() < expansion_length) {
Yves Gerey665174f2018-06-19 15:03:05 +0200560 parameters.expand_vector1.Extend(expansion_length -
561 parameters.expand_vector1.Size());
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000562 }
minyue-webrtc79553cb2016-05-10 19:55:56 +0200563 std::unique_ptr<int16_t[]> temp_1(new int16_t[expansion_length]);
Yves Gerey665174f2018-06-19 15:03:05 +0200564 WebRtcSpl_AffineTransformVector(
565 temp_1.get(), const_cast<int16_t*>(vector2), amplitude_ratio, 4096,
566 13, expansion_length);
minyue-webrtc79553cb2016-05-10 19:55:56 +0200567 parameters.expand_vector1.OverwriteAt(temp_1.get(), expansion_length, 0);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000568 } else {
569 // Energy change constraint not fulfilled. Only use last vector.
570 parameters.expand_vector0.Clear();
571 parameters.expand_vector0.PushBack(vector1, expansion_length);
572 // Copy from expand_vector0 to expand_vector1.
henrik.lundin@webrtc.orgf6ab6f82014-09-04 10:58:43 +0000573 parameters.expand_vector0.CopyTo(&parameters.expand_vector1);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000574 // Set the energy_ratio since it is used by muting slope.
575 if ((energy1 / 4 < energy2) || (energy2 == 0)) {
576 amplitude_ratio = 4096; // 0.5 in Q13.
577 } else {
578 amplitude_ratio = 16384; // 2.0 in Q13.
579 }
580 }
581
582 // Set the 3 lag values.
Peter Kastingf045e4d2015-06-10 21:15:38 -0700583 if (distortion_lag == correlation_lag) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000584 expand_lags_[0] = distortion_lag;
585 expand_lags_[1] = distortion_lag;
586 expand_lags_[2] = distortion_lag;
587 } else {
588 // |distortion_lag| and |correlation_lag| are not equal; use different
589 // combinations of the two.
590 // First lag is |distortion_lag| only.
591 expand_lags_[0] = distortion_lag;
592 // Second lag is the average of the two.
593 expand_lags_[1] = (distortion_lag + correlation_lag) / 2;
594 // Third lag is the average again, but rounding towards |correlation_lag|.
Peter Kastingf045e4d2015-06-10 21:15:38 -0700595 if (distortion_lag > correlation_lag) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000596 expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2;
597 } else {
598 expand_lags_[2] = (distortion_lag + correlation_lag + 1) / 2;
599 }
600 }
601
602 // Calculate the LPC and the gain of the filters.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000603
604 // Calculate kUnvoicedLpcOrder + 1 lags of the auto-correlation function.
Yves Gerey665174f2018-06-19 15:03:05 +0200605 size_t temp_index =
606 signal_length - fs_mult_lpc_analysis_len - kUnvoicedLpcOrder;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000607 // Copy signal to temporary vector to be able to pad with leading zeros.
Yves Gerey665174f2018-06-19 15:03:05 +0200608 int16_t* temp_signal =
609 new int16_t[fs_mult_lpc_analysis_len + kUnvoicedLpcOrder];
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000610 memset(temp_signal, 0,
611 sizeof(int16_t) * (fs_mult_lpc_analysis_len + kUnvoicedLpcOrder));
612 memcpy(&temp_signal[kUnvoicedLpcOrder],
613 &audio_history[temp_index + kUnvoicedLpcOrder],
614 sizeof(int16_t) * fs_mult_lpc_analysis_len);
minyue53ff70f2016-05-02 01:50:30 -0700615 CrossCorrelationWithAutoShift(
616 &temp_signal[kUnvoicedLpcOrder], &temp_signal[kUnvoicedLpcOrder],
617 fs_mult_lpc_analysis_len, kUnvoicedLpcOrder + 1, -1, auto_correlation);
Yves Gerey665174f2018-06-19 15:03:05 +0200618 delete[] temp_signal;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000619
620 // Verify that variance is positive.
621 if (auto_correlation[0] > 0) {
622 // Estimate AR filter parameters using Levinson-Durbin algorithm;
623 // kUnvoicedLpcOrder + 1 filter coefficients.
Yves Gerey665174f2018-06-19 15:03:05 +0200624 int16_t stability =
625 WebRtcSpl_LevinsonDurbin(auto_correlation, parameters.ar_filter,
626 reflection_coeff, kUnvoicedLpcOrder);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000627
628 // Keep filter parameters only if filter is stable.
629 if (stability != 1) {
630 // Set first coefficient to 4096 (1.0 in Q12).
631 parameters.ar_filter[0] = 4096;
632 // Set remaining |kUnvoicedLpcOrder| coefficients to zero.
633 WebRtcSpl_MemSetW16(parameters.ar_filter + 1, 0, kUnvoicedLpcOrder);
634 }
635 }
636
637 if (channel_ix == 0) {
638 // Extract a noise segment.
Peter Kastingdce40cf2015-08-24 14:52:23 -0700639 size_t noise_length;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000640 if (distortion_lag < 40) {
641 noise_length = 2 * distortion_lag + 30;
642 } else {
643 noise_length = distortion_lag + 30;
644 }
645 if (noise_length <= RandomVector::kRandomTableSize) {
646 memcpy(random_vector, RandomVector::kRandomTable,
647 sizeof(int16_t) * noise_length);
648 } else {
649 // Only applies to SWB where length could be larger than
650 // |kRandomTableSize|.
651 memcpy(random_vector, RandomVector::kRandomTable,
652 sizeof(int16_t) * RandomVector::kRandomTableSize);
653 assert(noise_length <= kMaxSampleRate / 8000 * 120 + 30);
654 random_vector_->IncreaseSeedIncrement(2);
655 random_vector_->Generate(
656 noise_length - RandomVector::kRandomTableSize,
657 &random_vector[RandomVector::kRandomTableSize]);
658 }
659 }
660
661 // Set up state vector and calculate scale factor for unvoiced filtering.
662 memcpy(parameters.ar_filter_state,
663 &(audio_history[signal_length - kUnvoicedLpcOrder]),
664 sizeof(int16_t) * kUnvoicedLpcOrder);
665 memcpy(unvoiced_vector - kUnvoicedLpcOrder,
666 &(audio_history[signal_length - 128 - kUnvoicedLpcOrder]),
667 sizeof(int16_t) * kUnvoicedLpcOrder);
bjornv@webrtc.orgc14e3572015-01-12 05:50:52 +0000668 WebRtcSpl_FilterMAFastQ12(&audio_history[signal_length - 128],
Yves Gerey665174f2018-06-19 15:03:05 +0200669 unvoiced_vector, parameters.ar_filter,
670 kUnvoicedLpcOrder + 1, 128);
ivocffecbbf2016-12-16 05:51:49 -0800671 const int unvoiced_max_abs = [&] {
672 const int16_t max_abs = WebRtcSpl_MaxAbsValueW16(unvoiced_vector, 128);
673 // Since WebRtcSpl_MaxAbsValueW16 returns 2^15 - 1 when the input contains
674 // -2^15, we have to conservatively bump the return value by 1
675 // if it is 2^15 - 1.
676 return max_abs == WEBRTC_SPL_WORD16_MAX ? max_abs + 1 : max_abs;
677 }();
678 // Pick the smallest n such that 2^n > unvoiced_max_abs; then the maximum
679 // value of the dot product is less than 2^7 * 2^(2*n) = 2^(2*n + 7), so to
680 // prevent overflows we want 2n + 7 <= 31, which means we should shift by
681 // 2n + 7 - 31 bits, if this value is greater than zero.
682 int unvoiced_prescale =
683 std::max(0, 2 * WebRtcSpl_GetSizeInBits(unvoiced_max_abs) - 24);
684
Yves Gerey665174f2018-06-19 15:03:05 +0200685 int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(
686 unvoiced_vector, unvoiced_vector, 128, unvoiced_prescale);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000687
688 // Normalize |unvoiced_energy| to 28 or 29 bits to preserve sqrt() accuracy.
689 int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3;
690 // Make sure we do an odd number of shifts since we already have 7 shifts
691 // from dividing with 128 earlier. This will make the total scale factor
692 // even, which is suitable for the sqrt.
693 unvoiced_scale += ((unvoiced_scale & 0x1) ^ 0x1);
694 unvoiced_energy = WEBRTC_SPL_SHIFT_W32(unvoiced_energy, unvoiced_scale);
Peter Kastingb7e50542015-06-11 12:55:50 -0700695 int16_t unvoiced_gain =
696 static_cast<int16_t>(WebRtcSpl_SqrtFloor(unvoiced_energy));
Yves Gerey665174f2018-06-19 15:03:05 +0200697 parameters.ar_gain_scale =
698 13 + (unvoiced_scale + 7 - unvoiced_prescale) / 2;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000699 parameters.ar_gain = unvoiced_gain;
700
701 // Calculate voice_mix_factor from corr_coefficient.
702 // Let x = corr_coefficient. Then, we compute:
703 // if (x > 0.48)
704 // voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096;
705 // else
706 // voice_mix_factor = 0;
707 if (corr_coefficient > 7875) {
708 int16_t x1, x2, x3;
Peter Kasting36b7cc32015-06-11 19:57:18 -0700709 // |corr_coefficient| is in Q14.
710 x1 = static_cast<int16_t>(corr_coefficient);
Yves Gerey665174f2018-06-19 15:03:05 +0200711 x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000712 x3 = (x1 * x2) >> 14;
Yves Gerey665174f2018-06-19 15:03:05 +0200713 static const int kCoefficients[4] = {-5179, 19931, -16422, 5776};
henrik.lundin79dfdad2016-11-15 01:45:53 -0800714 int32_t temp_sum = kCoefficients[0] * 16384;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000715 temp_sum += kCoefficients[1] * x1;
716 temp_sum += kCoefficients[2] * x2;
717 temp_sum += kCoefficients[3] * x3;
Peter Kastingf045e4d2015-06-10 21:15:38 -0700718 parameters.voice_mix_factor =
719 static_cast<int16_t>(std::min(temp_sum / 4096, 16384));
Yves Gerey665174f2018-06-19 15:03:05 +0200720 parameters.voice_mix_factor =
721 std::max(parameters.voice_mix_factor, static_cast<int16_t>(0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000722 } else {
723 parameters.voice_mix_factor = 0;
724 }
725
726 // Calculate muting slope. Reuse value from earlier scaling of
727 // |expand_vector0| and |expand_vector1|.
728 int16_t slope = amplitude_ratio;
729 if (slope > 12288) {
730 // slope > 1.5.
731 // Calculate (1 - (1 / slope)) / distortion_lag =
732 // (slope - 1) / (distortion_lag * slope).
733 // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
734 // the division.
735 // Shift the denominator from Q13 to Q5 before the division. The result of
736 // the division will then be in Q20.
Henrik Lundin9024da82018-05-21 13:41:16 +0200737 int16_t denom =
738 rtc::saturated_cast<int16_t>((distortion_lag * slope) >> 8);
739 int temp_ratio = WebRtcSpl_DivW32W16((slope - 8192) << 12, denom);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000740 if (slope > 14746) {
741 // slope > 1.8.
742 // Divide by 2, with proper rounding.
743 parameters.mute_slope = (temp_ratio + 1) / 2;
744 } else {
745 // Divide by 8, with proper rounding.
746 parameters.mute_slope = (temp_ratio + 4) / 8;
747 }
748 parameters.onset = true;
749 } else {
750 // Calculate (1 - slope) / distortion_lag.
751 // Shift |slope| by 7 to Q20 before the division. The result is in Q20.
Peter Kastingb7e50542015-06-11 12:55:50 -0700752 parameters.mute_slope = WebRtcSpl_DivW32W16(
henrik.lundin79dfdad2016-11-15 01:45:53 -0800753 (8192 - slope) * 128, static_cast<int16_t>(distortion_lag));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000754 if (parameters.voice_mix_factor <= 13107) {
755 // Make sure the mute factor decreases from 1.0 to 0.9 in no more than
756 // 6.25 ms.
757 // mute_slope >= 0.005 / fs_mult in Q20.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700758 parameters.mute_slope = std::max(5243 / fs_mult, parameters.mute_slope);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000759 } else if (slope > 8028) {
760 parameters.mute_slope = 0;
761 }
762 parameters.onset = false;
763 }
764 }
765}
766
Karl Wiberg7f6c4d42015-04-09 15:44:22 +0200767Expand::ChannelParameters::ChannelParameters()
768 : mute_factor(16384),
769 ar_gain(0),
770 ar_gain_scale(0),
771 voice_mix_factor(0),
772 current_voice_mix_factor(0),
773 onset(false),
774 mute_slope(0) {
775 memset(ar_filter, 0, sizeof(ar_filter));
776 memset(ar_filter_state, 0, sizeof(ar_filter_state));
777}
778
Peter Kasting728d9032015-06-11 14:31:38 -0700779void Expand::Correlation(const int16_t* input,
780 size_t input_length,
minyue53ff70f2016-05-02 01:50:30 -0700781 int16_t* output) const {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000782 // Set parameters depending on sample rate.
783 const int16_t* filter_coefficients;
Peter Kastingdce40cf2015-08-24 14:52:23 -0700784 size_t num_coefficients;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000785 int16_t downsampling_factor;
786 if (fs_hz_ == 8000) {
787 num_coefficients = 3;
788 downsampling_factor = 2;
789 filter_coefficients = DspHelper::kDownsample8kHzTbl;
790 } else if (fs_hz_ == 16000) {
791 num_coefficients = 5;
792 downsampling_factor = 4;
793 filter_coefficients = DspHelper::kDownsample16kHzTbl;
794 } else if (fs_hz_ == 32000) {
795 num_coefficients = 7;
796 downsampling_factor = 8;
797 filter_coefficients = DspHelper::kDownsample32kHzTbl;
798 } else { // fs_hz_ == 48000.
799 num_coefficients = 7;
800 downsampling_factor = 12;
801 filter_coefficients = DspHelper::kDownsample48kHzTbl;
802 }
803
804 // Correlate from lag 10 to lag 60 in downsampled domain.
805 // (Corresponds to 20-120 for narrow-band, 40-240 for wide-band, and so on.)
Peter Kastingdce40cf2015-08-24 14:52:23 -0700806 static const size_t kCorrelationStartLag = 10;
807 static const size_t kNumCorrelationLags = 54;
808 static const size_t kCorrelationLength = 60;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000809 // Downsample to 4 kHz sample rate.
Yves Gerey665174f2018-06-19 15:03:05 +0200810 static const size_t kDownsampledLength =
811 kCorrelationStartLag + kNumCorrelationLags + kCorrelationLength;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000812 int16_t downsampled_input[kDownsampledLength];
Peter Kastingdce40cf2015-08-24 14:52:23 -0700813 static const size_t kFilterDelay = 0;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000814 WebRtcSpl_DownsampleFast(
815 input + input_length - kDownsampledLength * downsampling_factor,
816 kDownsampledLength * downsampling_factor, downsampled_input,
817 kDownsampledLength, filter_coefficients, num_coefficients,
818 downsampling_factor, kFilterDelay);
819
820 // Normalize |downsampled_input| to using all 16 bits.
Yves Gerey665174f2018-06-19 15:03:05 +0200821 int16_t max_value =
822 WebRtcSpl_MaxAbsValueW16(downsampled_input, kDownsampledLength);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000823 int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value);
824 WebRtcSpl_VectorBitShiftW16(downsampled_input, kDownsampledLength,
825 downsampled_input, norm_shift);
826
827 int32_t correlation[kNumCorrelationLags];
minyue53ff70f2016-05-02 01:50:30 -0700828 CrossCorrelationWithAutoShift(
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000829 &downsampled_input[kDownsampledLength - kCorrelationLength],
Yves Gerey665174f2018-06-19 15:03:05 +0200830 &downsampled_input[kDownsampledLength - kCorrelationLength -
831 kCorrelationStartLag],
minyue53ff70f2016-05-02 01:50:30 -0700832 kCorrelationLength, kNumCorrelationLags, -1, correlation);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000833
834 // Normalize and move data from 32-bit to 16-bit vector.
Yves Gerey665174f2018-06-19 15:03:05 +0200835 int32_t max_correlation =
836 WebRtcSpl_MaxAbsValueW32(correlation, kNumCorrelationLags);
Peter Kastingb7e50542015-06-11 12:55:50 -0700837 int16_t norm_shift2 = static_cast<int16_t>(
838 std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000839 WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
840 norm_shift2);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000841}
842
843void Expand::UpdateLagIndex() {
844 current_lag_index_ = current_lag_index_ + lag_index_direction_;
845 // Change direction if needed.
846 if (current_lag_index_ <= 0) {
847 lag_index_direction_ = 1;
848 }
849 if (current_lag_index_ >= kNumLags - 1) {
850 lag_index_direction_ = -1;
851 }
852}
853
henrik.lundin@webrtc.orgd9faa462014-01-14 10:18:45 +0000854Expand* ExpandFactory::Create(BackgroundNoise* background_noise,
855 SyncBuffer* sync_buffer,
856 RandomVector* random_vector,
Henrik Lundinbef77e22015-08-18 14:58:09 +0200857 StatisticsCalculator* statistics,
henrik.lundin@webrtc.orgd9faa462014-01-14 10:18:45 +0000858 int fs,
859 size_t num_channels) const {
Henrik Lundinbef77e22015-08-18 14:58:09 +0200860 return new Expand(background_noise, sync_buffer, random_vector, statistics,
861 fs, num_channels);
henrik.lundin@webrtc.orgd9faa462014-01-14 10:18:45 +0000862}
863
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000864// TODO(turajs): This can be moved to BackgroundNoise class.
865void Expand::GenerateBackgroundNoise(int16_t* random_vector,
866 size_t channel,
Peter Kasting36b7cc32015-06-11 19:57:18 -0700867 int mute_slope,
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000868 bool too_many_expands,
869 size_t num_noise_samples,
870 int16_t* buffer) {
Peter Kastingdce40cf2015-08-24 14:52:23 -0700871 static const size_t kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000872 int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
Peter Kastingdce40cf2015-08-24 14:52:23 -0700873 assert(num_noise_samples <= (kMaxSampleRate / 8000 * 125));
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000874 int16_t* noise_samples = &buffer[kNoiseLpcOrder];
875 if (background_noise_->initialized()) {
876 // Use background noise parameters.
877 memcpy(noise_samples - kNoiseLpcOrder,
878 background_noise_->FilterState(channel),
879 sizeof(int16_t) * kNoiseLpcOrder);
880
881 int dc_offset = 0;
882 if (background_noise_->ScaleShift(channel) > 1) {
883 dc_offset = 1 << (background_noise_->ScaleShift(channel) - 1);
884 }
885
886 // Scale random vector to correct energy level.
887 WebRtcSpl_AffineTransformVector(
Yves Gerey665174f2018-06-19 15:03:05 +0200888 scaled_random_vector, random_vector, background_noise_->Scale(channel),
889 dc_offset, background_noise_->ScaleShift(channel), num_noise_samples);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000890
891 WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
892 background_noise_->Filter(channel),
Yves Gerey665174f2018-06-19 15:03:05 +0200893 kNoiseLpcOrder + 1, num_noise_samples);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000894
895 background_noise_->SetFilterState(
Yves Gerey665174f2018-06-19 15:03:05 +0200896 channel, &(noise_samples[num_noise_samples - kNoiseLpcOrder]),
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000897 kNoiseLpcOrder);
898
899 // Unmute the background noise.
900 int16_t bgn_mute_factor = background_noise_->MuteFactor(channel);
Henrik Lundin67190172018-04-20 15:34:48 +0200901 if (bgn_mute_factor < 16384) {
902 WebRtcSpl_AffineTransformVector(noise_samples, noise_samples,
903 bgn_mute_factor, 8192, 14,
904 num_noise_samples);
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000905 }
906 // Update mute_factor in BackgroundNoise class.
907 background_noise_->SetMuteFactor(channel, bgn_mute_factor);
908 } else {
909 // BGN parameters have not been initialized; use zero noise.
910 memset(noise_samples, 0, sizeof(int16_t) * num_noise_samples);
911 }
912}
913
Peter Kastingb7e50542015-06-11 12:55:50 -0700914void Expand::GenerateRandomVector(int16_t seed_increment,
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000915 size_t length,
916 int16_t* random_vector) {
917 // TODO(turajs): According to hlundin The loop should not be needed. Should be
918 // just as good to generate all of the vector in one call.
919 size_t samples_generated = 0;
920 const size_t kMaxRandSamples = RandomVector::kRandomTableSize;
henrik.lundin@webrtc.orgea257842014-08-07 12:27:37 +0000921 while (samples_generated < length) {
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000922 size_t rand_length = std::min(length - samples_generated, kMaxRandSamples);
923 random_vector_->IncreaseSeedIncrement(seed_increment);
924 random_vector_->Generate(rand_length, &random_vector[samples_generated]);
925 samples_generated += rand_length;
926 }
927}
henrik.lundin@webrtc.orgd9faa462014-01-14 10:18:45 +0000928
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000929} // namespace webrtc