blob: 10f6a9f5bf4622f56ed59f338ab53a7098b4ce7a [file] [log] [blame]
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +00001/*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
henrik.lundin@webrtc.org9c55f0f2014-06-09 08:10:28 +000011#include "webrtc/modules/audio_coding/neteq/expand.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000012
13#include <assert.h>
pbos@webrtc.org12dc1a32013-08-05 16:22:53 +000014#include <string.h> // memset
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000015
16#include <algorithm> // min, max
turaj@webrtc.org7126b382013-07-31 16:05:09 +000017#include <limits> // numeric_limits<T>
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000018
19#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
henrik.lundin@webrtc.org9c55f0f2014-06-09 08:10:28 +000020#include "webrtc/modules/audio_coding/neteq/background_noise.h"
21#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
22#include "webrtc/modules/audio_coding/neteq/random_vector.h"
23#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000024
25namespace webrtc {
26
// Constructs an Expand object operating at |fs| Hz with |num_channels|
// audio channels. The pointers to |background_noise|, |sync_buffer| and
// |random_vector| are borrowed, not owned; the caller must keep them alive
// for the lifetime of this object.
Expand::Expand(BackgroundNoise* background_noise,
               SyncBuffer* sync_buffer,
               RandomVector* random_vector,
               int fs,
               size_t num_channels)
    : random_vector_(random_vector),
      sync_buffer_(sync_buffer),
      first_expand_(true),
      fs_hz_(fs),
      num_channels_(num_channels),
      consecutive_expands_(0),
      background_noise_(background_noise),
      overlap_length_(5 * fs / 8000),  // 0.625 ms worth of samples.
      lag_index_direction_(0),
      current_lag_index_(0),
      stop_muting_(false),
      channel_parameters_(new ChannelParameters[num_channels_]) {
  // Only the four standard NetEq rates are supported.
  assert(fs == 8000 || fs == 16000 || fs == 32000 || fs == 48000);
  assert(fs <= kMaxSampleRate);  // Should not be possible.
  assert(num_channels_ > 0);
  memset(expand_lags_, 0, sizeof(expand_lags_));
  Reset();
}
50
// Defaulted out-of-line; presumably so member types only need their full
// definitions in this translation unit — confirm against the header.
Expand::~Expand() = default;
52
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +000053void Expand::Reset() {
54 first_expand_ = true;
55 consecutive_expands_ = 0;
56 max_lag_ = 0;
57 for (size_t ix = 0; ix < num_channels_; ++ix) {
58 channel_parameters_[ix].expand_vector0.Clear();
59 channel_parameters_[ix].expand_vector1.Clear();
60 }
61}
62
// Produces one expansion segment per channel into |output|. The segment is a
// mix of a periodic ("voiced") part repeated from the signal history, a
// filtered-noise ("unvoiced") part, and background noise, cross-faded and
// muted according to how many consecutive expansions have been performed.
// Returns 0 (the only value produced by this implementation).
int Expand::Process(AudioMultiVector* output) {
  // Scratch buffers, sized for the worst case (kMaxSampleRate).
  int16_t random_vector[kMaxSampleRate / 8000 * 120 + 30];
  int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
  static const int kTempDataSize = 3600;
  int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
  int16_t* voiced_vector_storage = temp_data;
  // The first |overlap_length_| samples of the voiced vector are reserved for
  // overlap-add against the tail of the sync buffer (see below).
  int16_t* voiced_vector = &voiced_vector_storage[overlap_length_];
  static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
  // Leading samples of |unvoiced_array_memory| act as filter state/history.
  int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
  int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
  int16_t* noise_vector = unvoiced_array_memory + kNoiseLpcOrder;

  int fs_mult = fs_hz_ / 8000;

  if (first_expand_) {
    // Perform initial setup if this is the first expansion since last reset.
    AnalyzeSignal(random_vector);
    first_expand_ = false;
  } else {
    // This is not the first expansion, parameters are already estimated.
    // Extract a noise segment.
    int16_t rand_length = max_lag_;
    // This only applies to SWB where length could be larger than 256.
    assert(rand_length <= kMaxSampleRate / 8000 * 120 + 30);
    GenerateRandomVector(2, rand_length, random_vector);
  }


  // Generate signal.
  UpdateLagIndex();

  // Voiced part.
  // Generate a weighted vector with the current lag.
  size_t expansion_vector_length = max_lag_ + overlap_length_;
  size_t current_lag = expand_lags_[current_lag_index_];
  // Copy lag+overlap data.
  size_t expansion_vector_position = expansion_vector_length - current_lag -
      overlap_length_;
  size_t temp_length = current_lag + overlap_length_;
  for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
    ChannelParameters& parameters = channel_parameters_[channel_ix];
    if (current_lag_index_ == 0) {
      // Use only expand_vector0.
      assert(expansion_vector_position + temp_length <=
             parameters.expand_vector0.Size());
      memcpy(voiced_vector_storage,
             &parameters.expand_vector0[expansion_vector_position],
             sizeof(int16_t) * temp_length);
    } else if (current_lag_index_ == 1) {
      // Mix 3/4 of expand_vector0 with 1/4 of expand_vector1.
      WebRtcSpl_ScaleAndAddVectorsWithRound(
          &parameters.expand_vector0[expansion_vector_position], 3,
          &parameters.expand_vector1[expansion_vector_position], 1, 2,
          voiced_vector_storage, static_cast<int>(temp_length));
    } else if (current_lag_index_ == 2) {
      // Mix 1/2 of expand_vector0 with 1/2 of expand_vector1.
      assert(expansion_vector_position + temp_length <=
             parameters.expand_vector0.Size());
      assert(expansion_vector_position + temp_length <=
             parameters.expand_vector1.Size());
      WebRtcSpl_ScaleAndAddVectorsWithRound(
          &parameters.expand_vector0[expansion_vector_position], 1,
          &parameters.expand_vector1[expansion_vector_position], 1, 1,
          voiced_vector_storage, static_cast<int>(temp_length));
    }

    // Get tapering window parameters. Values are in Q15.
    int16_t muting_window, muting_window_increment;
    int16_t unmuting_window, unmuting_window_increment;
    if (fs_hz_ == 8000) {
      muting_window = DspHelper::kMuteFactorStart8kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement8kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart8kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement8kHz;
    } else if (fs_hz_ == 16000) {
      muting_window = DspHelper::kMuteFactorStart16kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement16kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart16kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement16kHz;
    } else if (fs_hz_ == 32000) {
      muting_window = DspHelper::kMuteFactorStart32kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement32kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart32kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement32kHz;
    } else {  // fs_ == 48000
      muting_window = DspHelper::kMuteFactorStart48kHz;
      muting_window_increment = DspHelper::kMuteFactorIncrement48kHz;
      unmuting_window = DspHelper::kUnmuteFactorStart48kHz;
      unmuting_window_increment = DspHelper::kUnmuteFactorIncrement48kHz;
    }

    // Smooth the expanded if it has not been muted to a low amplitude and
    // |current_voice_mix_factor| is larger than 0.5.
    if ((parameters.mute_factor > 819) &&
        (parameters.current_voice_mix_factor > 8192)) {
      size_t start_ix = sync_buffer_->Size() - overlap_length_;
      for (size_t i = 0; i < overlap_length_; i++) {
        // Do overlap add between new vector and overlap.
        // Both windows are Q15; the combined product is rounded (+16384)
        // and shifted back down by 15.
        (*sync_buffer_)[channel_ix][start_ix + i] =
            (((*sync_buffer_)[channel_ix][start_ix + i] * muting_window) +
             (((parameters.mute_factor * voiced_vector_storage[i]) >> 14) *
              unmuting_window) + 16384) >> 15;
        muting_window += muting_window_increment;
        unmuting_window += unmuting_window_increment;
      }
    } else if (parameters.mute_factor == 0) {
      // The expanded signal will consist of only comfort noise if
      // mute_factor = 0. Set the output length to 15 ms for best noise
      // production.
      // TODO(hlundin): This has been disabled since the length of
      // parameters.expand_vector0 and parameters.expand_vector1 no longer
      // match with expand_lags_, causing invalid reads and writes. Is it a good
      // idea to enable this again, and solve the vector size problem?
//      max_lag_ = fs_mult * 120;
//      expand_lags_[0] = fs_mult * 120;
//      expand_lags_[1] = fs_mult * 120;
//      expand_lags_[2] = fs_mult * 120;
    }

    // Unvoiced part.
    // Filter |scaled_random_vector| through |ar_filter_|.
    memcpy(unvoiced_vector - kUnvoicedLpcOrder, parameters.ar_filter_state,
           sizeof(int16_t) * kUnvoicedLpcOrder);
    int32_t add_constant = 0;
    if (parameters.ar_gain_scale > 0) {
      // Rounding constant: half of the scaling divisor.
      add_constant = 1 << (parameters.ar_gain_scale - 1);
    }
    WebRtcSpl_AffineTransformVector(scaled_random_vector, random_vector,
                                    parameters.ar_gain, add_constant,
                                    parameters.ar_gain_scale,
                                    static_cast<int>(current_lag));
    WebRtcSpl_FilterARFastQ12(scaled_random_vector, unvoiced_vector,
                              parameters.ar_filter, kUnvoicedLpcOrder + 1,
                              static_cast<int>(current_lag));
    // Save the filter tail as state for the next call.
    memcpy(parameters.ar_filter_state,
           &(unvoiced_vector[current_lag - kUnvoicedLpcOrder]),
           sizeof(int16_t) * kUnvoicedLpcOrder);

    // Combine voiced and unvoiced contributions.

    // Set a suitable cross-fading slope.
    // For lag =
    //   <= 31 * fs_mult            => go from 1 to 0 in about 8 ms;
    //  (>= 31 .. <= 63) * fs_mult  => go from 1 to 0 in about 16 ms;
    //   >= 64 * fs_mult            => go from 1 to 0 in about 32 ms.
    // temp_shift = getbits(max_lag_) - 5.
    int temp_shift = (31 - WebRtcSpl_NormW32(max_lag_)) - 5;
    int16_t mix_factor_increment = 256 >> temp_shift;
    if (stop_muting_) {
      mix_factor_increment = 0;
    }

    // Create combined signal by shifting in more and more of unvoiced part.
    temp_shift = 8 - temp_shift;  // = getbits(mix_factor_increment).
    // NOTE: this declaration shadows the outer |temp_length| defined before
    // the channel loop; inside this loop only the fade length below is used.
    size_t temp_length = (parameters.current_voice_mix_factor -
        parameters.voice_mix_factor) >> temp_shift;
    temp_length = std::min(temp_length, current_lag);
    DspHelper::CrossFade(voiced_vector, unvoiced_vector, temp_length,
                         &parameters.current_voice_mix_factor,
                         mix_factor_increment, temp_data);

    // End of cross-fading period was reached before end of expanded signal
    // path. Mix the rest with a fixed mixing factor.
    if (temp_length < current_lag) {
      if (mix_factor_increment != 0) {
        parameters.current_voice_mix_factor = parameters.voice_mix_factor;
      }
      int16_t temp_scale = 16384 - parameters.current_voice_mix_factor;
      WebRtcSpl_ScaleAndAddVectorsWithRound(
          voiced_vector + temp_length, parameters.current_voice_mix_factor,
          unvoiced_vector + temp_length, temp_scale, 14,
          temp_data + temp_length, static_cast<int>(current_lag - temp_length));
    }

    // Select muting slope depending on how many consecutive expands we have
    // done.
    if (consecutive_expands_ == 3) {
      // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms.
      // mute_slope = 0.0010 / fs_mult in Q20.
      parameters.mute_slope = std::max(parameters.mute_slope, 1049 / fs_mult);
    }
    if (consecutive_expands_ == 7) {
      // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms.
      // mute_slope = 0.0020 / fs_mult in Q20.
      parameters.mute_slope = std::max(parameters.mute_slope, 2097 / fs_mult);
    }

    // Mute segment according to slope value.
    if ((consecutive_expands_ != 0) || !parameters.onset) {
      // Mute to the previous level, then continue with the muting.
      WebRtcSpl_AffineTransformVector(temp_data, temp_data,
                                      parameters.mute_factor, 8192,
                                      14, static_cast<int>(current_lag));

      if (!stop_muting_) {
        DspHelper::MuteSignal(temp_data, parameters.mute_slope, current_lag);

        // Shift by 6 to go from Q20 to Q14.
        // TODO(hlundin): Adding 8192 before shifting 6 steps seems wrong.
        // Legacy.
        int16_t gain = static_cast<int16_t>(16384 -
            (((current_lag * parameters.mute_slope) + 8192) >> 6));
        gain = ((gain * parameters.mute_factor) + 8192) >> 14;

        // Guard against getting stuck with very small (but sometimes audible)
        // gain.
        if ((consecutive_expands_ > 3) && (gain >= parameters.mute_factor)) {
          parameters.mute_factor = 0;
        } else {
          parameters.mute_factor = gain;
        }
      }
    }

    // Background noise part.
    GenerateBackgroundNoise(random_vector,
                            channel_ix,
                            channel_parameters_[channel_ix].mute_slope,
                            TooManyExpands(),
                            current_lag,
                            unvoiced_array_memory);

    // Add background noise to the combined voiced-unvoiced signal.
    for (size_t i = 0; i < current_lag; i++) {
      temp_data[i] = temp_data[i] + noise_vector[i];
    }
    if (channel_ix == 0) {
      // First channel establishes the output length...
      output->AssertSize(current_lag);
    } else {
      // ...and all other channels must produce the same length.
      assert(output->Size() == current_lag);
    }
    memcpy(&(*output)[channel_ix][0], temp_data,
           sizeof(temp_data[0]) * current_lag);
  }

  // Increase call number and cap it.
  consecutive_expands_ = consecutive_expands_ >= kMaxConsecutiveExpands ?
      kMaxConsecutiveExpands : consecutive_expands_ + 1;
  return 0;
}
303
304void Expand::SetParametersForNormalAfterExpand() {
305 current_lag_index_ = 0;
306 lag_index_direction_ = 0;
307 stop_muting_ = true; // Do not mute signal any more.
308}
309
310void Expand::SetParametersForMergeAfterExpand() {
311 current_lag_index_ = -1; /* out of the 3 possible ones */
312 lag_index_direction_ = 1; /* make sure we get the "optimal" lag */
313 stop_muting_ = true;
314}
315
// Accessor: number of samples used in overlap-add (set to 5 * fs / 8000,
// i.e. 0.625 ms, in the constructor).
size_t Expand::overlap_length() const {
  return overlap_length_;
}
319
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000320void Expand::InitializeForAnExpandPeriod() {
321 lag_index_direction_ = 1;
322 current_lag_index_ = -1;
323 stop_muting_ = false;
324 random_vector_->set_seed_increment(1);
325 consecutive_expands_ = 0;
326 for (size_t ix = 0; ix < num_channels_; ++ix) {
327 channel_parameters_[ix].current_voice_mix_factor = 16384; // 1.0 in Q14.
328 channel_parameters_[ix].mute_factor = 16384; // 1.0 in Q14.
329 // Start with 0 gain for background noise.
330 background_noise_->SetMuteFactor(ix, 0);
331 }
332}
333
// Returns true when the consecutive-expand counter has reached its cap,
// i.e. the expansion has gone on long enough that only (muted) noise should
// be produced.
bool Expand::TooManyExpands() {
  return consecutive_expands_ >= kMaxConsecutiveExpands;
}
337
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000338void Expand::AnalyzeSignal(int16_t* random_vector) {
339 int32_t auto_correlation[kUnvoicedLpcOrder + 1];
340 int16_t reflection_coeff[kUnvoicedLpcOrder];
341 int16_t correlation_vector[kMaxSampleRate / 8000 * 102];
342 int best_correlation_index[kNumCorrelationCandidates];
343 int16_t best_correlation[kNumCorrelationCandidates];
344 int16_t best_distortion_index[kNumCorrelationCandidates];
345 int16_t best_distortion[kNumCorrelationCandidates];
346 int32_t correlation_vector2[(99 * kMaxSampleRate / 8000) + 1];
347 int32_t best_distortion_w32[kNumCorrelationCandidates];
348 static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
349 int16_t unvoiced_array_memory[kNoiseLpcOrder + kMaxSampleRate / 8000 * 125];
350 int16_t* unvoiced_vector = unvoiced_array_memory + kUnvoicedLpcOrder;
351
352 int fs_mult = fs_hz_ / 8000;
353
354 // Pre-calculate common multiplications with fs_mult.
355 int fs_mult_4 = fs_mult * 4;
356 int fs_mult_20 = fs_mult * 20;
357 int fs_mult_120 = fs_mult * 120;
358 int fs_mult_dist_len = fs_mult * kDistortionLength;
359 int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength;
360
361 const size_t signal_length = 256 * fs_mult;
362 const int16_t* audio_history =
363 &(*sync_buffer_)[0][sync_buffer_->Size() - signal_length];
364
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000365 // Initialize.
366 InitializeForAnExpandPeriod();
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000367
368 // Calculate correlation in downsampled domain (4 kHz sample rate).
Peter Kasting36b7cc32015-06-11 19:57:18 -0700369 int correlation_scale;
turaj@webrtc.org58cd3162013-10-31 15:15:55 +0000370 int correlation_length = 51; // TODO(hlundin): Legacy bit-exactness.
371 // If it is decided to break bit-exactness |correlation_length| should be
372 // initialized to the return value of Correlation().
373 Correlation(audio_history, signal_length, correlation_vector,
374 &correlation_scale);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000375
376 // Find peaks in correlation vector.
377 DspHelper::PeakDetection(correlation_vector, correlation_length,
378 kNumCorrelationCandidates, fs_mult,
379 best_correlation_index, best_correlation);
380
381 // Adjust peak locations; cross-correlation lags start at 2.5 ms
382 // (20 * fs_mult samples).
383 best_correlation_index[0] += fs_mult_20;
384 best_correlation_index[1] += fs_mult_20;
385 best_correlation_index[2] += fs_mult_20;
386
387 // Calculate distortion around the |kNumCorrelationCandidates| best lags.
388 int distortion_scale = 0;
389 for (int i = 0; i < kNumCorrelationCandidates; i++) {
390 int16_t min_index = std::max(fs_mult_20,
391 best_correlation_index[i] - fs_mult_4);
392 int16_t max_index = std::min(fs_mult_120 - 1,
393 best_correlation_index[i] + fs_mult_4);
394 best_distortion_index[i] = DspHelper::MinDistortion(
395 &(audio_history[signal_length - fs_mult_dist_len]), min_index,
396 max_index, fs_mult_dist_len, &best_distortion_w32[i]);
397 distortion_scale = std::max(16 - WebRtcSpl_NormW32(best_distortion_w32[i]),
398 distortion_scale);
399 }
400 // Shift the distortion values to fit in 16 bits.
401 WebRtcSpl_VectorBitShiftW32ToW16(best_distortion, kNumCorrelationCandidates,
402 best_distortion_w32, distortion_scale);
403
404 // Find the maximizing index |i| of the cost function
405 // f[i] = best_correlation[i] / best_distortion[i].
turaj@webrtc.org58cd3162013-10-31 15:15:55 +0000406 int32_t best_ratio = std::numeric_limits<int32_t>::min();
Peter Kastingf045e4d2015-06-10 21:15:38 -0700407 int best_index = std::numeric_limits<int>::max();
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000408 for (int i = 0; i < kNumCorrelationCandidates; ++i) {
409 int32_t ratio;
410 if (best_distortion[i] > 0) {
411 ratio = (best_correlation[i] << 16) / best_distortion[i];
turaj@webrtc.org7126b382013-07-31 16:05:09 +0000412 } else if (best_correlation[i] == 0) {
413 ratio = 0; // No correlation set result to zero.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000414 } else {
turaj@webrtc.org7126b382013-07-31 16:05:09 +0000415 ratio = std::numeric_limits<int32_t>::max(); // Denominator is zero.
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000416 }
417 if (ratio > best_ratio) {
418 best_index = i;
419 best_ratio = ratio;
420 }
421 }
422
423 int distortion_lag = best_distortion_index[best_index];
424 int correlation_lag = best_correlation_index[best_index];
425 max_lag_ = std::max(distortion_lag, correlation_lag);
426
427 // Calculate the exact best correlation in the range between
428 // |correlation_lag| and |distortion_lag|.
Peter Kasting728d9032015-06-11 14:31:38 -0700429 correlation_length =
430 std::max(std::min(distortion_lag + 10, fs_mult_120), 60 * fs_mult);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000431
432 int start_index = std::min(distortion_lag, correlation_lag);
Peter Kasting728d9032015-06-11 14:31:38 -0700433 int correlation_lags =
434 WEBRTC_SPL_ABS_W16((distortion_lag-correlation_lag)) + 1;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000435 assert(correlation_lags <= 99 * fs_mult + 1); // Cannot be larger.
436
437 for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) {
438 ChannelParameters& parameters = channel_parameters_[channel_ix];
439 // Calculate suitable scaling.
440 int16_t signal_max = WebRtcSpl_MaxAbsValueW16(
441 &audio_history[signal_length - correlation_length - start_index
442 - correlation_lags],
443 correlation_length + start_index + correlation_lags - 1);
444 correlation_scale = ((31 - WebRtcSpl_NormW32(signal_max * signal_max))
445 + (31 - WebRtcSpl_NormW32(correlation_length))) - 31;
Peter Kasting36b7cc32015-06-11 19:57:18 -0700446 correlation_scale = std::max(0, correlation_scale);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000447
448 // Calculate the correlation, store in |correlation_vector2|.
449 WebRtcSpl_CrossCorrelation(
450 correlation_vector2,
451 &(audio_history[signal_length - correlation_length]),
452 &(audio_history[signal_length - correlation_length - start_index]),
453 correlation_length, correlation_lags, correlation_scale, -1);
454
455 // Find maximizing index.
456 best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags);
457 int32_t max_correlation = correlation_vector2[best_index];
458 // Compensate index with start offset.
459 best_index = best_index + start_index;
460
461 // Calculate energies.
462 int32_t energy1 = WebRtcSpl_DotProductWithScale(
463 &(audio_history[signal_length - correlation_length]),
464 &(audio_history[signal_length - correlation_length]),
465 correlation_length, correlation_scale);
466 int32_t energy2 = WebRtcSpl_DotProductWithScale(
467 &(audio_history[signal_length - correlation_length - best_index]),
468 &(audio_history[signal_length - correlation_length - best_index]),
469 correlation_length, correlation_scale);
470
471 // Calculate the correlation coefficient between the two portions of the
472 // signal.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700473 int32_t corr_coefficient;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000474 if ((energy1 > 0) && (energy2 > 0)) {
475 int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0);
476 int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
477 // Make sure total scaling is even (to simplify scale factor after sqrt).
478 if ((energy1_scale + energy2_scale) & 1) {
479 // If sum is odd, add 1 to make it even.
480 energy1_scale += 1;
481 }
Peter Kasting36b7cc32015-06-11 19:57:18 -0700482 int32_t scaled_energy1 = energy1 >> energy1_scale;
483 int32_t scaled_energy2 = energy2 >> energy2_scale;
484 int16_t sqrt_energy_product = static_cast<int16_t>(
485 WebRtcSpl_SqrtFloor(scaled_energy1 * scaled_energy2));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000486 // Calculate max_correlation / sqrt(energy1 * energy2) in Q14.
487 int cc_shift = 14 - (energy1_scale + energy2_scale) / 2;
488 max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift);
489 corr_coefficient = WebRtcSpl_DivW32W16(max_correlation,
490 sqrt_energy_product);
Peter Kasting36b7cc32015-06-11 19:57:18 -0700491 // Cap at 1.0 in Q14.
492 corr_coefficient = std::min(16384, corr_coefficient);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000493 } else {
494 corr_coefficient = 0;
495 }
496
497 // Extract the two vectors expand_vector0 and expand_vector1 from
498 // |audio_history|.
turaj@webrtc.org362a55e2013-09-20 16:25:28 +0000499 int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000500 const int16_t* vector1 = &(audio_history[signal_length - expansion_length]);
501 const int16_t* vector2 = vector1 - distortion_lag;
502 // Normalize the second vector to the same energy as the first.
503 energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length,
504 correlation_scale);
505 energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length,
506 correlation_scale);
507 // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0,
508 // i.e., energy1 / energy1 is within 0.25 - 4.
509 int16_t amplitude_ratio;
510 if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) {
511 // Energy constraint fulfilled. Use both vectors and scale them
512 // accordingly.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700513 int32_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0);
514 int32_t scaled_energy1 = scaled_energy2 - 13;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000515 // Calculate scaled_energy1 / scaled_energy2 in Q13.
516 int32_t energy_ratio = WebRtcSpl_DivW32W16(
517 WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1),
bjornv@webrtc.orga5ce7bb2014-10-20 08:24:54 +0000518 energy2 >> scaled_energy2);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000519 // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26).
520 amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13);
521 // Copy the two vectors and give them the same energy.
522 parameters.expand_vector0.Clear();
523 parameters.expand_vector0.PushBack(vector1, expansion_length);
524 parameters.expand_vector1.Clear();
525 if (parameters.expand_vector1.Size() <
526 static_cast<size_t>(expansion_length)) {
527 parameters.expand_vector1.Extend(
528 expansion_length - parameters.expand_vector1.Size());
529 }
530 WebRtcSpl_AffineTransformVector(&parameters.expand_vector1[0],
531 const_cast<int16_t*>(vector2),
532 amplitude_ratio,
533 4096,
534 13,
535 expansion_length);
536 } else {
537 // Energy change constraint not fulfilled. Only use last vector.
538 parameters.expand_vector0.Clear();
539 parameters.expand_vector0.PushBack(vector1, expansion_length);
540 // Copy from expand_vector0 to expand_vector1.
henrik.lundin@webrtc.orgf6ab6f82014-09-04 10:58:43 +0000541 parameters.expand_vector0.CopyTo(&parameters.expand_vector1);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000542 // Set the energy_ratio since it is used by muting slope.
543 if ((energy1 / 4 < energy2) || (energy2 == 0)) {
544 amplitude_ratio = 4096; // 0.5 in Q13.
545 } else {
546 amplitude_ratio = 16384; // 2.0 in Q13.
547 }
548 }
549
550 // Set the 3 lag values.
Peter Kastingf045e4d2015-06-10 21:15:38 -0700551 if (distortion_lag == correlation_lag) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000552 expand_lags_[0] = distortion_lag;
553 expand_lags_[1] = distortion_lag;
554 expand_lags_[2] = distortion_lag;
555 } else {
556 // |distortion_lag| and |correlation_lag| are not equal; use different
557 // combinations of the two.
558 // First lag is |distortion_lag| only.
559 expand_lags_[0] = distortion_lag;
560 // Second lag is the average of the two.
561 expand_lags_[1] = (distortion_lag + correlation_lag) / 2;
562 // Third lag is the average again, but rounding towards |correlation_lag|.
Peter Kastingf045e4d2015-06-10 21:15:38 -0700563 if (distortion_lag > correlation_lag) {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000564 expand_lags_[2] = (distortion_lag + correlation_lag - 1) / 2;
565 } else {
566 expand_lags_[2] = (distortion_lag + correlation_lag + 1) / 2;
567 }
568 }
569
570 // Calculate the LPC and the gain of the filters.
571 // Calculate scale value needed for auto-correlation.
572 correlation_scale = WebRtcSpl_MaxAbsValueW16(
573 &(audio_history[signal_length - fs_mult_lpc_analysis_len]),
574 fs_mult_lpc_analysis_len);
575
576 correlation_scale = std::min(16 - WebRtcSpl_NormW32(correlation_scale), 0);
577 correlation_scale = std::max(correlation_scale * 2 + 7, 0);
578
579 // Calculate kUnvoicedLpcOrder + 1 lags of the auto-correlation function.
580 size_t temp_index = signal_length - fs_mult_lpc_analysis_len -
581 kUnvoicedLpcOrder;
582 // Copy signal to temporary vector to be able to pad with leading zeros.
583 int16_t* temp_signal = new int16_t[fs_mult_lpc_analysis_len
584 + kUnvoicedLpcOrder];
585 memset(temp_signal, 0,
586 sizeof(int16_t) * (fs_mult_lpc_analysis_len + kUnvoicedLpcOrder));
587 memcpy(&temp_signal[kUnvoicedLpcOrder],
588 &audio_history[temp_index + kUnvoicedLpcOrder],
589 sizeof(int16_t) * fs_mult_lpc_analysis_len);
590 WebRtcSpl_CrossCorrelation(auto_correlation,
591 &temp_signal[kUnvoicedLpcOrder],
592 &temp_signal[kUnvoicedLpcOrder],
593 fs_mult_lpc_analysis_len, kUnvoicedLpcOrder + 1,
594 correlation_scale, -1);
595 delete [] temp_signal;
596
597 // Verify that variance is positive.
598 if (auto_correlation[0] > 0) {
599 // Estimate AR filter parameters using Levinson-Durbin algorithm;
600 // kUnvoicedLpcOrder + 1 filter coefficients.
601 int16_t stability = WebRtcSpl_LevinsonDurbin(auto_correlation,
602 parameters.ar_filter,
603 reflection_coeff,
604 kUnvoicedLpcOrder);
605
606 // Keep filter parameters only if filter is stable.
607 if (stability != 1) {
608 // Set first coefficient to 4096 (1.0 in Q12).
609 parameters.ar_filter[0] = 4096;
610 // Set remaining |kUnvoicedLpcOrder| coefficients to zero.
611 WebRtcSpl_MemSetW16(parameters.ar_filter + 1, 0, kUnvoicedLpcOrder);
612 }
613 }
614
615 if (channel_ix == 0) {
616 // Extract a noise segment.
617 int16_t noise_length;
618 if (distortion_lag < 40) {
619 noise_length = 2 * distortion_lag + 30;
620 } else {
621 noise_length = distortion_lag + 30;
622 }
623 if (noise_length <= RandomVector::kRandomTableSize) {
624 memcpy(random_vector, RandomVector::kRandomTable,
625 sizeof(int16_t) * noise_length);
626 } else {
627 // Only applies to SWB where length could be larger than
628 // |kRandomTableSize|.
629 memcpy(random_vector, RandomVector::kRandomTable,
630 sizeof(int16_t) * RandomVector::kRandomTableSize);
631 assert(noise_length <= kMaxSampleRate / 8000 * 120 + 30);
632 random_vector_->IncreaseSeedIncrement(2);
633 random_vector_->Generate(
634 noise_length - RandomVector::kRandomTableSize,
635 &random_vector[RandomVector::kRandomTableSize]);
636 }
637 }
638
639 // Set up state vector and calculate scale factor for unvoiced filtering.
640 memcpy(parameters.ar_filter_state,
641 &(audio_history[signal_length - kUnvoicedLpcOrder]),
642 sizeof(int16_t) * kUnvoicedLpcOrder);
643 memcpy(unvoiced_vector - kUnvoicedLpcOrder,
644 &(audio_history[signal_length - 128 - kUnvoicedLpcOrder]),
645 sizeof(int16_t) * kUnvoicedLpcOrder);
bjornv@webrtc.orgc14e3572015-01-12 05:50:52 +0000646 WebRtcSpl_FilterMAFastQ12(&audio_history[signal_length - 128],
647 unvoiced_vector,
648 parameters.ar_filter,
649 kUnvoicedLpcOrder + 1,
650 128);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000651 int16_t unvoiced_prescale;
652 if (WebRtcSpl_MaxAbsValueW16(unvoiced_vector, 128) > 4000) {
653 unvoiced_prescale = 4;
654 } else {
655 unvoiced_prescale = 0;
656 }
657 int32_t unvoiced_energy = WebRtcSpl_DotProductWithScale(unvoiced_vector,
658 unvoiced_vector,
659 128,
660 unvoiced_prescale);
661
662 // Normalize |unvoiced_energy| to 28 or 29 bits to preserve sqrt() accuracy.
663 int16_t unvoiced_scale = WebRtcSpl_NormW32(unvoiced_energy) - 3;
664 // Make sure we do an odd number of shifts since we already have 7 shifts
665 // from dividing with 128 earlier. This will make the total scale factor
666 // even, which is suitable for the sqrt.
667 unvoiced_scale += ((unvoiced_scale & 0x1) ^ 0x1);
668 unvoiced_energy = WEBRTC_SPL_SHIFT_W32(unvoiced_energy, unvoiced_scale);
Peter Kastingb7e50542015-06-11 12:55:50 -0700669 int16_t unvoiced_gain =
670 static_cast<int16_t>(WebRtcSpl_SqrtFloor(unvoiced_energy));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000671 parameters.ar_gain_scale = 13
672 + (unvoiced_scale + 7 - unvoiced_prescale) / 2;
673 parameters.ar_gain = unvoiced_gain;
674
675 // Calculate voice_mix_factor from corr_coefficient.
676 // Let x = corr_coefficient. Then, we compute:
677 // if (x > 0.48)
678 // voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096;
679 // else
680 // voice_mix_factor = 0;
681 if (corr_coefficient > 7875) {
682 int16_t x1, x2, x3;
Peter Kasting36b7cc32015-06-11 19:57:18 -0700683 // |corr_coefficient| is in Q14.
684 x1 = static_cast<int16_t>(corr_coefficient);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000685 x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14.
686 x3 = (x1 * x2) >> 14;
687 static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 };
688 int32_t temp_sum = kCoefficients[0] << 14;
689 temp_sum += kCoefficients[1] * x1;
690 temp_sum += kCoefficients[2] * x2;
691 temp_sum += kCoefficients[3] * x3;
Peter Kastingf045e4d2015-06-10 21:15:38 -0700692 parameters.voice_mix_factor =
693 static_cast<int16_t>(std::min(temp_sum / 4096, 16384));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000694 parameters.voice_mix_factor = std::max(parameters.voice_mix_factor,
695 static_cast<int16_t>(0));
696 } else {
697 parameters.voice_mix_factor = 0;
698 }
699
700 // Calculate muting slope. Reuse value from earlier scaling of
701 // |expand_vector0| and |expand_vector1|.
702 int16_t slope = amplitude_ratio;
703 if (slope > 12288) {
704 // slope > 1.5.
705 // Calculate (1 - (1 / slope)) / distortion_lag =
706 // (slope - 1) / (distortion_lag * slope).
707 // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before
708 // the division.
709 // Shift the denominator from Q13 to Q5 before the division. The result of
710 // the division will then be in Q20.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700711 int temp_ratio = WebRtcSpl_DivW32W16(
Peter Kastingb7e50542015-06-11 12:55:50 -0700712 (slope - 8192) << 12,
713 static_cast<int16_t>((distortion_lag * slope) >> 8));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000714 if (slope > 14746) {
715 // slope > 1.8.
716 // Divide by 2, with proper rounding.
717 parameters.mute_slope = (temp_ratio + 1) / 2;
718 } else {
719 // Divide by 8, with proper rounding.
720 parameters.mute_slope = (temp_ratio + 4) / 8;
721 }
722 parameters.onset = true;
723 } else {
724 // Calculate (1 - slope) / distortion_lag.
725 // Shift |slope| by 7 to Q20 before the division. The result is in Q20.
Peter Kastingb7e50542015-06-11 12:55:50 -0700726 parameters.mute_slope = WebRtcSpl_DivW32W16(
727 (8192 - slope) << 7, static_cast<int16_t>(distortion_lag));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000728 if (parameters.voice_mix_factor <= 13107) {
729 // Make sure the mute factor decreases from 1.0 to 0.9 in no more than
730 // 6.25 ms.
731 // mute_slope >= 0.005 / fs_mult in Q20.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700732 parameters.mute_slope = std::max(5243 / fs_mult, parameters.mute_slope);
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000733 } else if (slope > 8028) {
734 parameters.mute_slope = 0;
735 }
736 parameters.onset = false;
737 }
738 }
739}
740
Karl Wiberg7f6c4d42015-04-09 15:44:22 +0200741Expand::ChannelParameters::ChannelParameters()
742 : mute_factor(16384),
743 ar_gain(0),
744 ar_gain_scale(0),
745 voice_mix_factor(0),
746 current_voice_mix_factor(0),
747 onset(false),
748 mute_slope(0) {
749 memset(ar_filter, 0, sizeof(ar_filter));
750 memset(ar_filter_state, 0, sizeof(ar_filter_state));
751}
752
Peter Kasting728d9032015-06-11 14:31:38 -0700753void Expand::Correlation(const int16_t* input,
754 size_t input_length,
755 int16_t* output,
Peter Kasting36b7cc32015-06-11 19:57:18 -0700756 int* output_scale) const {
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000757 // Set parameters depending on sample rate.
758 const int16_t* filter_coefficients;
759 int16_t num_coefficients;
760 int16_t downsampling_factor;
761 if (fs_hz_ == 8000) {
762 num_coefficients = 3;
763 downsampling_factor = 2;
764 filter_coefficients = DspHelper::kDownsample8kHzTbl;
765 } else if (fs_hz_ == 16000) {
766 num_coefficients = 5;
767 downsampling_factor = 4;
768 filter_coefficients = DspHelper::kDownsample16kHzTbl;
769 } else if (fs_hz_ == 32000) {
770 num_coefficients = 7;
771 downsampling_factor = 8;
772 filter_coefficients = DspHelper::kDownsample32kHzTbl;
773 } else { // fs_hz_ == 48000.
774 num_coefficients = 7;
775 downsampling_factor = 12;
776 filter_coefficients = DspHelper::kDownsample48kHzTbl;
777 }
778
779 // Correlate from lag 10 to lag 60 in downsampled domain.
780 // (Corresponds to 20-120 for narrow-band, 40-240 for wide-band, and so on.)
781 static const int kCorrelationStartLag = 10;
782 static const int kNumCorrelationLags = 54;
783 static const int kCorrelationLength = 60;
784 // Downsample to 4 kHz sample rate.
785 static const int kDownsampledLength = kCorrelationStartLag
786 + kNumCorrelationLags + kCorrelationLength;
787 int16_t downsampled_input[kDownsampledLength];
788 static const int kFilterDelay = 0;
789 WebRtcSpl_DownsampleFast(
790 input + input_length - kDownsampledLength * downsampling_factor,
791 kDownsampledLength * downsampling_factor, downsampled_input,
792 kDownsampledLength, filter_coefficients, num_coefficients,
793 downsampling_factor, kFilterDelay);
794
795 // Normalize |downsampled_input| to using all 16 bits.
796 int16_t max_value = WebRtcSpl_MaxAbsValueW16(downsampled_input,
797 kDownsampledLength);
798 int16_t norm_shift = 16 - WebRtcSpl_NormW32(max_value);
799 WebRtcSpl_VectorBitShiftW16(downsampled_input, kDownsampledLength,
800 downsampled_input, norm_shift);
801
802 int32_t correlation[kNumCorrelationLags];
803 static const int kCorrelationShift = 6;
804 WebRtcSpl_CrossCorrelation(
805 correlation,
806 &downsampled_input[kDownsampledLength - kCorrelationLength],
807 &downsampled_input[kDownsampledLength - kCorrelationLength
808 - kCorrelationStartLag],
809 kCorrelationLength, kNumCorrelationLags, kCorrelationShift, -1);
810
811 // Normalize and move data from 32-bit to 16-bit vector.
812 int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
813 kNumCorrelationLags);
Peter Kastingb7e50542015-06-11 12:55:50 -0700814 int16_t norm_shift2 = static_cast<int16_t>(
815 std::max(18 - WebRtcSpl_NormW32(max_correlation), 0));
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000816 WebRtcSpl_VectorBitShiftW32ToW16(output, kNumCorrelationLags, correlation,
817 norm_shift2);
818 // Total scale factor (right shifts) of correlation value.
819 *output_scale = 2 * norm_shift + kCorrelationShift + norm_shift2;
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000820}
821
822void Expand::UpdateLagIndex() {
823 current_lag_index_ = current_lag_index_ + lag_index_direction_;
824 // Change direction if needed.
825 if (current_lag_index_ <= 0) {
826 lag_index_direction_ = 1;
827 }
828 if (current_lag_index_ >= kNumLags - 1) {
829 lag_index_direction_ = -1;
830 }
831}
832
henrik.lundin@webrtc.orgd9faa462014-01-14 10:18:45 +0000833Expand* ExpandFactory::Create(BackgroundNoise* background_noise,
834 SyncBuffer* sync_buffer,
835 RandomVector* random_vector,
836 int fs,
837 size_t num_channels) const {
838 return new Expand(background_noise, sync_buffer, random_vector, fs,
839 num_channels);
840}
841
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000842// TODO(turajs): This can be moved to BackgroundNoise class.
843void Expand::GenerateBackgroundNoise(int16_t* random_vector,
844 size_t channel,
Peter Kasting36b7cc32015-06-11 19:57:18 -0700845 int mute_slope,
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000846 bool too_many_expands,
847 size_t num_noise_samples,
848 int16_t* buffer) {
849 static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder;
850 int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125];
Peter Kasting728d9032015-06-11 14:31:38 -0700851 assert(num_noise_samples <= static_cast<size_t>(kMaxSampleRate / 8000 * 125));
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000852 int16_t* noise_samples = &buffer[kNoiseLpcOrder];
853 if (background_noise_->initialized()) {
854 // Use background noise parameters.
855 memcpy(noise_samples - kNoiseLpcOrder,
856 background_noise_->FilterState(channel),
857 sizeof(int16_t) * kNoiseLpcOrder);
858
859 int dc_offset = 0;
860 if (background_noise_->ScaleShift(channel) > 1) {
861 dc_offset = 1 << (background_noise_->ScaleShift(channel) - 1);
862 }
863
864 // Scale random vector to correct energy level.
865 WebRtcSpl_AffineTransformVector(
866 scaled_random_vector, random_vector,
867 background_noise_->Scale(channel), dc_offset,
868 background_noise_->ScaleShift(channel),
869 static_cast<int>(num_noise_samples));
870
871 WebRtcSpl_FilterARFastQ12(scaled_random_vector, noise_samples,
872 background_noise_->Filter(channel),
873 kNoiseLpcOrder + 1,
874 static_cast<int>(num_noise_samples));
875
876 background_noise_->SetFilterState(
877 channel,
878 &(noise_samples[num_noise_samples - kNoiseLpcOrder]),
879 kNoiseLpcOrder);
880
881 // Unmute the background noise.
882 int16_t bgn_mute_factor = background_noise_->MuteFactor(channel);
henrik.lundin@webrtc.orgea257842014-08-07 12:27:37 +0000883 NetEq::BackgroundNoiseMode bgn_mode = background_noise_->mode();
884 if (bgn_mode == NetEq::kBgnFade && too_many_expands &&
885 bgn_mute_factor > 0) {
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000886 // Fade BGN to zero.
887 // Calculate muting slope, approximately -2^18 / fs_hz.
Peter Kasting36b7cc32015-06-11 19:57:18 -0700888 int mute_slope;
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000889 if (fs_hz_ == 8000) {
890 mute_slope = -32;
891 } else if (fs_hz_ == 16000) {
892 mute_slope = -16;
893 } else if (fs_hz_ == 32000) {
894 mute_slope = -8;
895 } else {
896 mute_slope = -5;
897 }
898 // Use UnmuteSignal function with negative slope.
899 // |bgn_mute_factor| is in Q14. |mute_slope| is in Q20.
900 DspHelper::UnmuteSignal(noise_samples,
901 num_noise_samples,
902 &bgn_mute_factor,
903 mute_slope,
904 noise_samples);
905 } else if (bgn_mute_factor < 16384) {
henrik.lundin@webrtc.org023f12f2014-08-13 09:45:40 +0000906 // If mode is kBgnOn, or if kBgnFade has started fading,
907 // use regular |mute_slope|.
henrik.lundin@webrtc.orgea257842014-08-07 12:27:37 +0000908 if (!stop_muting_ && bgn_mode != NetEq::kBgnOff &&
909 !(bgn_mode == NetEq::kBgnFade && too_many_expands)) {
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000910 DspHelper::UnmuteSignal(noise_samples,
911 static_cast<int>(num_noise_samples),
912 &bgn_mute_factor,
913 mute_slope,
914 noise_samples);
915 } else {
916 // kBgnOn and stop muting, or
917 // kBgnOff (mute factor is always 0), or
918 // kBgnFade has reached 0.
919 WebRtcSpl_AffineTransformVector(noise_samples, noise_samples,
920 bgn_mute_factor, 8192, 14,
921 static_cast<int>(num_noise_samples));
922 }
923 }
924 // Update mute_factor in BackgroundNoise class.
925 background_noise_->SetMuteFactor(channel, bgn_mute_factor);
926 } else {
927 // BGN parameters have not been initialized; use zero noise.
928 memset(noise_samples, 0, sizeof(int16_t) * num_noise_samples);
929 }
930}
931
Peter Kastingb7e50542015-06-11 12:55:50 -0700932void Expand::GenerateRandomVector(int16_t seed_increment,
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000933 size_t length,
934 int16_t* random_vector) {
935 // TODO(turajs): According to hlundin The loop should not be needed. Should be
936 // just as good to generate all of the vector in one call.
937 size_t samples_generated = 0;
938 const size_t kMaxRandSamples = RandomVector::kRandomTableSize;
henrik.lundin@webrtc.orgea257842014-08-07 12:27:37 +0000939 while (samples_generated < length) {
turaj@webrtc.org8d1cdaa2014-04-11 18:47:55 +0000940 size_t rand_length = std::min(length - samples_generated, kMaxRandSamples);
941 random_vector_->IncreaseSeedIncrement(seed_increment);
942 random_vector_->Generate(rand_length, &random_vector[samples_generated]);
943 samples_generated += rand_length;
944 }
945}
henrik.lundin@webrtc.orgd9faa462014-01-14 10:18:45 +0000946
henrik.lundin@webrtc.orgd94659d2013-01-29 12:09:21 +0000947} // namespace webrtc