/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_coding/neteq4/merge.h"

#include <assert.h>
#include <string.h>  // memmove, memcpy, memset, size_t

#include <algorithm>  // min, max

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq4/audio_multi_vector.h"
#include "webrtc/modules/audio_coding/neteq4/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq4/expand.h"
#include "webrtc/modules/audio_coding/neteq4/sync_buffer.h"

namespace webrtc {

int Merge::Process(int16_t* input, int input_length,
                   int16_t* external_mute_factor_array,
                   AudioMultiVector<int16_t>* output) {
  // TODO(hlundin): Change to an enumerator and skip assert.
  assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
         fs_hz_ == 48000);
  assert(fs_hz_ <= kMaxSampleRate);  // Should not be possible.

  int old_length;
  int expand_period;
  // Get expansion data to overlap and mix with.
  int expanded_length = GetExpandedSignal(&old_length, &expand_period);

  // Transfer input signal to an AudioMultiVector.
  AudioMultiVector<int16_t> input_vector(num_channels_);
  input_vector.PushBackInterleaved(input, input_length);
  size_t input_length_per_channel = input_vector.Size();
  assert(input_length_per_channel == input_length / num_channels_);

  int16_t best_correlation_index = 0;
  size_t output_length = 0;

  for (size_t channel = 0; channel < num_channels_; ++channel) {
    int16_t* input_channel = &input_vector[channel][0];
    int16_t* expanded_channel = &expanded_[channel][0];
    int16_t expanded_max, input_max;
    int16_t new_mute_factor = SignalScaling(input_channel,
                                            input_length_per_channel,
                                            expanded_channel, &expanded_max,
                                            &input_max);

    // Adjust muting factor (product of "main" muting factor and expand muting
    // factor).
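    // Both factors are in Q14, so the product is in Q28; the shift by 14
    // below brings the result back to Q14.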
    int16_t* external_mute_factor = &external_mute_factor_array[channel];
    *external_mute_factor =
        (*external_mute_factor * expand_->MuteFactor(channel)) >> 14;

    // Update |external_mute_factor| if it is lower than |new_mute_factor|.
    if (new_mute_factor > *external_mute_factor) {
      *external_mute_factor = std::min(new_mute_factor,
                                       static_cast<int16_t>(16384));
    }

    if (channel == 0) {
      // Downsample, correlate, and find strongest correlation period for the
      // master (i.e., first) channel only.
      // Downsample to 4kHz sample rate.
      Downsample(input_channel, input_length_per_channel, expanded_channel,
                 expanded_length);

      // Calculate the lag of the strongest correlation period.
      best_correlation_index = CorrelateAndPeakSearch(expanded_max,
                                                      input_max,
                                                      old_length,
                                                      input_length_per_channel,
                                                      expand_period);
    }

    static const int kTempDataSize = 3600;
    int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
    int16_t* decoded_output = temp_data + best_correlation_index;

    // Mute the new decoded data if needed (and unmute it linearly).
    // This is the overlapping part of expanded_signal.
    int interpolation_length = std::min(
        kMaxCorrelationLength * fs_mult_,
        expanded_length - best_correlation_index);
    interpolation_length = std::min(interpolation_length,
                                    static_cast<int>(input_length_per_channel));
    if (*external_mute_factor < 16384) {
      // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
      // and so on.
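      // 4194 in Q20 is approximately 0.004; dividing by fs_mult_ scales the
      // slope with the sample rate (e.g., roughly 0.002 for WB).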
      int increment = 4194 / fs_mult_;
      *external_mute_factor = DspHelper::RampSignal(input_channel,
                                                    interpolation_length,
                                                    *external_mute_factor,
                                                    increment);
      DspHelper::UnmuteSignal(&input_channel[interpolation_length],
                              input_length_per_channel - interpolation_length,
                              external_mute_factor, increment,
                              &decoded_output[interpolation_length]);
    } else {
      // No muting needed.
      memmove(
          &decoded_output[interpolation_length],
          &input_channel[interpolation_length],
          sizeof(int16_t) * (input_length_per_channel - interpolation_length));
    }

    // Do overlap and mix linearly.
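    // |increment| is chosen so that a Q14 gain stepping once per sample covers
    // the full range (0 to 16384) over |interpolation_length| samples.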
    int increment = 16384 / (interpolation_length + 1);  // In Q14.
    int16_t mute_factor = 16384 - increment;
    memmove(temp_data, expanded_channel,
            sizeof(int16_t) * best_correlation_index);
    DspHelper::CrossFade(&expanded_channel[best_correlation_index],
                         input_channel, interpolation_length,
                         &mute_factor, increment, decoded_output);

    output_length = best_correlation_index + input_length_per_channel;
    if (channel == 0) {
      assert(output->Empty());  // Output should be empty at this point.
      output->AssertSize(output_length);
    } else {
      assert(output->Size() == output_length);
    }
    memcpy(&(*output)[channel][0], temp_data,
           sizeof(temp_data[0]) * output_length);
  }

  // Copy back the first part of the data to |sync_buffer_| and remove it from
  // |output|.
  sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
  output->PopFront(old_length);

  // Return the newly added length. |old_length| samples were borrowed from
  // |sync_buffer_|.
  return output_length - old_length;
}

int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
  // Check how much data is left from earlier.
  *old_length = sync_buffer_->FutureLength();
  // Should never be less than overlap_length.
  assert(*old_length >= static_cast<int>(expand_->overlap_length()));
  // Generate data to merge the overlap with using expand.
  expand_->SetParametersForMergeAfterExpand();

  if (*old_length >= 210 * kMaxSampleRate / 8000) {
    // TODO(hlundin): Write test case for this.
    // The number of samples available in the sync buffer is more than what
    // fits in expanded_signal. Keep the first 210 * kMaxSampleRate / 8000
    // samples, but shift them towards the end of the buffer. This is ok, since
    // all of the buffer will be expand data anyway, so as long as the
    // beginning is left untouched, we're fine.
    int16_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
    sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
    *old_length = 210 * kMaxSampleRate / 8000;
    // This is the truncated length.
  }
  // This assert should always be true thanks to the if statement above.
  assert(210 * kMaxSampleRate / 8000 - *old_length >= 0);

  AudioMultiVector<int16_t> expanded_temp(num_channels_);
  expand_->Process(&expanded_temp);
  *expand_period = expanded_temp.Size();  // Samples per channel.

  expanded_.Clear();
  // Copy what is left from earlier into the expanded vector.
  expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
  assert(expanded_.Size() == static_cast<size_t>(*old_length));
  assert(expanded_temp.Size() > 0);
  // Do "ugly" copy and paste from the expanded in order to generate more data
  // to correlate (but not interpolate) with.
  const int required_length = (120 + 80 + 2) * fs_mult_;
  if (expanded_.Size() < static_cast<size_t>(required_length)) {
    while (expanded_.Size() < static_cast<size_t>(required_length)) {
      // Append one more pitch period each time.
      expanded_.PushBack(expanded_temp);
    }
    // Trim the length to exactly |required_length|.
    expanded_.PopBack(expanded_.Size() - required_length);
  }
  assert(expanded_.Size() >= static_cast<size_t>(required_length));
  return required_length;
}

int16_t Merge::SignalScaling(const int16_t* input, int input_length,
                             const int16_t* expanded_signal,
                             int16_t* expanded_max, int16_t* input_max) const {
  // Adjust the muting factor depending on whether the new vector has more or
  // less energy than the BGN (background noise).
  const int mod_input_length = std::min(64 * fs_mult_, input_length);
  *expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
  *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);

  // Calculate energy of expanded signal.
  // |log_fs_mult| is log2(fs_mult_), but is not exact for 48000 Hz.
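  // For example, fs_mult_ = 1, 2, 4 gives log_fs_mult = 0, 1, 2, while
  // fs_mult_ = 6 (48000 Hz) also gives 2 rather than log2(6).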
  int log_fs_mult = 30 - WebRtcSpl_NormW32(fs_mult_);
  int expanded_shift = 6 + log_fs_mult
      - WebRtcSpl_NormW32(*expanded_max * *expanded_max);
  expanded_shift = std::max(expanded_shift, 0);
  int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
                                                          expanded_signal,
                                                          mod_input_length,
                                                          expanded_shift);

  // Calculate energy of input signal.
  int input_shift = 6 + log_fs_mult -
      WebRtcSpl_NormW32(*input_max * *input_max);
  input_shift = std::max(input_shift, 0);
  int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input,
                                                       mod_input_length,
                                                       input_shift);

  // Align to the same Q-domain.
  if (input_shift > expanded_shift) {
    energy_expanded = energy_expanded >> (input_shift - expanded_shift);
  } else {
    energy_input = energy_input >> (expanded_shift - input_shift);
  }

  // Calculate muting factor to use for new frame.
  int16_t mute_factor;
  if (energy_input > energy_expanded) {
    // Normalize |energy_input| to 14 bits.
    int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
    energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
    // Put |energy_expanded| in a domain 14 higher, so that
    // energy_expanded / energy_input is in Q14.
    energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
    // Calculate sqrt(energy_expanded / energy_input) in Q14.
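    // The Q14 ratio shifted left by another 14 bits is in Q28, and the square
    // root of a Q28 value is in Q14.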
    mute_factor = WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14);
  } else {
    // Set to 1 (in Q14) when |expanded| has higher energy than |input|.
    mute_factor = 16384;
  }

  return mute_factor;
}

// TODO(hlundin): There are some parameter values in this method that seem
// strange. Compare with Expand::Correlation.
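// Downsamples both |input| and |expanded_signal| to the 4 kHz domain and
// stores the results in |input_downsampled_| and |expanded_downsampled_|,
// which are later used by CorrelateAndPeakSearch().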
void Merge::Downsample(const int16_t* input, int input_length,
                       const int16_t* expanded_signal, int expanded_length) {
  const int16_t* filter_coefficients;
  int num_coefficients;
  int decimation_factor = fs_hz_ / 4000;
  static const int kCompensateDelay = 0;
  int length_limit = fs_hz_ / 100;
  if (fs_hz_ == 8000) {
    filter_coefficients = DspHelper::kDownsample8kHzTbl;
    num_coefficients = 3;
  } else if (fs_hz_ == 16000) {
    filter_coefficients = DspHelper::kDownsample16kHzTbl;
    num_coefficients = 5;
  } else if (fs_hz_ == 32000) {
    filter_coefficients = DspHelper::kDownsample32kHzTbl;
    num_coefficients = 7;
  } else {  // fs_hz_ == 48000
    filter_coefficients = DspHelper::kDownsample48kHzTbl;
    num_coefficients = 7;
    // TODO(hlundin) Why is |length_limit| not 480 (legacy)?
    length_limit = 320;
  }
  int signal_offset = num_coefficients - 1;
  WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
                           expanded_length - signal_offset,
                           expanded_downsampled_, kExpandDownsampLength,
                           filter_coefficients, num_coefficients,
                           decimation_factor, kCompensateDelay);
  if (input_length <= length_limit) {
    // Not quite long enough, so we have to cheat a bit.
    int16_t temp_len = input_length - signal_offset;
    // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
    // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
    int16_t downsamp_temp_len = temp_len / decimation_factor;
    WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
                             input_downsampled_, downsamp_temp_len,
                             filter_coefficients, num_coefficients,
                             decimation_factor, kCompensateDelay);
    memset(&input_downsampled_[downsamp_temp_len], 0,
           sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len));
  } else {
    WebRtcSpl_DownsampleFast(&input[signal_offset],
                             input_length - signal_offset, input_downsampled_,
                             kInputDownsampLength, filter_coefficients,
                             num_coefficients, decimation_factor,
                             kCompensateDelay);
  }
}

int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
                                      int start_position, int input_length,
                                      int expand_period) const {
  // Calculate correlation without any normalization.
  const int max_corr_length = kMaxCorrelationLength;
  int stop_position_downsamp = std::min(
      max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
  int16_t correlation_shift = 0;
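  // Apply a shift when the signal magnitudes are large, to reduce the risk of
  // overflow in the 32-bit correlation accumulation.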
  if (expanded_max * input_max > 26843546) {
    correlation_shift = 3;
  }

  int32_t correlation[kMaxCorrelationLength];
  WebRtcSpl_CrossCorrelation(correlation, input_downsampled_,
                             expanded_downsampled_, kInputDownsampLength,
                             stop_position_downsamp, correlation_shift, 1);

  // Normalize correlation to 14 bits and copy to a 16-bit array.
  static const int kPadLength = 4;
  int16_t correlation16[kPadLength + kMaxCorrelationLength + kPadLength] = {0};
  int16_t* correlation_ptr = &correlation16[kPadLength];
  int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
                                                     stop_position_downsamp);
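  // Shifting right by 17 - WebRtcSpl_NormW32(max_correlation) leaves at most
  // 14 significant bits in the largest correlation value.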
  int16_t norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
  WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
                                   correlation, norm_shift);

  // Calculate allowed starting point for peak finding.
  // The peak location best_correlation_index must fulfill two criteria:
  // (1) best_correlation_index + input_length >=
  //     timestamps_per_call_ + expand_->overlap_length();
  // (2) best_correlation_index + input_length >= start_position.
  int start_index = timestamps_per_call_ + expand_->overlap_length();
  start_index = std::max(start_position, start_index);
  start_index = std::max(start_index - input_length, 0);
  // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
  int start_index_downsamp = start_index / (fs_mult_ * 2);

  // Calculate a modified |stop_position_downsamp| to account for the increased
  // start index |start_index_downsamp| and the effective array length.
  int16_t modified_stop_pos =
      std::min(stop_position_downsamp,
               kMaxCorrelationLength + kPadLength - start_index_downsamp);
  int best_correlation_index;
  int16_t best_correlation;
  static const int kNumCorrelationCandidates = 1;
  DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
                           modified_stop_pos, kNumCorrelationCandidates,
                           fs_mult_, &best_correlation_index,
                           &best_correlation);
  // Compensate for modified start index.
  best_correlation_index += start_index;

  // Ensure that underrun does not occur for the 10 ms case => we have to get
  // at least 10 ms + overlap. (This should never happen thanks to the above
  // modification of the peak-finding starting point.)
  while ((best_correlation_index + input_length) <
         static_cast<int>(timestamps_per_call_ + expand_->overlap_length()) ||
         best_correlation_index + input_length < start_position) {
    assert(false);  // Should never happen.
    best_correlation_index += expand_period;  // Jump one lag ahead.
  }
  return best_correlation_index;
}

}  // namespace webrtc