henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license |
| 5 | * that can be found in the LICENSE file in the root of the source |
| 6 | * tree. An additional intellectual property rights grant can be found |
| 7 | * in the file PATENTS. All contributing project authors may |
| 8 | * be found in the AUTHORS file in the root of the source tree. |
| 9 | */ |
| 10 | |
henrik.lundin@webrtc.org | 9c55f0f | 2014-06-09 08:10:28 +0000 | [diff] [blame] | 11 | #include "webrtc/modules/audio_coding/neteq/merge.h" |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 12 | |
| 13 | #include <assert.h> |
pbos@webrtc.org | 12dc1a3 | 2013-08-05 16:22:53 +0000 | [diff] [blame] | 14 | #include <string.h> // memmove, memcpy, memset, size_t |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 15 | |
| 16 | #include <algorithm> // min, max |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 17 | |
kwiberg@webrtc.org | 00b8f6b | 2015-02-26 14:34:55 +0000 | [diff] [blame] | 18 | #include "webrtc/base/scoped_ptr.h" |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 19 | #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h" |
henrik.lundin@webrtc.org | 9c55f0f | 2014-06-09 08:10:28 +0000 | [diff] [blame] | 20 | #include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h" |
| 21 | #include "webrtc/modules/audio_coding/neteq/dsp_helper.h" |
| 22 | #include "webrtc/modules/audio_coding/neteq/expand.h" |
| 23 | #include "webrtc/modules/audio_coding/neteq/sync_buffer.h" |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 24 | |
| 25 | namespace webrtc { |
| 26 | |
// Constructs a Merge operation object. Merge mixes newly decoded audio with
// the tail of a preceding Expand (packet-loss concealment) period so that the
// transition is smooth. The constructor only stores configuration and
// collaborator pointers; all processing happens in Process().
//
// |fs_hz| is the sample rate in Hz; Process() asserts it is one of
// 8000/16000/32000/48000. |num_channels| must be > 0. |expand| and
// |sync_buffer| are borrowed (not owned); the caller must keep them alive for
// the lifetime of this object.
Merge::Merge(int fs_hz,
             size_t num_channels,
             Expand* expand,
             SyncBuffer* sync_buffer)
    : fs_hz_(fs_hz),
      num_channels_(num_channels),
      fs_mult_(fs_hz_ / 8000),  // Sample-rate multiplier relative to 8 kHz.
      timestamps_per_call_(fs_hz_ / 100),  // 10 ms worth of samples.
      expand_(expand),
      sync_buffer_(sync_buffer),
      expanded_(num_channels_) {
  assert(num_channels_ > 0);
}
| 40 | |
// Merges the newly decoded interleaved samples in |input| (|input_length|
// samples in total, across all channels) with expanded data generated from
// |expand_| and the tail of |sync_buffer_|. The merged result is written to
// |output| (one vector per channel). |external_mute_factor_array| holds one
// Q14 mute factor per channel; it is both read and updated here.
// Returns the number of samples that were newly produced, i.e., the output
// length minus the |old_length| samples that were borrowed back from
// |sync_buffer_|.
int Merge::Process(int16_t* input, size_t input_length,
                   int16_t* external_mute_factor_array,
                   AudioMultiVector* output) {
  // TODO(hlundin): Change to an enumerator and skip assert.
  assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
         fs_hz_ == 48000);
  assert(fs_hz_ <= kMaxSampleRate);  // Should not be possible.

  int old_length;
  int expand_period;
  // Get expansion data to overlap and mix with.
  int expanded_length = GetExpandedSignal(&old_length, &expand_period);

  // Transfer input signal to an AudioMultiVector.
  AudioMultiVector input_vector(num_channels_);
  input_vector.PushBackInterleaved(input, input_length);
  size_t input_length_per_channel = input_vector.Size();
  assert(input_length_per_channel == input_length / num_channels_);

  int16_t best_correlation_index = 0;
  size_t output_length = 0;

  for (size_t channel = 0; channel < num_channels_; ++channel) {
    int16_t* input_channel = &input_vector[channel][0];
    int16_t* expanded_channel = &expanded_[channel][0];
    int16_t expanded_max, input_max;
    // Q14 mute factor derived from the energy ratio between the expanded
    // signal and the new input (see SignalScaling()).
    int16_t new_mute_factor = SignalScaling(
        input_channel, static_cast<int>(input_length_per_channel),
        expanded_channel, &expanded_max, &input_max);

    // Adjust muting factor (product of "main" muting factor and expand muting
    // factor).
    int16_t* external_mute_factor = &external_mute_factor_array[channel];
    *external_mute_factor =
        (*external_mute_factor * expand_->MuteFactor(channel)) >> 14;

    // Update |external_mute_factor| if it is lower than |new_mute_factor|.
    if (new_mute_factor > *external_mute_factor) {
      // Cap at 16384, which is 1.0 in Q14.
      *external_mute_factor = std::min(new_mute_factor,
                                       static_cast<int16_t>(16384));
    }

    if (channel == 0) {
      // Downsample, correlate, and find strongest correlation period for the
      // master (i.e., first) channel only. The same lag is then reused for
      // all remaining channels so they stay time-aligned.
      // Downsample to 4kHz sample rate.
      Downsample(input_channel, static_cast<int>(input_length_per_channel),
                 expanded_channel, expanded_length);

      // Calculate the lag of the strongest correlation period.
      best_correlation_index = CorrelateAndPeakSearch(
          expanded_max, input_max, old_length,
          static_cast<int>(input_length_per_channel), expand_period);
    }

    static const int kTempDataSize = 3600;
    int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
    // The new decoded data is placed after the first
    // |best_correlation_index| expanded samples, so the cross-fade happens at
    // the point of maximum correlation.
    int16_t* decoded_output = temp_data + best_correlation_index;

    // Mute the new decoded data if needed (and unmute it linearly).
    // This is the overlapping part of expanded_signal.
    int interpolation_length = std::min(
        kMaxCorrelationLength * fs_mult_,
        expanded_length - best_correlation_index);
    interpolation_length = std::min(interpolation_length,
                                    static_cast<int>(input_length_per_channel));
    if (*external_mute_factor < 16384) {
      // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
      // and so on.
      int increment = 4194 / fs_mult_;
      // Ramp the overlap region in place, then continue unmuting the
      // non-overlapping remainder directly into |decoded_output|.
      *external_mute_factor =
          static_cast<int16_t>(DspHelper::RampSignal(input_channel,
                                                     interpolation_length,
                                                     *external_mute_factor,
                                                     increment));
      DspHelper::UnmuteSignal(&input_channel[interpolation_length],
                              input_length_per_channel - interpolation_length,
                              external_mute_factor, increment,
                              &decoded_output[interpolation_length]);
    } else {
      // No muting needed.
      memmove(
          &decoded_output[interpolation_length],
          &input_channel[interpolation_length],
          sizeof(int16_t) * (input_length_per_channel - interpolation_length));
    }

    // Do overlap and mix linearly.
    int16_t increment =
        static_cast<int16_t>(16384 / (interpolation_length + 1));  // In Q14.
    int16_t mute_factor = 16384 - increment;
    // Keep the leading expanded samples untouched before the fade point.
    memmove(temp_data, expanded_channel,
            sizeof(int16_t) * best_correlation_index);
    DspHelper::CrossFade(&expanded_channel[best_correlation_index],
                         input_channel, interpolation_length,
                         &mute_factor, increment, decoded_output);

    output_length = best_correlation_index + input_length_per_channel;
    if (channel == 0) {
      assert(output->Empty());  // Output should be empty at this point.
      output->AssertSize(output_length);
    } else {
      assert(output->Size() == output_length);
    }
    memcpy(&(*output)[channel][0], temp_data,
           sizeof(temp_data[0]) * output_length);
  }

  // Copy back the first part of the data to |sync_buffer_| and remove it from
  // |output|.
  sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
  output->PopFront(old_length);

  // Return new added length. |old_length| samples were borrowed from
  // |sync_buffer_|.
  return static_cast<int>(output_length) - old_length;
}
| 158 | |
// Generates the expansion signal that Process() overlaps with the new input.
// On return, |expanded_| holds, per channel, the leftover future samples from
// |sync_buffer_| followed by freshly generated expand data, trimmed/padded to
// a fixed correlation length. |*old_length| is set to the number of samples
// that were borrowed from |sync_buffer_| (these are written back at the end
// of Process()), and |*expand_period| to the pitch period (samples per
// channel) produced by one Expand::Process() call. Returns the per-channel
// length of |expanded_|.
int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
  // Check how much data that is left since earlier.
  *old_length = static_cast<int>(sync_buffer_->FutureLength());
  // Should never be less than overlap_length.
  assert(*old_length >= static_cast<int>(expand_->overlap_length()));
  // Generate data to merge the overlap with using expand.
  expand_->SetParametersForMergeAfterExpand();

  if (*old_length >= 210 * kMaxSampleRate / 8000) {
    // TODO(hlundin): Write test case for this.
    // The number of samples available in the sync buffer is more than what fits
    // in expanded_signal. Keep the first 210 * kMaxSampleRate / 8000 samples,
    // but shift them towards the end of the buffer. This is ok, since all of
    // the buffer will be expand data anyway, so as long as the beginning is
    // left untouched, we're fine.
    int16_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
    sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
    *old_length = 210 * kMaxSampleRate / 8000;
    // This is the truncated length.
  }
  // This assert should always be true thanks to the if statement above.
  assert(210 * kMaxSampleRate / 8000 >= *old_length);

  AudioMultiVector expanded_temp(num_channels_);
  expand_->Process(&expanded_temp);
  *expand_period = static_cast<int>(expanded_temp.Size());  // Samples per
                                                            // channel.

  expanded_.Clear();
  // Copy what is left since earlier into the expanded vector.
  expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
  assert(expanded_.Size() == static_cast<size_t>(*old_length));
  assert(expanded_temp.Size() > 0);
  // Do "ugly" copy and paste from the expanded in order to generate more data
  // to correlate (but not interpolate) with.
  const int required_length = (120 + 80 + 2) * fs_mult_;
  if (expanded_.Size() < static_cast<size_t>(required_length)) {
    while (expanded_.Size() < static_cast<size_t>(required_length)) {
      // Append one more pitch period each time.
      expanded_.PushBack(expanded_temp);
    }
    // Trim the length to exactly |required_length|.
    expanded_.PopBack(expanded_.Size() - required_length);
  }
  assert(expanded_.Size() >= static_cast<size_t>(required_length));
  return required_length;
}
| 206 | |
| 207 | int16_t Merge::SignalScaling(const int16_t* input, int input_length, |
| 208 | const int16_t* expanded_signal, |
| 209 | int16_t* expanded_max, int16_t* input_max) const { |
| 210 | // Adjust muting factor if new vector is more or less of the BGN energy. |
| 211 | const int mod_input_length = std::min(64 * fs_mult_, input_length); |
| 212 | *expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length); |
| 213 | *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length); |
| 214 | |
| 215 | // Calculate energy of expanded signal. |
| 216 | // |log_fs_mult| is log2(fs_mult_), but is not exact for 48000 Hz. |
| 217 | int log_fs_mult = 30 - WebRtcSpl_NormW32(fs_mult_); |
| 218 | int expanded_shift = 6 + log_fs_mult |
| 219 | - WebRtcSpl_NormW32(*expanded_max * *expanded_max); |
| 220 | expanded_shift = std::max(expanded_shift, 0); |
| 221 | int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal, |
| 222 | expanded_signal, |
| 223 | mod_input_length, |
| 224 | expanded_shift); |
| 225 | |
| 226 | // Calculate energy of input signal. |
| 227 | int input_shift = 6 + log_fs_mult - |
| 228 | WebRtcSpl_NormW32(*input_max * *input_max); |
| 229 | input_shift = std::max(input_shift, 0); |
| 230 | int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input, |
| 231 | mod_input_length, |
| 232 | input_shift); |
| 233 | |
| 234 | // Align to the same Q-domain. |
| 235 | if (input_shift > expanded_shift) { |
| 236 | energy_expanded = energy_expanded >> (input_shift - expanded_shift); |
| 237 | } else { |
| 238 | energy_input = energy_input >> (expanded_shift - input_shift); |
| 239 | } |
| 240 | |
| 241 | // Calculate muting factor to use for new frame. |
| 242 | int16_t mute_factor; |
| 243 | if (energy_input > energy_expanded) { |
| 244 | // Normalize |energy_input| to 14 bits. |
| 245 | int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17; |
| 246 | energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift); |
| 247 | // Put |energy_expanded| in a domain 14 higher, so that |
| 248 | // energy_expanded / energy_input is in Q14. |
| 249 | energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14); |
| 250 | // Calculate sqrt(energy_expanded / energy_input) in Q14. |
Peter Kasting | b7e5054 | 2015-06-11 12:55:50 -0700 | [diff] [blame] | 251 | mute_factor = static_cast<int16_t>( |
| 252 | WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14)); |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 253 | } else { |
| 254 | // Set to 1 (in Q14) when |expanded| has higher energy than |input|. |
| 255 | mute_factor = 16384; |
| 256 | } |
| 257 | |
| 258 | return mute_factor; |
| 259 | } |
| 260 | |
| 261 | // TODO(hlundin): There are some parameter values in this method that seem |
| 262 | // strange. Compare with Expand::Correlation. |
| 263 | void Merge::Downsample(const int16_t* input, int input_length, |
| 264 | const int16_t* expanded_signal, int expanded_length) { |
| 265 | const int16_t* filter_coefficients; |
| 266 | int num_coefficients; |
| 267 | int decimation_factor = fs_hz_ / 4000; |
| 268 | static const int kCompensateDelay = 0; |
henrik.lundin@webrtc.org | 367000f | 2014-03-14 12:28:39 +0000 | [diff] [blame] | 269 | int length_limit = fs_hz_ / 100; // 10 ms in samples. |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 270 | if (fs_hz_ == 8000) { |
| 271 | filter_coefficients = DspHelper::kDownsample8kHzTbl; |
| 272 | num_coefficients = 3; |
| 273 | } else if (fs_hz_ == 16000) { |
| 274 | filter_coefficients = DspHelper::kDownsample16kHzTbl; |
| 275 | num_coefficients = 5; |
| 276 | } else if (fs_hz_ == 32000) { |
| 277 | filter_coefficients = DspHelper::kDownsample32kHzTbl; |
| 278 | num_coefficients = 7; |
| 279 | } else { // fs_hz_ == 48000 |
| 280 | filter_coefficients = DspHelper::kDownsample48kHzTbl; |
| 281 | num_coefficients = 7; |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 282 | } |
| 283 | int signal_offset = num_coefficients - 1; |
| 284 | WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset], |
| 285 | expanded_length - signal_offset, |
| 286 | expanded_downsampled_, kExpandDownsampLength, |
| 287 | filter_coefficients, num_coefficients, |
| 288 | decimation_factor, kCompensateDelay); |
| 289 | if (input_length <= length_limit) { |
| 290 | // Not quite long enough, so we have to cheat a bit. |
| 291 | int16_t temp_len = input_length - signal_offset; |
| 292 | // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off |
| 293 | // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor? |
| 294 | int16_t downsamp_temp_len = temp_len / decimation_factor; |
| 295 | WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len, |
| 296 | input_downsampled_, downsamp_temp_len, |
| 297 | filter_coefficients, num_coefficients, |
| 298 | decimation_factor, kCompensateDelay); |
| 299 | memset(&input_downsampled_[downsamp_temp_len], 0, |
| 300 | sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len)); |
| 301 | } else { |
| 302 | WebRtcSpl_DownsampleFast(&input[signal_offset], |
| 303 | input_length - signal_offset, input_downsampled_, |
| 304 | kInputDownsampLength, filter_coefficients, |
| 305 | num_coefficients, decimation_factor, |
| 306 | kCompensateDelay); |
| 307 | } |
| 308 | } |
| 309 | |
// Cross-correlates the downsampled input with the downsampled expanded signal
// (both prepared by Downsample()) and returns the lag, in full-rate samples,
// at which the new input should be spliced onto the expanded signal.
// |expanded_max| and |input_max| are the peak absolute values from
// SignalScaling(), used only to decide on a pre-shift that avoids correlation
// overflow. |start_position| is the number of samples borrowed from the sync
// buffer, |input_length| the per-channel input length, and |expand_period|
// the pitch period used as a fallback step if the peak lands too early.
int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
                                      int start_position, int input_length,
                                      int expand_period) const {
  // Calculate correlation without any normalization.
  const int max_corr_length = kMaxCorrelationLength;
  int stop_position_downsamp =
      std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
  // Pre-shift the products by 3 bits when the peak values are large enough
  // that the accumulated correlation could overflow 32 bits.
  int16_t correlation_shift = 0;
  if (expanded_max * input_max > 26843546) {
    correlation_shift = 3;
  }

  int32_t correlation[kMaxCorrelationLength];
  WebRtcSpl_CrossCorrelation(correlation, input_downsampled_,
                             expanded_downsampled_, kInputDownsampLength,
                             stop_position_downsamp, correlation_shift, 1);

  // Normalize correlation to 14 bits and copy to a 16-bit array.
  // The 16-bit buffer is zero-padded by |pad_length| on both sides so that
  // PeakDetection can safely look slightly outside the computed range.
  const int pad_length = static_cast<int>(expand_->overlap_length() - 1);
  const int correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
  rtc::scoped_ptr<int16_t[]> correlation16(
      new int16_t[correlation_buffer_size]);
  memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
  int16_t* correlation_ptr = &correlation16[pad_length];
  int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
                                                     stop_position_downsamp);
  int16_t norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
  WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
                                   correlation, norm_shift);

  // Calculate allowed starting point for peak finding.
  // The peak location bestIndex must fulfill two criteria:
  // (1) w16_bestIndex + input_length <
  //     timestamps_per_call_ + expand_->overlap_length();
  // (2) w16_bestIndex + input_length < start_position.
  int start_index = timestamps_per_call_ +
      static_cast<int>(expand_->overlap_length());
  start_index = std::max(start_position, start_index);
  start_index = (input_length > start_index) ? 0 : (start_index - input_length);
  // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
  int start_index_downsamp = start_index / (fs_mult_ * 2);

  // Calculate a modified |stop_position_downsamp| to account for the increased
  // start index |start_index_downsamp| and the effective array length.
  int modified_stop_pos =
      std::min(stop_position_downsamp,
               kMaxCorrelationLength + pad_length - start_index_downsamp);
  int best_correlation_index;
  int16_t best_correlation;
  static const int kNumCorrelationCandidates = 1;
  DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
                           modified_stop_pos, kNumCorrelationCandidates,
                           fs_mult_, &best_correlation_index,
                           &best_correlation);
  // Compensate for modified start index.
  best_correlation_index += start_index;

  // Ensure that underrun does not occur for 10ms case => we have to get at
  // least 10ms + overlap . (This should never happen thanks to the above
  // modification of peak-finding starting point.)
  while (((best_correlation_index + input_length) <
          static_cast<int>(timestamps_per_call_ + expand_->overlap_length())) ||
         ((best_correlation_index + input_length) < start_position)) {
    assert(false);  // Should never happen.
    best_correlation_index += expand_period;  // Jump one lag ahead.
  }
  return best_correlation_index;
}
| 378 | |
turaj@webrtc.org | 8d1cdaa | 2014-04-11 18:47:55 +0000 | [diff] [blame] | 379 | int Merge::RequiredFutureSamples() { |
| 380 | return static_cast<int>(fs_hz_ / 100 * num_channels_); // 10 ms. |
| 381 | } |
| 382 | |
| 383 | |
henrik.lundin@webrtc.org | d94659d | 2013-01-29 12:09:21 +0000 | [diff] [blame] | 384 | } // namespace webrtc |