/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_processing/audio_buffer.h"

#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/channel_buffer.h"
#include "webrtc/modules/audio_processing/common.h"

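// AudioBuffer owns one block of audio for the audio processing module. It
// tracks three per-channel sample counts: the input count (consumed by
// CopyFrom()/DeinterleaveFrom()), the processing count (what the processing
// components operate on), and the output count (produced by
// CopyTo()/InterleaveTo()). A rough usage sketch follows; the caller shown
// here is hypothetical (normally the APM internals drive this buffer), but
// every call matches a method defined in this file:
//
//   // 16 kHz mono in and out, processed internally at 32 kHz.
//   AudioBuffer buffer(160, 1, 320, 1, 160);
//   buffer.CopyFrom(input, 160, AudioProcessing::kMono);
//   buffer.SplitIntoFrequencyBands();
//   // ... run processing on buffer.split_bands(0) / split_bands_f(0) ...
//   buffer.MergeFrequencyBands();
//   buffer.CopyTo(160, AudioProcessing::kMono, output);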
namespace webrtc {
namespace {

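// In layouts that carry a keyboard microphone, the keyboard channel is the
// last channel: index 1 for kMonoAndKeyboard and index 2 for
// kStereoAndKeyboard. These helpers encode that convention.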
bool HasKeyboardChannel(AudioProcessing::ChannelLayout layout) {
  switch (layout) {
    case AudioProcessing::kMono:
    case AudioProcessing::kStereo:
      return false;
    case AudioProcessing::kMonoAndKeyboard:
    case AudioProcessing::kStereoAndKeyboard:
      return true;
  }
  assert(false);
  return false;
}

int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
  switch (layout) {
    case AudioProcessing::kMono:
    case AudioProcessing::kStereo:
      assert(false);
      return -1;
    case AudioProcessing::kMonoAndKeyboard:
      return 1;
    case AudioProcessing::kStereoAndKeyboard:
      return 2;
  }
  assert(false);
  return -1;
}

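// Downmixes a stereo pair to mono by averaging the two samples; works for
// both the int16_t and float representations used in this file.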
template <typename T>
void StereoToMono(const T* left, const T* right, T* out,
                  int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; ++i)
    out[i] = (left[i] + right[i]) / 2;
}

}  // namespace

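// The constructor sets up everything the per-frame calls need: an optional
// mono downmix buffer, per-channel sinc resamplers whenever the input or
// output sample count differs from the processing count, and the band-split
// buffers plus splitting filter when processing runs at 32 kHz or 48 kHz
// (kSamplesPer32kHzChannel or kSamplesPer48kHzChannel samples, i.e. one
// 10 ms frame at those rates), in which case each band holds
// kSamplesPer16kHzChannel samples.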
AudioBuffer::AudioBuffer(int input_samples_per_channel,
                         int num_input_channels,
                         int process_samples_per_channel,
                         int num_process_channels,
                         int output_samples_per_channel)
    : input_samples_per_channel_(input_samples_per_channel),
      num_input_channels_(num_input_channels),
      proc_samples_per_channel_(process_samples_per_channel),
      num_proc_channels_(num_process_channels),
      output_samples_per_channel_(output_samples_per_channel),
      num_channels_(num_process_channels),
      num_bands_(1),
      samples_per_split_channel_(proc_samples_per_channel_),
      mixed_low_pass_valid_(false),
      reference_copied_(false),
      activity_(AudioFrame::kVadUnknown),
      keyboard_data_(NULL),
      channels_(new IFChannelBuffer(proc_samples_per_channel_,
                                    num_proc_channels_)) {
  assert(input_samples_per_channel_ > 0);
  assert(proc_samples_per_channel_ > 0);
  assert(output_samples_per_channel_ > 0);
  assert(num_input_channels_ > 0 && num_input_channels_ <= 2);
  assert(num_proc_channels_ <= num_input_channels_);

  if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
    input_buffer_.reset(new ChannelBuffer<float>(input_samples_per_channel_,
                                                 num_proc_channels_));
  }

  if (input_samples_per_channel_ != proc_samples_per_channel_ ||
      output_samples_per_channel_ != proc_samples_per_channel_) {
    // Create an intermediate buffer for resampling.
    process_buffer_.reset(new ChannelBuffer<float>(proc_samples_per_channel_,
                                                   num_proc_channels_));
  }

  if (input_samples_per_channel_ != proc_samples_per_channel_) {
    input_resamplers_.reserve(num_proc_channels_);
    for (int i = 0; i < num_proc_channels_; ++i) {
      input_resamplers_.push_back(
          new PushSincResampler(input_samples_per_channel_,
                                proc_samples_per_channel_));
    }
  }

  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    output_resamplers_.reserve(num_proc_channels_);
    for (int i = 0; i < num_proc_channels_; ++i) {
      output_resamplers_.push_back(
          new PushSincResampler(proc_samples_per_channel_,
                                output_samples_per_channel_));
    }
  }

  if (proc_samples_per_channel_ == kSamplesPer32kHzChannel ||
      proc_samples_per_channel_ == kSamplesPer48kHzChannel) {
    samples_per_split_channel_ = kSamplesPer16kHzChannel;
    num_bands_ = proc_samples_per_channel_ / samples_per_split_channel_;
    split_channels_.push_back(new IFChannelBuffer(samples_per_split_channel_,
                                                  num_proc_channels_));
    split_channels_.push_back(new IFChannelBuffer(samples_per_split_channel_,
                                                  num_proc_channels_));
    splitting_filter_.reset(new SplittingFilter(num_proc_channels_));
    if (proc_samples_per_channel_ == kSamplesPer48kHzChannel) {
      split_channels_.push_back(new IFChannelBuffer(samples_per_split_channel_,
                                                    num_proc_channels_));
    }
  }
  bands_.reset(new int16_t*[num_proc_channels_ * kMaxNumBands]);
  bands_f_.reset(new float*[num_proc_channels_ * kMaxNumBands]);
}

AudioBuffer::~AudioBuffer() {}

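// Accepts deinterleaved float input in the [-1, 1] range and stages it for
// processing: remember the keyboard channel pointer if the layout has one,
// downmix stereo to mono if requested, resample to the processing rate, and
// finally scale into the int16 value range. FloatToFloatS16() keeps float
// precision while mapping [-1, 1] onto [-32768, 32767] (see the audio_util
// helpers in common_audio).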
void AudioBuffer::CopyFrom(const float* const* data,
                           int samples_per_channel,
                           AudioProcessing::ChannelLayout layout) {
  assert(samples_per_channel == input_samples_per_channel_);
  assert(ChannelsFromLayout(layout) == num_input_channels_);
  InitForNewData();

  if (HasKeyboardChannel(layout)) {
    keyboard_data_ = data[KeyboardChannelIndex(layout)];
  }

  // Downmix.
  const float* const* data_ptr = data;
  if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
    StereoToMono(data[0],
                 data[1],
                 input_buffer_->channel(0),
                 input_samples_per_channel_);
    data_ptr = input_buffer_->channels();
  }

  // Resample.
  if (input_samples_per_channel_ != proc_samples_per_channel_) {
    for (int i = 0; i < num_proc_channels_; ++i) {
      input_resamplers_[i]->Resample(data_ptr[i],
                                     input_samples_per_channel_,
                                     process_buffer_->channel(i),
                                     proc_samples_per_channel_);
    }
    data_ptr = process_buffer_->channels();
  }

  // Convert to the S16 range.
  for (int i = 0; i < num_proc_channels_; ++i) {
    FloatToFloatS16(data_ptr[i], proc_samples_per_channel_,
                    channels_->fbuf()->channel(i));
  }
}

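// The inverse of CopyFrom(): rescale the processed data from the int16 range
// back to [-1, 1] floats, then resample to the output rate if it differs from
// the processing rate.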
void AudioBuffer::CopyTo(int samples_per_channel,
                         AudioProcessing::ChannelLayout layout,
                         float* const* data) {
  assert(samples_per_channel == output_samples_per_channel_);
  assert(ChannelsFromLayout(layout) == num_channels_);

  // Convert to the float range.
  float* const* data_ptr = data;
  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    // Convert to an intermediate buffer for subsequent resampling.
    data_ptr = process_buffer_->channels();
  }
  for (int i = 0; i < num_channels_; ++i) {
    FloatS16ToFloat(channels_->fbuf()->channel(i), proc_samples_per_channel_,
                    data_ptr[i]);
  }

  // Resample.
  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    for (int i = 0; i < num_channels_; ++i) {
      output_resamplers_[i]->Resample(data_ptr[i],
                                      proc_samples_per_channel_,
                                      data[i],
                                      output_samples_per_channel_);
    }
  }
}

void AudioBuffer::InitForNewData() {
  keyboard_data_ = NULL;
  mixed_low_pass_valid_ = false;
  reference_copied_ = false;
  activity_ = AudioFrame::kVadUnknown;
  num_channels_ = num_proc_channels_;
}

const int16_t* AudioBuffer::data_const(int channel) const {
  return channels_const()[channel];
}

int16_t* AudioBuffer::data(int channel) {
  return channels()[channel];
}

const int16_t* const* AudioBuffer::channels_const() const {
  return channels_->ibuf_const()->channels();
}

int16_t* const* AudioBuffer::channels() {
  mixed_low_pass_valid_ = false;
  return channels_->ibuf()->channels();
}

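// The split_bands*() accessors return, for one channel, an array of band
// pointers indexed by the Band enum: bands_[kMaxNumBands * channel + band].
// When no band splitting is active, only kBand0To8kHz is non-NULL and it
// aliases the full-band data. The non-const variants also invalidate the
// cached mixed-low-pass data, since the caller may modify the samples.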
const int16_t* const* AudioBuffer::split_bands_const(int channel) const {
  // This is necessary to make sure that the int16_t data is up to date in the
  // IFChannelBuffer.
  // TODO(aluebs): Having to depend on this to get the updated data is bug
  // prone. One solution is to have ChannelBuffer track the bands as well.
  for (int i = 0; i < kMaxNumBands; ++i) {
    int16_t* const* channels =
        const_cast<int16_t* const*>(split_channels_const(static_cast<Band>(i)));
    bands_[kMaxNumBands * channel + i] = channels ? channels[channel] : NULL;
  }
  return &bands_[kMaxNumBands * channel];
}

int16_t* const* AudioBuffer::split_bands(int channel) {
  mixed_low_pass_valid_ = false;
  // This is necessary to make sure that the int16_t data is up to date and the
  // float data is marked as invalid in the IFChannelBuffer.
  for (int i = 0; i < kMaxNumBands; ++i) {
    int16_t* const* channels = split_channels(static_cast<Band>(i));
    bands_[kMaxNumBands * channel + i] = channels ? channels[channel] : NULL;
  }
  return &bands_[kMaxNumBands * channel];
}

const int16_t* const* AudioBuffer::split_channels_const(Band band) const {
  if (split_channels_.size() > static_cast<size_t>(band)) {
    return split_channels_[band]->ibuf_const()->channels();
  } else {
    return band == kBand0To8kHz ? channels_->ibuf_const()->channels() : NULL;
  }
}

int16_t* const* AudioBuffer::split_channels(Band band) {
  mixed_low_pass_valid_ = false;
  if (split_channels_.size() > static_cast<size_t>(band)) {
    return split_channels_[band]->ibuf()->channels();
  } else {
    return band == kBand0To8kHz ? channels_->ibuf()->channels() : NULL;
  }
}

const float* AudioBuffer::data_const_f(int channel) const {
  return channels_const_f()[channel];
}

float* AudioBuffer::data_f(int channel) {
  return channels_f()[channel];
}

const float* const* AudioBuffer::channels_const_f() const {
  return channels_->fbuf_const()->channels();
}

float* const* AudioBuffer::channels_f() {
  mixed_low_pass_valid_ = false;
  return channels_->fbuf()->channels();
}

const float* const* AudioBuffer::split_bands_const_f(int channel) const {
  // This is necessary to make sure that the float data is up to date in the
  // IFChannelBuffer.
  for (int i = 0; i < kMaxNumBands; ++i) {
    float* const* channels =
        const_cast<float* const*>(split_channels_const_f(static_cast<Band>(i)));
    bands_f_[kMaxNumBands * channel + i] = channels ? channels[channel] : NULL;
  }
  return &bands_f_[kMaxNumBands * channel];
}

float* const* AudioBuffer::split_bands_f(int channel) {
  mixed_low_pass_valid_ = false;
  // This is necessary to make sure that the float data is up to date and the
  // int16_t data is marked as invalid in the IFChannelBuffer.
  for (int i = 0; i < kMaxNumBands; ++i) {
    float* const* channels = split_channels_f(static_cast<Band>(i));
    bands_f_[kMaxNumBands * channel + i] = channels ? channels[channel] : NULL;
  }
  return &bands_f_[kMaxNumBands * channel];
}

const float* const* AudioBuffer::split_channels_const_f(Band band) const {
  if (split_channels_.size() > static_cast<size_t>(band)) {
    return split_channels_[band]->fbuf_const()->channels();
  } else {
    return band == kBand0To8kHz ? channels_->fbuf_const()->channels() : NULL;
  }
}

float* const* AudioBuffer::split_channels_f(Band band) {
  mixed_low_pass_valid_ = false;
  if (split_channels_.size() > static_cast<size_t>(band)) {
    return split_channels_[band]->fbuf()->channels();
  } else {
    return band == kBand0To8kHz ? channels_->fbuf()->channels() : NULL;
  }
}

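// Returns a mono version of the low band. For mono processing this is simply
// band 0 of channel 0; for stereo the two low bands are averaged into a
// lazily allocated cache that stays valid until any mutable accessor
// invalidates it by clearing mixed_low_pass_valid_.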
const int16_t* AudioBuffer::mixed_low_pass_data() {
  // Currently only mixing stereo to mono is supported.
  assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);

  if (num_proc_channels_ == 1) {
    return split_bands_const(0)[kBand0To8kHz];
  }

  if (!mixed_low_pass_valid_) {
    if (!mixed_low_pass_channels_.get()) {
      mixed_low_pass_channels_.reset(
          new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
    }
    StereoToMono(split_bands_const(0)[kBand0To8kHz],
                 split_bands_const(1)[kBand0To8kHz],
                 mixed_low_pass_channels_->data(),
                 samples_per_split_channel_);
    mixed_low_pass_valid_ = true;
  }
  return mixed_low_pass_channels_->data();
}

const int16_t* AudioBuffer::low_pass_reference(int channel) const {
  if (!reference_copied_) {
    return NULL;
  }

  return low_pass_reference_channels_->channel(channel);
}

const float* AudioBuffer::keyboard_data() const {
  return keyboard_data_;
}

void AudioBuffer::set_activity(AudioFrame::VADActivity activity) {
  activity_ = activity;
}

AudioFrame::VADActivity AudioBuffer::activity() const {
  return activity_;
}

int AudioBuffer::num_channels() const {
  return num_channels_;
}

void AudioBuffer::set_num_channels(int num_channels) {
  num_channels_ = num_channels;
}

int AudioBuffer::samples_per_channel() const {
  return proc_samples_per_channel_;
}

int AudioBuffer::samples_per_split_channel() const {
  return samples_per_split_channel_;
}

int AudioBuffer::samples_per_keyboard_channel() const {
  // We don't resample the keyboard channel.
  return input_samples_per_channel_;
}

int AudioBuffer::num_bands() const {
  return num_bands_;
}

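// Int16 entry point, mirroring CopyFrom(): takes an interleaved AudioFrame at
// the processing rate, deinterleaves it into per-channel buffers, and downmixes
// stereo to mono in the same pass when only one processing channel is used.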
// TODO(andrew): Do deinterleaving and mixing in one step?
void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
  assert(proc_samples_per_channel_ == input_samples_per_channel_);
  assert(frame->num_channels_ == num_input_channels_);
  assert(frame->samples_per_channel_ == proc_samples_per_channel_);
  InitForNewData();
  activity_ = frame->vad_activity_;

  if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
    // Downmix directly; no explicit deinterleaving needed.
    int16_t* downmixed = channels_->ibuf()->channel(0);
    for (int i = 0; i < input_samples_per_channel_; ++i) {
      downmixed[i] = (frame->data_[i * 2] + frame->data_[i * 2 + 1]) / 2;
    }
  } else {
    assert(num_proc_channels_ == num_input_channels_);
    int16_t* interleaved = frame->data_;
    for (int i = 0; i < num_proc_channels_; ++i) {
      int16_t* deinterleaved = channels_->ibuf()->channel(i);
      int interleaved_idx = i;
      for (int j = 0; j < proc_samples_per_channel_; ++j) {
        deinterleaved[j] = interleaved[interleaved_idx];
        interleaved_idx += num_proc_channels_;
      }
    }
  }
}

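// Int16 exit point, mirroring CopyTo(): re-interleaves the per-channel buffers
// back into the AudioFrame and propagates the VAD decision. The copy is
// skipped entirely when the caller reports that no component modified the
// data.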
void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
  assert(proc_samples_per_channel_ == output_samples_per_channel_);
  assert(num_channels_ == num_input_channels_);
  assert(frame->num_channels_ == num_channels_);
  assert(frame->samples_per_channel_ == proc_samples_per_channel_);
  frame->vad_activity_ = activity_;

  if (!data_changed) {
    return;
  }

  int16_t* interleaved = frame->data_;
  for (int i = 0; i < num_channels_; i++) {
    int16_t* deinterleaved = channels_->ibuf()->channel(i);
    int interleaved_idx = i;
    for (int j = 0; j < proc_samples_per_channel_; j++) {
      interleaved[interleaved_idx] = deinterleaved[j];
      interleaved_idx += num_channels_;
    }
  }
}

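// Snapshots the low band of every processing channel so that a later stage can
// read the pre-modification samples through low_pass_reference(). Until this
// has been called for the current frame, low_pass_reference() returns NULL.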
void AudioBuffer::CopyLowPassToReference() {
  reference_copied_ = true;
  if (!low_pass_reference_channels_.get() ||
      low_pass_reference_channels_->num_channels() != num_channels_) {
    low_pass_reference_channels_.reset(
        new ChannelBuffer<int16_t>(samples_per_split_channel_,
                                   num_proc_channels_));
  }
  for (int i = 0; i < num_proc_channels_; i++) {
    low_pass_reference_channels_->CopyFrom(split_bands_const(i)[kBand0To8kHz],
                                           i);
  }
}

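// Analysis/synthesis around the frequency-band representation: Analysis()
// fills split_channels_ (two bands at 32 kHz processing, three at 48 kHz, as
// set up in the constructor) and Synthesis() recombines them into the
// full-band channels_ buffer.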
void AudioBuffer::SplitIntoFrequencyBands() {
  splitting_filter_->Analysis(channels_.get(),
                              split_channels_.get());
}

void AudioBuffer::MergeFrequencyBands() {
  splitting_filter_->Synthesis(split_channels_.get(),
                               channels_.get());
}

}  // namespace webrtc