/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_processing/audio_buffer.h"

#include <assert.h>

#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"

namespace webrtc {
namespace {

enum {
  kSamplesPer8kHzChannel = 80,
  kSamplesPer16kHzChannel = 160,
  kSamplesPer32kHzChannel = 320
};

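// Returns true if |layout| includes a dedicated keyboard channel.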
bool HasKeyboardChannel(AudioProcessing::ChannelLayout layout) {
  switch (layout) {
    case AudioProcessing::kMono:
    case AudioProcessing::kStereo:
      return false;
    case AudioProcessing::kMonoAndKeyboard:
    case AudioProcessing::kStereoAndKeyboard:
      return true;
  }
  assert(false);
  return false;
}

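// Returns the index of the keyboard channel for layouts that have one;
// asserts (and returns -1) for layouts that do not.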
int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
  switch (layout) {
    case AudioProcessing::kMono:
    case AudioProcessing::kStereo:
      assert(false);
      return -1;
    case AudioProcessing::kMonoAndKeyboard:
      return 1;
    case AudioProcessing::kStereoAndKeyboard:
      return 2;
  }
  assert(false);
  return -1;
}

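// Downmixes a stereo pair to mono by averaging the left and right samples.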
void StereoToMono(const float* left, const float* right, float* out,
                  int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; ++i) {
    out[i] = (left[i] + right[i]) / 2;
  }
}

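// Fixed-point variant. The operands are promoted to int before the addition,
// so the sum cannot overflow; the shift then halves it.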
void StereoToMono(const int16_t* left, const int16_t* right, int16_t* out,
                  int samples_per_channel) {
  for (int i = 0; i < samples_per_channel; ++i) {
    out[i] = (left[i] + right[i]) >> 1;
  }
}

}  // namespace

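// Owns the low- and high-band halves of a band-split signal, one pair of
// buffers per channel.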
class SplitChannelBuffer {
 public:
  SplitChannelBuffer(int samples_per_split_channel, int num_channels)
      : low_(samples_per_split_channel, num_channels),
        high_(samples_per_split_channel, num_channels) {
  }
  ~SplitChannelBuffer() {}

  int16_t* low_channel(int i) { return low_.channel(i); }
  int16_t* high_channel(int i) { return high_.channel(i); }

 private:
  ChannelBuffer<int16_t> low_;
  ChannelBuffer<int16_t> high_;
};

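// The buffer tracks three configurations: the rate and channel count at which
// audio arrives (input), the ones used internally (process), and the rate at
// which it leaves (output). Downmix and resampling helpers are only allocated
// for the conversions that are actually needed.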
AudioBuffer::AudioBuffer(int input_samples_per_channel,
                         int num_input_channels,
                         int process_samples_per_channel,
                         int num_process_channels,
                         int output_samples_per_channel)
  : input_samples_per_channel_(input_samples_per_channel),
    num_input_channels_(num_input_channels),
    proc_samples_per_channel_(process_samples_per_channel),
    num_proc_channels_(num_process_channels),
    output_samples_per_channel_(output_samples_per_channel),
    samples_per_split_channel_(proc_samples_per_channel_),
    num_mixed_channels_(0),
    num_mixed_low_pass_channels_(0),
    reference_copied_(false),
    activity_(AudioFrame::kVadUnknown),
    is_muted_(false),
    data_(NULL),
    keyboard_data_(NULL),
    channels_(new ChannelBuffer<int16_t>(proc_samples_per_channel_,
                                         num_proc_channels_)) {
  assert(input_samples_per_channel_ > 0);
  assert(proc_samples_per_channel_ > 0);
  assert(output_samples_per_channel_ > 0);
  assert(num_input_channels_ > 0 && num_input_channels_ <= 2);
  assert(num_proc_channels_ <= num_input_channels);

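  // A float buffer to hold the mono downmix of a stereo input before it is
  // resampled and converted.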
  if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
    input_buffer_.reset(new ChannelBuffer<float>(input_samples_per_channel_,
                                                 num_proc_channels_));
  }

  if (input_samples_per_channel_ != proc_samples_per_channel_ ||
      output_samples_per_channel_ != proc_samples_per_channel_) {
    // Create an intermediate buffer for resampling.
    process_buffer_.reset(new ChannelBuffer<float>(proc_samples_per_channel_,
                                                   num_proc_channels_));
  }

  if (input_samples_per_channel_ != proc_samples_per_channel_) {
    input_resamplers_.reserve(num_proc_channels_);
    for (int i = 0; i < num_proc_channels_; ++i) {
      input_resamplers_.push_back(
          new PushSincResampler(input_samples_per_channel_,
                                proc_samples_per_channel_));
    }
  }

  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    output_resamplers_.reserve(num_proc_channels_);
    for (int i = 0; i < num_proc_channels_; ++i) {
      output_resamplers_.push_back(
          new PushSincResampler(proc_samples_per_channel_,
                                output_samples_per_channel_));
    }
  }

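  // At 32 kHz the audio is processed as two 16 kHz bands; allocate the split
  // buffers and the per-channel splitting-filter states.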
  if (proc_samples_per_channel_ == kSamplesPer32kHzChannel) {
    samples_per_split_channel_ = kSamplesPer16kHzChannel;
    split_channels_.reset(new SplitChannelBuffer(samples_per_split_channel_,
                                                 num_proc_channels_));
    filter_states_.reset(new SplitFilterStates[num_proc_channels_]);
  }
}

AudioBuffer::~AudioBuffer() {}

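// Imports deinterleaved float data: grabs the keyboard channel if present,
// downmixes stereo to mono if needed, resamples to the processing rate if
// needed, and finally converts to int16.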
void AudioBuffer::CopyFrom(const float* const* data,
                           int samples_per_channel,
                           AudioProcessing::ChannelLayout layout) {
  assert(samples_per_channel == input_samples_per_channel_);
  assert(ChannelsFromLayout(layout) == num_input_channels_);
  InitForNewData();

  if (HasKeyboardChannel(layout)) {
    keyboard_data_ = data[KeyboardChannelIndex(layout)];
  }

  // Downmix.
  const float* const* data_ptr = data;
  if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
    StereoToMono(data[0],
                 data[1],
                 input_buffer_->channel(0),
                 input_samples_per_channel_);
    data_ptr = input_buffer_->channels();
  }

  // Resample.
  if (input_samples_per_channel_ != proc_samples_per_channel_) {
    for (int i = 0; i < num_proc_channels_; ++i) {
      input_resamplers_[i]->Resample(data_ptr[i],
                                     input_samples_per_channel_,
                                     process_buffer_->channel(i),
                                     proc_samples_per_channel_);
    }
    data_ptr = process_buffer_->channels();
  }

  // Convert to int16.
  for (int i = 0; i < num_proc_channels_; ++i) {
    ScaleAndRoundToInt16(data_ptr[i], proc_samples_per_channel_,
                         channels_->channel(i));
  }
}

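// Exports the processed int16 data as float, resampling to the output rate
// when it differs from the processing rate.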
void AudioBuffer::CopyTo(int samples_per_channel,
                         AudioProcessing::ChannelLayout layout,
                         float* const* data) {
  assert(samples_per_channel == output_samples_per_channel_);
  assert(ChannelsFromLayout(layout) == num_proc_channels_);

  // Convert to float.
  float* const* data_ptr = data;
  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    // Convert to an intermediate buffer for subsequent resampling.
    data_ptr = process_buffer_->channels();
  }
  for (int i = 0; i < num_proc_channels_; ++i) {
    ScaleToFloat(channels_->channel(i), proc_samples_per_channel_, data_ptr[i]);
  }

  // Resample.
  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    for (int i = 0; i < num_proc_channels_; ++i) {
      output_resamplers_[i]->Resample(data_ptr[i],
                                      proc_samples_per_channel_,
                                      data[i],
                                      output_samples_per_channel_);
    }
  }
}

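// Resets per-frame state before new audio is copied in.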
void AudioBuffer::InitForNewData() {
  data_ = NULL;
  keyboard_data_ = NULL;
  num_mixed_channels_ = 0;
  num_mixed_low_pass_channels_ = 0;
  reference_copied_ = false;
  activity_ = AudioFrame::kVadUnknown;
  is_muted_ = false;
}

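// Returns the int16 samples for |channel|. When a mono AudioFrame has been
// wrapped via DeinterleaveFrom(), |data_| points directly at that frame's
// memory.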
const int16_t* AudioBuffer::data(int channel) const {
  assert(channel >= 0 && channel < num_proc_channels_);
  if (data_ != NULL) {
    assert(channel == 0 && num_proc_channels_ == 1);
    return data_;
  }

  return channels_->channel(channel);
}

int16_t* AudioBuffer::data(int channel) {
  const AudioBuffer* t = this;
  return const_cast<int16_t*>(t->data(channel));
}

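// Returns the low band for |channel|, or the full-band data when no band
// split is in use.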
const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
  assert(channel >= 0 && channel < num_proc_channels_);
  if (split_channels_.get() == NULL) {
    return data(channel);
  }

  return split_channels_->low_channel(channel);
}

int16_t* AudioBuffer::low_pass_split_data(int channel) {
  const AudioBuffer* t = this;
  return const_cast<int16_t*>(t->low_pass_split_data(channel));
}

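// Returns the high band for |channel|, or NULL when no band split is in use.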
const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
  assert(channel >= 0 && channel < num_proc_channels_);
  if (split_channels_.get() == NULL) {
    return NULL;
  }

  return split_channels_->high_channel(channel);
}

int16_t* AudioBuffer::high_pass_split_data(int channel) {
  const AudioBuffer* t = this;
  return const_cast<int16_t*>(t->high_pass_split_data(channel));
}

const int16_t* AudioBuffer::mixed_data(int channel) const {
  assert(channel >= 0 && channel < num_mixed_channels_);

  return mixed_channels_->channel(channel);
}

const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
  assert(channel >= 0 && channel < num_mixed_low_pass_channels_);

  return mixed_low_pass_channels_->channel(channel);
}

const int16_t* AudioBuffer::low_pass_reference(int channel) const {
  assert(channel >= 0 && channel < num_proc_channels_);
  if (!reference_copied_) {
    return NULL;
  }

  return low_pass_reference_channels_->channel(channel);
}

const float* AudioBuffer::keyboard_data() const {
  return keyboard_data_;
}

SplitFilterStates* AudioBuffer::filter_states(int channel) {
  assert(channel >= 0 && channel < num_proc_channels_);
  return &filter_states_[channel];
}

void AudioBuffer::set_activity(AudioFrame::VADActivity activity) {
  activity_ = activity;
}

AudioFrame::VADActivity AudioBuffer::activity() const {
  return activity_;
}

bool AudioBuffer::is_muted() const {
  return is_muted_;
}

int AudioBuffer::num_channels() const {
  return num_proc_channels_;
}

int AudioBuffer::samples_per_channel() const {
  return proc_samples_per_channel_;
}

int AudioBuffer::samples_per_split_channel() const {
  return samples_per_split_channel_;
}

int AudioBuffer::samples_per_keyboard_channel() const {
  // We don't resample the keyboard channel.
  return input_samples_per_channel_;
}

// TODO(andrew): Do deinterleaving and mixing in one step?
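// Wraps an int16 AudioFrame. A mono frame is referenced in place; a
// multi-channel frame is deinterleaved into |channels_|.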
void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
  assert(proc_samples_per_channel_ == input_samples_per_channel_);
  assert(num_proc_channels_ == num_input_channels_);
  assert(frame->num_channels_ == num_proc_channels_);
  assert(frame->samples_per_channel_ == proc_samples_per_channel_);
  InitForNewData();
  activity_ = frame->vad_activity_;
  if (frame->energy_ == 0) {
    is_muted_ = true;
  }

  if (num_proc_channels_ == 1) {
    // We can get away with a pointer assignment in this case.
    data_ = frame->data_;
    return;
  }

  int16_t* interleaved = frame->data_;
  for (int i = 0; i < num_proc_channels_; i++) {
    int16_t* deinterleaved = channels_->channel(i);
    int interleaved_idx = i;
    for (int j = 0; j < proc_samples_per_channel_; j++) {
      deinterleaved[j] = interleaved[interleaved_idx];
      interleaved_idx += num_proc_channels_;
    }
  }
}

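// Writes the processed data back into |frame|, re-interleaving when more than
// one channel is present. The VAD decision is propagated even if the audio
// itself was not changed.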
void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
  assert(proc_samples_per_channel_ == output_samples_per_channel_);
  assert(num_proc_channels_ == num_input_channels_);
  assert(frame->num_channels_ == num_proc_channels_);
  assert(frame->samples_per_channel_ == proc_samples_per_channel_);
  frame->vad_activity_ = activity_;

  if (!data_changed) {
    return;
  }

  if (num_proc_channels_ == 1) {
    assert(data_ == frame->data_);
    return;
  }

  int16_t* interleaved = frame->data_;
  for (int i = 0; i < num_proc_channels_; i++) {
    int16_t* deinterleaved = channels_->channel(i);
    int interleaved_idx = i;
    for (int j = 0; j < proc_samples_per_channel_; j++) {
      interleaved[interleaved_idx] = deinterleaved[j];
      interleaved_idx += num_proc_channels_;
    }
  }
}

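// Produces a mono mix of the full-band channels, allocating the mix buffer on
// first use.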
void AudioBuffer::CopyAndMix(int num_mixed_channels) {
  // We currently only support the stereo to mono case.
  assert(num_proc_channels_ == 2);
  assert(num_mixed_channels == 1);
  if (!mixed_channels_.get()) {
    mixed_channels_.reset(
        new ChannelBuffer<int16_t>(proc_samples_per_channel_,
                                   num_mixed_channels));
  }

  StereoToMono(channels_->channel(0),
               channels_->channel(1),
               mixed_channels_->channel(0),
               proc_samples_per_channel_);

  num_mixed_channels_ = num_mixed_channels;
}

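// Same as CopyAndMix(), but operates on the low-band split data.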
void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
  // We currently only support the stereo to mono case.
  assert(num_proc_channels_ == 2);
  assert(num_mixed_channels == 1);
  if (!mixed_low_pass_channels_.get()) {
    mixed_low_pass_channels_.reset(
        new ChannelBuffer<int16_t>(samples_per_split_channel_,
                                   num_mixed_channels));
  }

  StereoToMono(low_pass_split_data(0),
               low_pass_split_data(1),
               mixed_low_pass_channels_->channel(0),
               samples_per_split_channel_);

  num_mixed_low_pass_channels_ = num_mixed_channels;
}

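// Copies the current low-band data into a reference buffer that later
// processing stages can read back via low_pass_reference().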
void AudioBuffer::CopyLowPassToReference() {
  reference_copied_ = true;
  if (!low_pass_reference_channels_.get()) {
    low_pass_reference_channels_.reset(
        new ChannelBuffer<int16_t>(samples_per_split_channel_,
                                   num_proc_channels_));
  }
  for (int i = 0; i < num_proc_channels_; i++) {
    low_pass_reference_channels_->CopyFrom(low_pass_split_data(i), i);
  }
}

}  // namespace webrtc