blob: 696c5b998b9d7a89387743cc06a98dc66be97c13 [file] [log] [blame]
niklase@google.com470e71d2011-07-07 08:21:25 +00001/*
andrew@webrtc.org63a50982012-05-02 23:56:37 +00002 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
niklase@google.com470e71d2011-07-07 08:21:25 +00003 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
pbos@webrtc.org7fad4b82013-05-28 08:11:59 +000011#include "webrtc/modules/audio_processing/audio_buffer.h"
niklase@google.com470e71d2011-07-07 08:21:25 +000012
andrew@webrtc.orgddbb8a22014-04-22 21:00:04 +000013#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
pbos@webrtc.org7fad4b82013-05-28 08:11:59 +000014#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
aluebs@webrtc.org79b9eba2014-11-26 20:21:38 +000015#include "webrtc/modules/audio_processing/channel_buffer.h"
aluebs@webrtc.org87893762014-11-27 23:40:25 +000016#include "webrtc/modules/audio_processing/common.h"
andrew@webrtc.org755b04a2011-11-15 16:57:56 +000017
niklase@google.com470e71d2011-07-07 08:21:25 +000018namespace webrtc {
19namespace {
20
andrew@webrtc.org103657b2014-04-24 18:28:56 +000021bool HasKeyboardChannel(AudioProcessing::ChannelLayout layout) {
22 switch (layout) {
23 case AudioProcessing::kMono:
24 case AudioProcessing::kStereo:
25 return false;
26 case AudioProcessing::kMonoAndKeyboard:
27 case AudioProcessing::kStereoAndKeyboard:
28 return true;
29 }
30 assert(false);
31 return false;
32}
33
34int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
35 switch (layout) {
36 case AudioProcessing::kMono:
37 case AudioProcessing::kStereo:
38 assert(false);
39 return -1;
40 case AudioProcessing::kMonoAndKeyboard:
41 return 1;
42 case AudioProcessing::kStereoAndKeyboard:
43 return 2;
44 }
45 assert(false);
46 return -1;
47}
48
// Averages |left| and |right| sample-by-sample into |out|. For int16_t
// inputs the addition is performed after integral promotion to int, so
// the intermediate sum cannot overflow; integer division truncates
// toward zero.
template <typename T>
void StereoToMono(const T* left, const T* right, T* out,
                  int samples_per_channel) {
  for (int n = 0; n < samples_per_channel; ++n) {
    out[n] = (left[n] + right[n]) / 2;
  }
}
55
niklase@google.com470e71d2011-07-07 08:21:25 +000056} // namespace
57
andrew@webrtc.orgddbb8a22014-04-22 21:00:04 +000058AudioBuffer::AudioBuffer(int input_samples_per_channel,
59 int num_input_channels,
60 int process_samples_per_channel,
61 int num_process_channels,
62 int output_samples_per_channel)
63 : input_samples_per_channel_(input_samples_per_channel),
64 num_input_channels_(num_input_channels),
65 proc_samples_per_channel_(process_samples_per_channel),
66 num_proc_channels_(num_process_channels),
67 output_samples_per_channel_(output_samples_per_channel),
68 samples_per_split_channel_(proc_samples_per_channel_),
aluebs@webrtc.org2561d522014-07-17 08:27:39 +000069 mixed_low_pass_valid_(false),
andrew@webrtc.orged083d42011-09-19 15:28:51 +000070 reference_copied_(false),
71 activity_(AudioFrame::kVadUnknown),
andrew@webrtc.org103657b2014-04-24 18:28:56 +000072 keyboard_data_(NULL),
mflodman@webrtc.orgd5da2502014-05-15 11:17:21 +000073 channels_(new IFChannelBuffer(proc_samples_per_channel_,
74 num_proc_channels_)) {
andrew@webrtc.orgddbb8a22014-04-22 21:00:04 +000075 assert(input_samples_per_channel_ > 0);
76 assert(proc_samples_per_channel_ > 0);
77 assert(output_samples_per_channel_ > 0);
78 assert(num_input_channels_ > 0 && num_input_channels_ <= 2);
79 assert(num_proc_channels_ <= num_input_channels);
niklase@google.com470e71d2011-07-07 08:21:25 +000080
andrew@webrtc.orgddbb8a22014-04-22 21:00:04 +000081 if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
82 input_buffer_.reset(new ChannelBuffer<float>(input_samples_per_channel_,
83 num_proc_channels_));
84 }
85
86 if (input_samples_per_channel_ != proc_samples_per_channel_ ||
87 output_samples_per_channel_ != proc_samples_per_channel_) {
88 // Create an intermediate buffer for resampling.
89 process_buffer_.reset(new ChannelBuffer<float>(proc_samples_per_channel_,
90 num_proc_channels_));
91 }
92
93 if (input_samples_per_channel_ != proc_samples_per_channel_) {
94 input_resamplers_.reserve(num_proc_channels_);
95 for (int i = 0; i < num_proc_channels_; ++i) {
96 input_resamplers_.push_back(
97 new PushSincResampler(input_samples_per_channel_,
98 proc_samples_per_channel_));
99 }
100 }
101
102 if (output_samples_per_channel_ != proc_samples_per_channel_) {
103 output_resamplers_.reserve(num_proc_channels_);
104 for (int i = 0; i < num_proc_channels_; ++i) {
105 output_resamplers_.push_back(
106 new PushSincResampler(proc_samples_per_channel_,
107 output_samples_per_channel_));
108 }
109 }
110
aluebs@webrtc.org087da132014-11-17 23:01:23 +0000111 if (proc_samples_per_channel_ == kSamplesPer32kHzChannel ||
112 proc_samples_per_channel_ == kSamplesPer48kHzChannel) {
niklase@google.com470e71d2011-07-07 08:21:25 +0000113 samples_per_split_channel_ = kSamplesPer16kHzChannel;
aluebs@webrtc.org79b9eba2014-11-26 20:21:38 +0000114 split_channels_.push_back(new IFChannelBuffer(samples_per_split_channel_,
kwiberg@webrtc.org2b6bc8d2014-07-17 09:46:37 +0000115 num_proc_channels_));
aluebs@webrtc.org79b9eba2014-11-26 20:21:38 +0000116 split_channels_.push_back(new IFChannelBuffer(samples_per_split_channel_,
117 num_proc_channels_));
aluebs@webrtc.orgbe05c742014-11-14 22:18:10 +0000118 splitting_filter_.reset(new SplittingFilter(num_proc_channels_));
aluebs@webrtc.org087da132014-11-17 23:01:23 +0000119 if (proc_samples_per_channel_ == kSamplesPer48kHzChannel) {
aluebs@webrtc.org79b9eba2014-11-26 20:21:38 +0000120 split_channels_.push_back(new IFChannelBuffer(samples_per_split_channel_,
121 num_proc_channels_));
aluebs@webrtc.org087da132014-11-17 23:01:23 +0000122 }
andrew@webrtc.orgddbb8a22014-04-22 21:00:04 +0000123 }
124}
125
andrew@webrtc.org103657b2014-04-24 18:28:56 +0000126AudioBuffer::~AudioBuffer() {}
127
// Imports deinterleaved float data (one pointer per channel) into the
// buffer: captures the keyboard channel, downmixes stereo to mono if
// configured, resamples to the processing rate if needed, and finally
// scales the samples into the S16-valued float range (FloatToFloatS16).
void AudioBuffer::CopyFrom(const float* const* data,
                           int samples_per_channel,
                           AudioProcessing::ChannelLayout layout) {
  assert(samples_per_channel == input_samples_per_channel_);
  assert(ChannelsFromLayout(layout) == num_input_channels_);
  InitForNewData();

  // The keyboard channel is only borrowed (caller-owned memory); it is
  // neither downmixed nor resampled.
  if (HasKeyboardChannel(layout)) {
    keyboard_data_ = data[KeyboardChannelIndex(layout)];
  }

  // Downmix.
  const float* const* data_ptr = data;
  if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
    StereoToMono(data[0],
                 data[1],
                 input_buffer_->channel(0),
                 input_samples_per_channel_);
    // Continue the pipeline from the downmixed staging buffer.
    data_ptr = input_buffer_->channels();
  }

  // Resample.
  if (input_samples_per_channel_ != proc_samples_per_channel_) {
    for (int i = 0; i < num_proc_channels_; ++i) {
      input_resamplers_[i]->Resample(data_ptr[i],
                                     input_samples_per_channel_,
                                     process_buffer_->channel(i),
                                     proc_samples_per_channel_);
    }
    data_ptr = process_buffer_->channels();
  }

  // Convert to the S16 range.
  for (int i = 0; i < num_proc_channels_; ++i) {
    FloatToFloatS16(data_ptr[i], proc_samples_per_channel_,
                    channels_->fbuf()->channel(i));
  }
}
166
// Exports the processed data as deinterleaved floats: rescales each
// channel out of the S16-valued float range (FloatS16ToFloat), then
// resamples to the output rate if it differs from the processing rate,
// using |process_buffer_| as the intermediate.
void AudioBuffer::CopyTo(int samples_per_channel,
                         AudioProcessing::ChannelLayout layout,
                         float* const* data) {
  assert(samples_per_channel == output_samples_per_channel_);
  assert(ChannelsFromLayout(layout) == num_proc_channels_);

  // Convert to the float range.
  float* const* data_ptr = data;
  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    // Convert to an intermediate buffer for subsequent resampling.
    data_ptr = process_buffer_->channels();
  }
  for (int i = 0; i < num_proc_channels_; ++i) {
    FloatS16ToFloat(channels_->fbuf()->channel(i), proc_samples_per_channel_,
                    data_ptr[i]);
  }

  // Resample.
  if (output_samples_per_channel_ != proc_samples_per_channel_) {
    for (int i = 0; i < num_proc_channels_; ++i) {
      output_resamplers_[i]->Resample(data_ptr[i],
                                      proc_samples_per_channel_,
                                      data[i],
                                      output_samples_per_channel_);
    }
  }
}
194
// Resets per-frame state: drops the borrowed keyboard pointer, marks the
// cached low-pass mix and the low-pass reference as stale, and clears the
// VAD decision. Called at the start of each new frame.
void AudioBuffer::InitForNewData() {
  keyboard_data_ = NULL;
  mixed_low_pass_valid_ = false;
  reference_copied_ = false;
  activity_ = AudioFrame::kVadUnknown;
}
201
aluebs@webrtc.orga7384a12014-12-03 01:06:35 +0000202const int16_t* AudioBuffer::data_const(int channel) const {
203 return channels_const()[channel];
niklase@google.com470e71d2011-07-07 08:21:25 +0000204}
205
andrew@webrtc.org65f93382014-04-30 16:44:13 +0000206int16_t* AudioBuffer::data(int channel) {
aluebs@webrtc.orga7384a12014-12-03 01:06:35 +0000207 return channels()[channel];
andrew@webrtc.org65f93382014-04-30 16:44:13 +0000208}
209
// Read-only access to the array of per-channel int16 data pointers at the
// processing rate.
const int16_t* const* AudioBuffer::channels_const() const {
  return channels_->ibuf_const()->channels();
}
213
// Writable access to the per-channel int16 data pointers. Invalidates the
// cached low-pass mono mix, since the caller may modify the samples.
int16_t* const* AudioBuffer::channels() {
  mixed_low_pass_valid_ = false;
  return channels_->ibuf()->channels();
}
218
aluebs@webrtc.orga7384a12014-12-03 01:06:35 +0000219const int16_t* AudioBuffer::split_data_const(int channel, Band band) const {
220 const int16_t* const* chs = split_channels_const(band);
221 return chs ? chs[channel] : NULL;
222}
223
224int16_t* AudioBuffer::split_data(int channel, Band band) {
225 int16_t* const* chs = split_channels(band);
226 return chs ? chs[channel] : NULL;
227}
228
229const int16_t* const* AudioBuffer::split_channels_const(Band band) const {
230 if (split_channels_.size() > static_cast<size_t>(band)) {
231 return split_channels_[band]->ibuf_const()->channels();
232 } else {
233 return band == kBand0To8kHz ? channels_->ibuf_const()->channels() : NULL;
234 }
235}
236
237int16_t* const* AudioBuffer::split_channels(Band band) {
238 mixed_low_pass_valid_ = false;
239 if (split_channels_.size() > static_cast<size_t>(band)) {
240 return split_channels_[band]->ibuf()->channels();
241 } else {
242 return band == kBand0To8kHz ? channels_->ibuf()->channels() : NULL;
243 }
244}
245
246const float* AudioBuffer::data_const_f(int channel) const {
247 return channels_const_f()[channel];
mflodman@webrtc.orgd5da2502014-05-15 11:17:21 +0000248}
249
kwiberg@webrtc.org38214d52014-07-03 09:47:33 +0000250float* AudioBuffer::data_f(int channel) {
aluebs@webrtc.orga7384a12014-12-03 01:06:35 +0000251 return channels_f()[channel];
kwiberg@webrtc.org38214d52014-07-03 09:47:33 +0000252}
253
// Read-only access to the array of per-channel float data pointers at the
// processing rate.
const float* const* AudioBuffer::channels_const_f() const {
  return channels_->fbuf_const()->channels();
}
257
// Writable access to the per-channel float data pointers. Invalidates the
// cached low-pass mono mix, since the caller may modify the samples.
float* const* AudioBuffer::channels_f() {
  mixed_low_pass_valid_ = false;
  return channels_->fbuf()->channels();
}
262
aluebs@webrtc.orga7384a12014-12-03 01:06:35 +0000263const float* AudioBuffer::split_data_const_f(int channel, Band band) const {
264 const float* const* chs = split_channels_const_f(band);
265 return chs ? chs[channel] : NULL;
niklase@google.com470e71d2011-07-07 08:21:25 +0000266}
267
aluebs@webrtc.orga7384a12014-12-03 01:06:35 +0000268float* AudioBuffer::split_data_f(int channel, Band band) {
269 float* const* chs = split_channels_f(band);
270 return chs ? chs[channel] : NULL;
271}
272
273const float* const* AudioBuffer::split_channels_const_f(Band band) const {
274 if (split_channels_.size() > static_cast<size_t>(band)) {
275 return split_channels_[band]->fbuf_const()->channels();
276 } else {
277 return band == kBand0To8kHz ? channels_->fbuf_const()->channels() : NULL;
278 }
279}
280
281float* const* AudioBuffer::split_channels_f(Band band) {
aluebs@webrtc.org2561d522014-07-17 08:27:39 +0000282 mixed_low_pass_valid_ = false;
aluebs@webrtc.orga7384a12014-12-03 01:06:35 +0000283 if (split_channels_.size() > static_cast<size_t>(band)) {
284 return split_channels_[band]->fbuf()->channels();
285 } else {
286 return band == kBand0To8kHz ? channels_->fbuf()->channels() : NULL;
287 }
aluebs@webrtc.org087da132014-11-17 23:01:23 +0000288}
289
// Returns a mono view of the lowest band (kBand0To8kHz). For mono
// configurations this is simply band 0 of channel 0; for stereo the two
// channels are averaged into a lazily-allocated buffer, cached via
// |mixed_low_pass_valid_| until the underlying data is next modified.
const int16_t* AudioBuffer::mixed_low_pass_data() {
  // Currently only mixing stereo to mono is supported.
  assert(num_proc_channels_ == 1 || num_proc_channels_ == 2);

  if (num_proc_channels_ == 1) {
    return split_data_const(0, kBand0To8kHz);
  }

  if (!mixed_low_pass_valid_) {
    // Allocate the mix buffer on first use only.
    if (!mixed_low_pass_channels_.get()) {
      mixed_low_pass_channels_.reset(
          new ChannelBuffer<int16_t>(samples_per_split_channel_, 1));
    }
    StereoToMono(split_data_const(0, kBand0To8kHz),
                 split_data_const(1, kBand0To8kHz),
                 mixed_low_pass_channels_->data(),
                 samples_per_split_channel_);
    mixed_low_pass_valid_ = true;
  }
  return mixed_low_pass_channels_->data();
}
311
andrew@webrtc.org65f93382014-04-30 16:44:13 +0000312const int16_t* AudioBuffer::low_pass_reference(int channel) const {
niklase@google.com470e71d2011-07-07 08:21:25 +0000313 if (!reference_copied_) {
314 return NULL;
315 }
316
andrew@webrtc.orgddbb8a22014-04-22 21:00:04 +0000317 return low_pass_reference_channels_->channel(channel);
niklase@google.com470e71d2011-07-07 08:21:25 +0000318}
319
// Returns the borrowed pointer to the keyboard channel captured by
// CopyFrom(), or NULL if the current frame has none. The memory is owned
// by the CopyFrom() caller and is cleared on the next InitForNewData().
const float* AudioBuffer::keyboard_data() const {
  return keyboard_data_;
}
323
// Stores the VAD decision associated with the current frame.
void AudioBuffer::set_activity(AudioFrame::VADActivity activity) {
  activity_ = activity;
}
327
// Returns the VAD decision for the current frame; kVadUnknown if none has
// been set since the last InitForNewData().
AudioFrame::VADActivity AudioBuffer::activity() const {
  return activity_;
}
331
// Number of channels in the processing path (may be fewer than the input
// channel count when downmixing is configured).
int AudioBuffer::num_channels() const {
  return num_proc_channels_;
}
335
// Samples per channel at the processing rate.
int AudioBuffer::samples_per_channel() const {
  return proc_samples_per_channel_;
}
339
// Samples per channel within each frequency band; equals
// samples_per_channel() when no band splitting is active.
int AudioBuffer::samples_per_split_channel() const {
  return samples_per_split_channel_;
}
343
// Samples per channel of keyboard data in the current frame.
int AudioBuffer::samples_per_keyboard_channel() const {
  // We don't resample the keyboard channel.
  return input_samples_per_channel_;
}
348
// TODO(andrew): Do deinterleaving and mixing in one step?
// Imports an interleaved int16 AudioFrame into the per-channel buffers.
// Input and processing rates must match (no resampling on this path).
// Resets per-frame state and adopts the frame's VAD decision. Stereo
// input with mono processing is downmixed on the fly, without a separate
// deinterleave pass.
void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
  assert(proc_samples_per_channel_ == input_samples_per_channel_);
  assert(frame->num_channels_ == num_input_channels_);
  assert(frame->samples_per_channel_ == proc_samples_per_channel_);
  InitForNewData();
  activity_ = frame->vad_activity_;

  if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
    // Downmix directly; no explicit deinterleaving needed.
    int16_t* downmixed = channels_->ibuf()->channel(0);
    for (int i = 0; i < input_samples_per_channel_; ++i) {
      // Average the interleaved left/right pair; the int16 operands are
      // promoted to int, so the sum cannot overflow before the division.
      downmixed[i] = (frame->data_[i * 2] + frame->data_[i * 2 + 1]) / 2;
    }
  } else {
    assert(num_proc_channels_ == num_input_channels_);
    int16_t* interleaved = frame->data_;
    for (int i = 0; i < num_proc_channels_; ++i) {
      // Gather channel i by striding through the interleaved samples.
      int16_t* deinterleaved = channels_->ibuf()->channel(i);
      int interleaved_idx = i;
      for (int j = 0; j < proc_samples_per_channel_; ++j) {
        deinterleaved[j] = interleaved[interleaved_idx];
        interleaved_idx += num_proc_channels_;
      }
    }
  }
}
376
// Exports the per-channel int16 buffers back into the interleaved
// AudioFrame and propagates the VAD decision. When |data_changed| is
// false, only the VAD state is written and the sample copy is skipped.
void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
  assert(proc_samples_per_channel_ == output_samples_per_channel_);
  assert(num_proc_channels_ == num_input_channels_);
  assert(frame->num_channels_ == num_proc_channels_);
  assert(frame->samples_per_channel_ == proc_samples_per_channel_);
  frame->vad_activity_ = activity_;

  if (!data_changed) {
    return;
  }

  int16_t* interleaved = frame->data_;
  for (int i = 0; i < num_proc_channels_; i++) {
    // Scatter channel i by striding through the interleaved samples.
    int16_t* deinterleaved = channels_->ibuf()->channel(i);
    int interleaved_idx = i;
    for (int j = 0; j < proc_samples_per_channel_; j++) {
      interleaved[interleaved_idx] = deinterleaved[j];
      interleaved_idx += num_proc_channels_;
    }
  }
}
398
// Snapshots the lowest band (kBand0To8kHz) of every processed channel into
// |low_pass_reference_channels_| (allocated on first use) so it can later
// be read back via low_pass_reference().
void AudioBuffer::CopyLowPassToReference() {
  reference_copied_ = true;
  if (!low_pass_reference_channels_.get()) {
    low_pass_reference_channels_.reset(
        new ChannelBuffer<int16_t>(samples_per_split_channel_,
                                   num_proc_channels_));
  }
  for (int i = 0; i < num_proc_channels_; i++) {
    low_pass_reference_channels_->CopyFrom(split_data_const(i, kBand0To8kHz),
                                           i);
  }
}
andrew@webrtc.orgddbb8a22014-04-22 21:00:04 +0000411
// Runs the analysis filter bank, splitting the full-band data in
// |channels_| into the per-band buffers in |split_channels_|.
void AudioBuffer::SplitIntoFrequencyBands() {
  splitting_filter_->Analysis(channels_.get(),
                              split_channels_.get());
}
416
// Runs the synthesis filter bank, recombining the per-band buffers in
// |split_channels_| into the full-band data in |channels_|.
void AudioBuffer::MergeFrequencyBands() {
  splitting_filter_->Synthesis(split_channels_.get(),
                               channels_.get());
}
421
niklase@google.com470e71d2011-07-07 08:21:25 +0000422} // namespace webrtc