Make ChannelBuffer aware of frequency bands
ChannelBuffer now holds two separate arrays, one for the full-band data and one for the band-split data, and gains the corresponding accessors.
This avoids having to refresh the band pointers in AudioBuffer, and it will also allow a general accessor of the form data()[band][channel][sample] (a usage sketch follows the commit trailers).
All files using ChannelBuffer had to be refactored accordingly.
Tested with modules_unittests, common_audio_unittests, audioproc, audioproc_f, voe_cmd_test.
R=andrew@webrtc.org, kwiberg@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/36999004
Cr-Commit-Position: refs/heads/master@{#8318}
git-svn-id: http://webrtc.googlecode.com/svn/trunk@8318 4adac7df-926f-26a2-2b94-8c16560cd09d
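
Usage sketch of the reworked accessors, inferred from the call sites in the diff below; the buffer sizes here are illustrative and the constructor signature is taken from the updated test code:

    // Per-channel access replaces the old flat data() indexing, and
    // num_frames() replaces samples_per_channel(). Assumes ChannelBuffer
    // is in scope (it lives in common_audio at the time of this change).
    ChannelBuffer<float> cb(320 /* num_frames */, 2 /* num_channels */);
    for (int ch = 0; ch < cb.num_channels(); ++ch) {
      float* samples = cb.channels()[ch];
      for (int i = 0; i < cb.num_frames(); ++i)
        samples[i] *= 0.5f;  // Operate on one channel at a time.
    }
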
diff --git a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
index d9ebe8e..0c90758 100644
--- a/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
+++ b/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
@@ -62,15 +62,17 @@
sizeof(*kProcessSampleRates);
void ConvertToFloat(const int16_t* int_data, ChannelBuffer<float>* cb) {
- ChannelBuffer<int16_t> cb_int(cb->samples_per_channel(),
+ ChannelBuffer<int16_t> cb_int(cb->num_frames(),
cb->num_channels());
Deinterleave(int_data,
- cb->samples_per_channel(),
+ cb->num_frames(),
cb->num_channels(),
cb_int.channels());
- S16ToFloat(cb_int.data(),
- cb->samples_per_channel() * cb->num_channels(),
- cb->data());
+ for (int i = 0; i < cb->num_channels(); ++i) {
+ S16ToFloat(cb_int.channels()[i],
+ cb->num_frames(),
+ cb->channels()[i]);
+ }
}
void ConvertToFloat(const AudioFrame& frame, ChannelBuffer<float>* cb) {
@@ -294,7 +296,7 @@
bool ReadChunk(FILE* file, int16_t* int_data, float* float_data,
ChannelBuffer<float>* cb) {
// The files always contain stereo audio.
- size_t frame_size = cb->samples_per_channel() * 2;
+ size_t frame_size = cb->num_frames() * 2;
size_t read_count = fread(int_data, sizeof(int16_t), frame_size, file);
if (read_count != frame_size) {
// Check that the file really ended.
@@ -304,9 +306,9 @@
S16ToFloat(int_data, frame_size, float_data);
if (cb->num_channels() == 1) {
- MixStereoToMono(float_data, cb->data(), cb->samples_per_channel());
+ MixStereoToMono(float_data, cb->channels()[0], cb->num_frames());
} else {
- Deinterleave(float_data, cb->samples_per_channel(), 2,
+ Deinterleave(float_data, cb->num_frames(), 2,
cb->channels());
}
@@ -1250,12 +1252,14 @@
int_data.get(),
float_data.get(),
&src_buf));
- for (int j = 0; j < kNumInputChannels * kSamplesPerChannel; ++j) {
- src_buf.data()[j] *= kScaleFactor;
+ for (int j = 0; j < kNumInputChannels; ++j) {
+ for (int k = 0; k < kSamplesPerChannel; ++k) {
+ src_buf.channels()[j][k] *= kScaleFactor;
+ }
}
EXPECT_EQ(kNoErr,
apm->ProcessStream(src_buf.channels(),
- src_buf.samples_per_channel(),
+ src_buf.num_frames(),
kSampleRateHz,
LayoutFromChannels(src_buf.num_channels()),
kSampleRateHz,
@@ -1273,12 +1277,14 @@
int_data.get(),
float_data.get(),
&src_buf));
- for (int j = 0; j < kNumInputChannels * kSamplesPerChannel; ++j) {
- src_buf.data()[j] *= kScaleFactor;
+ for (int j = 0; j < kNumInputChannels; ++j) {
+ for (int k = 0; k < kSamplesPerChannel; ++k) {
+ src_buf.channels()[j][k] *= kScaleFactor;
+ }
}
EXPECT_EQ(kNoErr,
apm->ProcessStream(src_buf.channels(),
- src_buf.samples_per_channel(),
+ src_buf.num_frames(),
kSampleRateHz,
LayoutFromChannels(src_buf.num_channels()),
kSampleRateHz,
@@ -1648,7 +1654,8 @@
if (msg.channel_size() > 0) {
ASSERT_EQ(revframe_->num_channels_, msg.channel_size());
for (int i = 0; i < msg.channel_size(); ++i) {
- memcpy(revfloat_cb_->channel(i), msg.channel(i).data(),
+ memcpy(revfloat_cb_->channels()[i],
+ msg.channel(i).data(),
msg.channel(i).size());
}
} else {
@@ -1677,7 +1684,8 @@
if (msg.input_channel_size() > 0) {
ASSERT_EQ(frame_->num_channels_, msg.input_channel_size());
for (int i = 0; i < msg.input_channel_size(); ++i) {
- memcpy(float_cb_->channel(i), msg.input_channel(i).data(),
+ memcpy(float_cb_->channels()[i],
+ msg.input_channel(i).data(),
msg.input_channel(i).size());
}
} else {
@@ -1835,7 +1843,6 @@
const int num_output_channels = test->num_output_channels();
const int samples_per_channel = test->sample_rate() *
AudioProcessing::kChunkSizeMs / 1000;
- const int output_length = samples_per_channel * num_output_channels;
Init(test->sample_rate(), test->sample_rate(), test->sample_rate(),
num_input_channels, num_output_channels, num_render_channels, true);
@@ -1876,11 +1883,13 @@
test->sample_rate(),
LayoutFromChannels(num_output_channels),
float_cb_->channels()));
-
- FloatToS16(float_cb_->data(), output_length, output_cb.data());
for (int j = 0; j < num_output_channels; ++j) {
+ FloatToS16(float_cb_->channels()[j],
+ samples_per_channel,
+ output_cb.channels()[j]);
float variance = 0;
- float snr = ComputeSNR(output_int16.channel(j), output_cb.channel(j),
+ float snr = ComputeSNR(output_int16.channels()[j],
+ output_cb.channels()[j],
samples_per_channel, &variance);
#if defined(WEBRTC_AUDIOPROC_FIXED_PROFILE)
// There are a few chunks in the fixed-point profile that give low SNR.
@@ -2171,7 +2180,7 @@
for (int j = 0; j < 10; ++j) {
EXPECT_NOERR(ap->ProcessStream(
in_cb.channels(),
- in_cb.samples_per_channel(),
+ in_cb.num_frames(),
in_rate,
cf[i].in_layout,
out_rate,
@@ -2313,9 +2322,9 @@
// Temporary buffers.
const int max_length =
- 2 * std::max(out_cb.samples_per_channel(),
- std::max(fwd_cb.samples_per_channel(),
- rev_cb.samples_per_channel()));
+ 2 * std::max(out_cb.num_frames(),
+ std::max(fwd_cb.num_frames(),
+ rev_cb.num_frames()));
scoped_ptr<float[]> float_data(new float[max_length]);
scoped_ptr<int16_t[]> int_data(new int16_t[max_length]);
@@ -2324,7 +2333,7 @@
ReadChunk(near_file, int_data.get(), float_data.get(), &fwd_cb)) {
EXPECT_NOERR(ap->AnalyzeReverseStream(
rev_cb.channels(),
- rev_cb.samples_per_channel(),
+ rev_cb.num_frames(),
reverse_rate,
LayoutFromChannels(num_reverse_channels)));
@@ -2334,7 +2343,7 @@
EXPECT_NOERR(ap->ProcessStream(
fwd_cb.channels(),
- fwd_cb.samples_per_channel(),
+ fwd_cb.num_frames(),
input_rate,
LayoutFromChannels(num_input_channels),
output_rate,
@@ -2342,13 +2351,14 @@
out_cb.channels()));
Interleave(out_cb.channels(),
- out_cb.samples_per_channel(),
+ out_cb.num_frames(),
out_cb.num_channels(),
float_data.get());
// Dump output to file.
- ASSERT_EQ(static_cast<size_t>(out_cb.length()),
+ int out_length = out_cb.num_channels() * out_cb.num_frames();
+ ASSERT_EQ(static_cast<size_t>(out_length),
fwrite(float_data.get(), sizeof(float_data[0]),
- out_cb.length(), out_file));
+ out_length, out_file));
analog_level = ap->gain_control()->stream_analog_level();
}
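
The recurring pattern in the hunks above, flat data() loops becoming per-channel loops, could be wrapped in a small helper; ScaleBuffer is a hypothetical name and not part of this change:

    // Hypothetical helper capturing the per-channel scaling pattern used in
    // the updated tests; equivalent to the old flat data()[j] *= gain loop
    // over num_channels() * num_frames() samples.
    void ScaleBuffer(ChannelBuffer<float>* cb, float gain) {
      for (int ch = 0; ch < cb->num_channels(); ++ch) {
        for (int i = 0; i < cb->num_frames(); ++i)
          cb->channels()[ch][i] *= gain;
      }
    }
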
diff --git a/webrtc/modules/audio_processing/test/audioproc_float.cc b/webrtc/modules/audio_processing/test/audioproc_float.cc
index bbac9f1..e1418e9 100644
--- a/webrtc/modules/audio_processing/test/audioproc_float.cc
+++ b/webrtc/modules/audio_processing/test/audioproc_float.cc
@@ -177,27 +177,30 @@
ChannelBuffer<float> o_buf(o_file.sample_rate() / kChunksPerSecond,
o_file.num_channels());
- const size_t c_length = static_cast<size_t>(c_buf.length());
+ const size_t c_length =
+ static_cast<size_t>(c_buf.num_channels() * c_buf.num_frames());
+ const size_t o_length =
+ static_cast<size_t>(o_buf.num_channels() * o_buf.num_frames());
scoped_ptr<float[]> c_interleaved(new float[c_length]);
- scoped_ptr<float[]> o_interleaved(new float[o_buf.length()]);
+ scoped_ptr<float[]> o_interleaved(new float[o_length]);
while (c_file.ReadSamples(c_length, c_interleaved.get()) == c_length) {
FloatS16ToFloat(c_interleaved.get(), c_length, c_interleaved.get());
- Deinterleave(c_interleaved.get(), c_buf.samples_per_channel(),
+ Deinterleave(c_interleaved.get(), c_buf.num_frames(),
c_buf.num_channels(), c_buf.channels());
CHECK_EQ(kNoErr,
ap->ProcessStream(c_buf.channels(),
- c_buf.samples_per_channel(),
+ c_buf.num_frames(),
c_file.sample_rate(),
LayoutFromChannels(c_buf.num_channels()),
o_file.sample_rate(),
LayoutFromChannels(o_buf.num_channels()),
o_buf.channels()));
- Interleave(o_buf.channels(), o_buf.samples_per_channel(),
+ Interleave(o_buf.channels(), o_buf.num_frames(),
o_buf.num_channels(), o_interleaved.get());
- FloatToFloatS16(o_interleaved.get(), o_buf.length(), o_interleaved.get());
- o_file.WriteSamples(o_interleaved.get(), o_buf.length());
+ FloatToFloatS16(o_interleaved.get(), o_length, o_interleaved.get());
+ o_file.WriteSamples(o_interleaved.get(), o_length);
}
return 0;
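
Since the interleaved length is now computed by hand at each call site (length() is no longer used here), a small helper would make the intent explicit; InterleavedLength is hypothetical and assumes num_channels()/num_frames() are const accessors:

    // Hypothetical helper: total sample count of an interleaved copy of cb,
    // i.e. what the removed length() call sites now compute inline.
    template <typename T>
    size_t InterleavedLength(const ChannelBuffer<T>& cb) {
      return static_cast<size_t>(cb.num_channels()) * cb.num_frames();
    }
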
diff --git a/webrtc/modules/audio_processing/test/process_test.cc b/webrtc/modules/audio_processing/test/process_test.cc
index 3af495c..d1fa032 100644
--- a/webrtc/modules/audio_processing/test/process_test.cc
+++ b/webrtc/modules/audio_processing/test/process_test.cc
@@ -654,7 +654,10 @@
memcpy(far_frame.data_, msg.data().data(), msg.data().size());
} else {
for (int i = 0; i < msg.channel_size(); ++i) {
- reverse_cb->CopyFrom(msg.channel(i).data(), i);
+ memcpy(reverse_cb->channels()[i],
+ msg.channel(i).data(),
+ reverse_cb->num_frames() *
+ sizeof(reverse_cb->channels()[i][0]));
}
}
@@ -704,7 +707,10 @@
near_read_bytes += msg.input_data().size();
} else {
for (int i = 0; i < msg.input_channel_size(); ++i) {
- primary_cb->CopyFrom(msg.input_channel(i).data(), i);
+ memcpy(primary_cb->channels()[i],
+ msg.input_channel(i).data(),
+ primary_cb->num_frames() *
+ sizeof(primary_cb->channels()[i][0]));
near_read_bytes += msg.input_channel(i).size();
}
}
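
The two memcpy blocks above replace the former CopyFrom(data, channel) convenience; a hypothetical helper with the same shape (CopyToChannel is not part of this change):

    // Hypothetical replacement for the removed CopyFrom(data, channel):
    // copies one channel's worth of samples into the buffer.
    // Requires <cstring> for memcpy.
    template <typename T>
    void CopyToChannel(ChannelBuffer<T>* cb, const void* data, int channel) {
      memcpy(cb->channels()[channel], data,
             cb->num_frames() * sizeof(cb->channels()[channel][0]));
    }
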