Fix constness of AudioBuffer accessors.
Stop returning non-const pointers from const accessors and handle the
resulting spillover in the callers. Provide non-const overloads where needed.
Inspired by kwiberg:
https://webrtc-codereview.appspot.com/12379005/
R=bjornv@webrtc.org, kwiberg@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/15379004
git-svn-id: http://webrtc.googlecode.com/svn/trunk@6030 4adac7df-926f-26a2-2b94-8c16560cd09d
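
For reference, a minimal sketch of the const-overload pattern applied in this CL:
the const accessor does the real work and the non-const overload delegates to it
via const_cast. Buffer below is an illustrative stand-in, not the WebRTC class;
its members and layout are assumptions for the example only.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    class Buffer {
     public:
      Buffer(int num_channels, int samples_per_channel)
          : num_channels_(num_channels),
            data_(num_channels * samples_per_channel) {}

      // Const accessor: safe to call on const objects, returns read-only data.
      const int16_t* data(int channel) const {
        assert(channel >= 0 && channel < num_channels_);
        return &data_[channel * samples_per_channel()];
      }

      // Non-const overload: reuses the const version. Casting away const is
      // safe here because *this is known to be non-const.
      int16_t* data(int channel) {
        const Buffer* t = this;
        return const_cast<int16_t*>(t->data(channel));
      }

      int samples_per_channel() const {
        return static_cast<int>(data_.size()) / num_channels_;
      }

     private:
      int num_channels_;
      std::vector<int16_t> data_;
    };

Callers that only read then hold the result in a const int16_t*, as the updated
call sites below do.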
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc
index c53d4df..9160f69 100644
--- a/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/webrtc/modules/audio_processing/audio_buffer.cc
@@ -228,7 +228,7 @@
is_muted_ = false;
}
-int16_t* AudioBuffer::data(int channel) const {
+const int16_t* AudioBuffer::data(int channel) const {
assert(channel >= 0 && channel < num_proc_channels_);
if (data_ != NULL) {
return data_;
@@ -237,7 +237,12 @@
return channels_->channel(channel);
}
-int16_t* AudioBuffer::low_pass_split_data(int channel) const {
+int16_t* AudioBuffer::data(int channel) {
+ const AudioBuffer* t = this;
+ return const_cast<int16_t*>(t->data(channel));
+}
+
+const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
assert(channel >= 0 && channel < num_proc_channels_);
if (split_channels_.get() == NULL) {
return data(channel);
@@ -246,7 +251,12 @@
return split_channels_->low_channel(channel);
}
-int16_t* AudioBuffer::high_pass_split_data(int channel) const {
+int16_t* AudioBuffer::low_pass_split_data(int channel) {
+ const AudioBuffer* t = this;
+ return const_cast<int16_t*>(t->low_pass_split_data(channel));
+}
+
+const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
assert(channel >= 0 && channel < num_proc_channels_);
if (split_channels_.get() == NULL) {
return NULL;
@@ -255,19 +265,24 @@
return split_channels_->high_channel(channel);
}
-int16_t* AudioBuffer::mixed_data(int channel) const {
+int16_t* AudioBuffer::high_pass_split_data(int channel) {
+ const AudioBuffer* t = this;
+ return const_cast<int16_t*>(t->high_pass_split_data(channel));
+}
+
+const int16_t* AudioBuffer::mixed_data(int channel) const {
assert(channel >= 0 && channel < num_mixed_channels_);
return mixed_channels_->channel(channel);
}
-int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
+const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
return mixed_low_pass_channels_->channel(channel);
}
-int16_t* AudioBuffer::low_pass_reference(int channel) const {
+const int16_t* AudioBuffer::low_pass_reference(int channel) const {
assert(channel >= 0 && channel < num_proc_channels_);
if (!reference_copied_) {
return NULL;
@@ -280,7 +295,7 @@
return keyboard_data_;
}
-SplitFilterStates* AudioBuffer::filter_states(int channel) const {
+SplitFilterStates* AudioBuffer::filter_states(int channel) {
assert(channel >= 0 && channel < num_proc_channels_);
return &filter_states_[channel];
}
diff --git a/webrtc/modules/audio_processing/audio_buffer.h b/webrtc/modules/audio_processing/audio_buffer.h
index eaf53eb..79f4689 100644
--- a/webrtc/modules/audio_processing/audio_buffer.h
+++ b/webrtc/modules/audio_processing/audio_buffer.h
@@ -55,15 +55,18 @@
int samples_per_split_channel() const;
int samples_per_keyboard_channel() const;
- int16_t* data(int channel) const;
- int16_t* low_pass_split_data(int channel) const;
- int16_t* high_pass_split_data(int channel) const;
- int16_t* mixed_data(int channel) const;
- int16_t* mixed_low_pass_data(int channel) const;
- int16_t* low_pass_reference(int channel) const;
+ int16_t* data(int channel);
+ const int16_t* data(int channel) const;
+ int16_t* low_pass_split_data(int channel);
+ const int16_t* low_pass_split_data(int channel) const;
+ int16_t* high_pass_split_data(int channel);
+ const int16_t* high_pass_split_data(int channel) const;
+ const int16_t* mixed_data(int channel) const;
+ const int16_t* mixed_low_pass_data(int channel) const;
+ const int16_t* low_pass_reference(int channel) const;
const float* keyboard_data() const;
- SplitFilterStates* filter_states(int channel) const;
+ SplitFilterStates* filter_states(int channel);
void set_activity(AudioFrame::VADActivity activity);
AudioFrame::VADActivity activity() const;
diff --git a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index 1dce403..a03adc5 100644
--- a/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -128,7 +128,7 @@
for (int i = 0; i < audio->num_channels(); i++) {
// TODO(ajm): improve how this works, possibly inside AECM.
// This is kind of hacked up.
- int16_t* noisy = audio->low_pass_reference(i);
+ const int16_t* noisy = audio->low_pass_reference(i);
int16_t* clean = audio->low_pass_split_data(i);
if (noisy == NULL) {
noisy = clean;
diff --git a/webrtc/modules/audio_processing/gain_control_impl.cc b/webrtc/modules/audio_processing/gain_control_impl.cc
index e859044..a67b67e 100644
--- a/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -59,7 +59,7 @@
assert(audio->samples_per_split_channel() <= 160);
- int16_t* mixed_data = audio->low_pass_split_data(0);
+ const int16_t* mixed_data = audio->low_pass_split_data(0);
if (audio->num_channels() > 1) {
audio->CopyAndMixLowPass(1);
mixed_data = audio->mixed_low_pass_data(0);
diff --git a/webrtc/modules/audio_processing/level_estimator_impl.cc b/webrtc/modules/audio_processing/level_estimator_impl.cc
index c5985ce..a91e963 100644
--- a/webrtc/modules/audio_processing/level_estimator_impl.cc
+++ b/webrtc/modules/audio_processing/level_estimator_impl.cc
@@ -20,7 +20,15 @@
namespace webrtc {
namespace {
-const double kMaxSquaredLevel = 32768.0 * 32768.0;
+const float kMaxSquaredLevel = 32768.0 * 32768.0;
+
+float SumSquare(const int16_t* data, int length) {
+ float sum_square = 0.f;
+ for (int i = 0; i < length; ++i) {
+ sum_square += data[i] * data[i];
+ }
+ return sum_square;
+}
class Level {
public:
@@ -36,7 +44,7 @@
sample_count_ = 0;
}
- void Process(int16_t* data, int length) {
+ void Process(const int16_t* data, int length) {
assert(data != NULL);
assert(length > 0);
sum_square_ += SumSquare(data, length);
@@ -55,7 +63,7 @@
}
// Normalize by the max level.
- double rms = sum_square_ / (sample_count_ * kMaxSquaredLevel);
+ float rms = sum_square_ / (sample_count_ * kMaxSquaredLevel);
// 20log_10(x^0.5) = 10log_10(x)
rms = 10 * log10(rms);
if (rms > 0)
@@ -69,18 +77,10 @@
}
private:
- static double SumSquare(int16_t* data, int length) {
- double sum_square = 0.0;
- for (int i = 0; i < length; ++i) {
- double data_d = static_cast<double>(data[i]);
- sum_square += data_d * data_d;
- }
- return sum_square;
- }
-
- double sum_square_;
+ float sum_square_;
int sample_count_;
};
+
} // namespace
LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessing* apm,
@@ -102,7 +102,7 @@
return apm_->kNoError;
}
- int16_t* mixed_data = audio->data(0);
+ const int16_t* mixed_data = audio->data(0);
if (audio->num_channels() > 1) {
audio->CopyAndMix(1);
mixed_data = audio->mixed_data(0);
diff --git a/webrtc/modules/audio_processing/voice_detection_impl.cc b/webrtc/modules/audio_processing/voice_detection_impl.cc
index 1d3d124..c6e497f 100644
--- a/webrtc/modules/audio_processing/voice_detection_impl.cc
+++ b/webrtc/modules/audio_processing/voice_detection_impl.cc
@@ -61,7 +61,7 @@
}
assert(audio->samples_per_split_channel() <= 160);
- int16_t* mixed_data = audio->low_pass_split_data(0);
+ const int16_t* mixed_data = audio->low_pass_split_data(0);
if (audio->num_channels() > 1) {
audio->CopyAndMixLowPass(1);
mixed_data = audio->mixed_low_pass_data(0);