Reformat the WebRTC code base
Running clang-format with Chromium's style guide.
The goals are several:
* providing consistency and readability (that's what code guidelines are for)
* preventing formatting noise in reviews, enforced by presubmit checks and git cl format (see the sketch after this list)
* building on the previous point: making it easier to fix format issues automatically
* you name it
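As a rough sketch of the mechanics (the exact script and file lists used for this CL are not reproduced here, so treat the invocation below as an assumption), the reformat amounts to running clang-format with the Chromium style over the tree, and git cl format keeps later CLs clean:

  # Hypothetical invocation; the actual reformat was scripted over the whole tree.
  clang-format -i --style=Chromium modules/audio_coding/neteq/audio_decoder_unittest.cc
  # Presubmit now expects formatted code; before uploading a CL, run:
  git cl format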
Please consider using git-hyper-blame to ignore this commit.
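A minimal sketch of how to do that, assuming depot_tools' git-hyper-blame (flag names may vary by version); <REFORMAT_COMMIT> is a placeholder for this commit's hash, which is not listed in this message:

  # Hypothetical usage; check git hyper-blame --help for the exact flags.
  git hyper-blame -i <REFORMAT_COMMIT> modules/audio_coding/neteq/audio_decoder_unittest.cc
  # Plain git (2.23+) offers a similar mechanism:
  git blame --ignore-rev <REFORMAT_COMMIT> modules/audio_coding/neteq/audio_decoder_unittest.cc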
Bug: webrtc:9340
Change-Id: I694567c4cdf8cee2860958cfe82bfaf25848bb87
Reviewed-on: https://webrtc-review.googlesource.com/81185
Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23660}
diff --git a/modules/audio_coding/neteq/audio_decoder_unittest.cc b/modules/audio_coding/neteq/audio_decoder_unittest.cc
index e8f7a4a..54ede6f 100644
--- a/modules/audio_coding/neteq/audio_decoder_unittest.cc
+++ b/modules/audio_coding/neteq/audio_decoder_unittest.cc
@@ -114,7 +114,7 @@
decoder_ = NULL;
}
- virtual void InitEncoder() { }
+ virtual void InitEncoder() {}
// TODO(henrik.lundin) Change return type to size_t once most/all overriding
// implementations are gone.
@@ -136,12 +136,13 @@
samples_per_10ms, channels_,
interleaved_input.get());
- encoded_info = audio_encoder_->Encode(
- 0, rtc::ArrayView<const int16_t>(interleaved_input.get(),
- audio_encoder_->NumChannels() *
- audio_encoder_->SampleRateHz() /
- 100),
- output);
+ encoded_info =
+ audio_encoder_->Encode(0,
+ rtc::ArrayView<const int16_t>(
+ interleaved_input.get(),
+ audio_encoder_->NumChannels() *
+ audio_encoder_->SampleRateHz() / 100),
+ output);
}
EXPECT_EQ(payload_type_, encoded_info.payload_type);
return static_cast<int>(encoded_info.encoded_bytes);
@@ -152,11 +153,14 @@
// with |mse|. The encoded stream should contain |expected_bytes|. For stereo
// audio, the absolute difference between the two channels is compared vs
// |channel_diff_tolerance|.
- void EncodeDecodeTest(size_t expected_bytes, int tolerance, double mse,
- int delay = 0, int channel_diff_tolerance = 0) {
+ void EncodeDecodeTest(size_t expected_bytes,
+ int tolerance,
+ double mse,
+ int delay = 0,
+ int channel_diff_tolerance = 0) {
ASSERT_GE(tolerance, 0) << "Test must define a tolerance >= 0";
- ASSERT_GE(channel_diff_tolerance, 0) <<
- "Test must define a channel_diff_tolerance >= 0";
+ ASSERT_GE(channel_diff_tolerance, 0)
+ << "Test must define a channel_diff_tolerance >= 0";
size_t processed_samples = 0u;
rtc::Buffer encoded;
size_t encoded_bytes = 0u;
@@ -168,10 +172,10 @@
input.resize(input.size() + frame_size_, 0);
// Read from input file.
ASSERT_GE(input.size() - processed_samples, frame_size_);
- ASSERT_TRUE(input_audio_.Read(
- frame_size_, codec_input_rate_hz_, &input[processed_samples]));
- size_t enc_len = EncodeFrame(
- &input[processed_samples], frame_size_, &encoded);
+ ASSERT_TRUE(input_audio_.Read(frame_size_, codec_input_rate_hz_,
+ &input[processed_samples]));
+ size_t enc_len =
+ EncodeFrame(&input[processed_samples], frame_size_, &encoded);
// Make sure that frame_size_ * channels_ samples are allocated and free.
decoded.resize((processed_samples + frame_size_) * channels_, 0);
AudioDecoder::SpeechType speech_type;
@@ -189,11 +193,11 @@
if (expected_bytes) {
EXPECT_EQ(expected_bytes, encoded_bytes);
}
- CompareInputOutput(
- input, decoded, processed_samples, channels_, tolerance, delay);
+ CompareInputOutput(input, decoded, processed_samples, channels_, tolerance,
+ delay);
if (channels_ == 2)
- CompareTwoChannels(
- decoded, processed_samples, channels_, channel_diff_tolerance);
+ CompareTwoChannels(decoded, processed_samples, channels_,
+ channel_diff_tolerance);
EXPECT_LE(
MseInputOutput(input, decoded, processed_samples, channels_, delay),
mse);
@@ -242,10 +246,9 @@
AudioDecoder::SpeechType speech_type;
decoder_->Reset();
std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
- size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
- codec_input_rate_hz_,
- frame_size_ * channels_ * sizeof(int16_t),
- output.get(), &speech_type);
+ size_t dec_len = decoder_->Decode(
+ encoded.data(), enc_len, codec_input_rate_hz_,
+ frame_size_ * channels_ * sizeof(int16_t), output.get(), &speech_type);
EXPECT_EQ(frame_size_ * channels_, dec_len);
// Call DecodePlc and verify that we get one frame of data.
// (Overwrite the output from the above Decode call, but that does not
@@ -332,10 +335,9 @@
AudioDecoder::SpeechType speech_type;
decoder_->Reset();
std::unique_ptr<int16_t[]> output(new int16_t[frame_size_ * channels_]);
- size_t dec_len = decoder_->Decode(encoded.data(), enc_len,
- codec_input_rate_hz_,
- frame_size_ * channels_ * sizeof(int16_t),
- output.get(), &speech_type);
+ size_t dec_len = decoder_->Decode(
+ encoded.data(), enc_len, codec_input_rate_hz_,
+ frame_size_ * channels_ * sizeof(int16_t), output.get(), &speech_type);
EXPECT_EQ(frame_size_, dec_len);
// Simply call DecodePlc and verify that we get 0 as return value.
EXPECT_EQ(0U, decoder_->DecodePlc(1, output.get()));