Reformat the WebRTC code base
Run clang-format over the code base with Chromium's style guide.

The goals are several:
* providing consistency and readability (that's what code guidelines are for)
* preventing noise from presubmit checks and git cl format (see the example below)
* building on the previous point: making it easier to fix format issues automatically
* you name it
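
For reference, a typical local invocation looks roughly like this (a
sketch only; it assumes depot_tools and a clang-format binary are on
PATH):

  # Reformat only the lines touched by the current CL.
  git cl format

  # Or reformat a single file in place with the Chromium style.
  clang-format -i -style=Chromium modules/video_coding/generic_decoder.cc

With the whole tree already formatted, these commands should no longer
produce noise on lines unrelated to the change being made.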
Please consider using git-hyper-blame to ignore this commit.
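
For example, with depot_tools' git-hyper-blame (a sketch; -i takes a
revision to skip, and <commit-hash> is a placeholder for this commit's
hash once it lands):

  # Blame a file while skipping this formatting-only commit.
  git hyper-blame -i <commit-hash> modules/video_coding/jitter_buffer.cc

Adding the hash to a .git-blame-ignore-revs file, where the tooling
supports one, has the same effect for every blame invocation.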
Bug: webrtc:9340
Change-Id: I694567c4cdf8cee2860958cfe82bfaf25848bb87
Reviewed-on: https://webrtc-review.googlesource.com/81185
Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23660}
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.h b/modules/video_coding/codecs/h264/h264_decoder_impl.h
index 1172f14..a709177 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.h
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.h
@@ -58,8 +58,9 @@
// Called by FFmpeg when it needs a frame buffer to store decoded frames in.
// The |VideoFrame| returned by FFmpeg at |Decode| originate from here. Their
// buffers are reference counted and freed by FFmpeg using |AVFreeBuffer2|.
- static int AVGetBuffer2(
- AVCodecContext* context, AVFrame* av_frame, int flags);
+ static int AVGetBuffer2(AVCodecContext* context,
+ AVFrame* av_frame,
+ int flags);
// Called by FFmpeg when it is done with a video frame, see |AVGetBuffer2|.
static void AVFreeBuffer2(void* opaque, uint8_t* data);
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 67c5abc..eee954d 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -45,17 +45,17 @@
int NumberOfThreads(int width, int height, int number_of_cores) {
// TODO(hbos): In Chromium, multiple threads do not work with sandbox on Mac,
// see crbug.com/583348. Until further investigated, only use one thread.
-// if (width * height >= 1920 * 1080 && number_of_cores > 8) {
-// return 8; // 8 threads for 1080p on high perf machines.
-// } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
-// return 3; // 3 threads for 1080p.
-// } else if (width * height > 640 * 480 && number_of_cores >= 3) {
-// return 2; // 2 threads for qHD/HD.
-// } else {
-// return 1; // 1 thread for VGA or less.
-// }
-// TODO(sprang): Also check sSliceArgument.uiSliceNum om GetEncoderPrams(),
-// before enabling multithreading here.
+ // if (width * height >= 1920 * 1080 && number_of_cores > 8) {
+ // return 8; // 8 threads for 1080p on high perf machines.
+ // } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
+ // return 3; // 3 threads for 1080p.
+ // } else if (width * height > 640 * 480 && number_of_cores >= 3) {
+ // return 2; // 2 threads for qHD/HD.
+ // } else {
+ // return 1; // 1 thread for VGA or less.
+ // }
+ // TODO(sprang): Also check sSliceArgument.uiSliceNum om GetEncoderPrams(),
+ // before enabling multithreading here.
return 1;
}
@@ -139,10 +139,10 @@
// Because the sum of all layer lengths, |required_size|, fits in a
// |size_t|, we know that any indices in-between will not overflow.
RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
- RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+0], start_code[0]);
- RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+1], start_code[1]);
- RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+2], start_code[2]);
- RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+3], start_code[3]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
+ RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
frag_header->fragmentationOffset[frag] =
encoded_image->_length + layer_len + sizeof(start_code);
frag_header->fragmentationLength[frag] =
@@ -150,8 +150,7 @@
layer_len += layerInfo.pNalLengthInByte[nal];
}
// Copy the entire layer's data (including start codes).
- memcpy(encoded_image->_buffer + encoded_image->_length,
- layerInfo.pBsBuf,
+ memcpy(encoded_image->_buffer + encoded_image->_length, layerInfo.pBsBuf,
layer_len);
encoded_image->_length += layer_len;
}
@@ -190,8 +189,7 @@
int32_t number_of_cores,
size_t max_payload_size) {
ReportInit();
- if (!codec_settings ||
- codec_settings->codecType != kVideoCodecH264) {
+ if (!codec_settings || codec_settings->codecType != kVideoCodecH264) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -222,8 +220,7 @@
RTC_DCHECK(openh264_encoder_);
if (kOpenH264EncoderDetailedLogging) {
int trace_level = WELS_LOG_DETAIL;
- openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL,
- &trace_level);
+ openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
}
// else WELS_LOG_DEFAULT is used by default.
@@ -255,8 +252,7 @@
}
// TODO(pbos): Base init params on these values before submitting.
int video_format = EVideoFormatType::videoFormatI420;
- openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT,
- &video_format);
+ openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
// Initialize encoded image. Default buffer size: size of unencoded data.
encoded_image_._size = CalcBufferSize(VideoType::kI420, codec_settings->width,
@@ -300,8 +296,7 @@
memset(&target_bitrate, 0, sizeof(SBitrateInfo));
target_bitrate.iLayer = SPATIAL_LAYER_ALL,
target_bitrate.iBitrate = target_bps_;
- openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE,
- &target_bitrate);
+ openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
openh264_encoder_->SetOption(ENCODER_OPTION_FRAME_RATE, &max_frame_rate_);
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -485,8 +480,7 @@
if (has_reported_init_)
return;
RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
- kH264EncoderEventInit,
- kH264EncoderEventMax);
+ kH264EncoderEventInit, kH264EncoderEventMax);
has_reported_init_ = true;
}
@@ -494,13 +488,12 @@
if (has_reported_error_)
return;
RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
- kH264EncoderEventError,
- kH264EncoderEventMax);
+ kH264EncoderEventError, kH264EncoderEventMax);
has_reported_error_ = true;
}
-int32_t H264EncoderImpl::SetChannelParameters(
- uint32_t packet_loss, int64_t rtt) {
+int32_t H264EncoderImpl::SetChannelParameters(uint32_t packet_loss,
+ int64_t rtt) {
return WEBRTC_VIDEO_CODEC_OK;
}
diff --git a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
index de4352f..dcf99f1 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_decoder_adapter.cc
@@ -126,8 +126,8 @@
int32_t rv = 0;
for (size_t i = 0; i < image.image_components.size(); i++) {
rv = decoders_[image.image_components[i].component_index]->Decode(
- image.image_components[i].encoded_image, missing_frames,
- nullptr, render_time_ms);
+ image.image_components[i].encoded_image, missing_frames, nullptr,
+ render_time_ms);
if (rv != WEBRTC_VIDEO_CODEC_OK)
return rv;
}
diff --git a/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc b/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
index fbefd60..be5c2cc 100644
--- a/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
+++ b/modules/video_coding/codecs/multiplex/test/multiplex_adapter_unittest.cc
@@ -128,9 +128,8 @@
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
- EXPECT_EQ(
- WEBRTC_VIDEO_CODEC_OK,
- decoder_->Decode(encoded_frame, false, &codec_specific_info, -1));
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+ decoder_->Decode(encoded_frame, false, &codec_specific_info, -1));
std::unique_ptr<VideoFrame> decoded_frame;
absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
index 5ac6986..394ee14 100644
--- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -308,9 +308,9 @@
// TODO(kthelgason): Move this out of the test fixture impl and
// make available as a shared utility class.
-void VideoCodecTestFixtureImpl::H264KeyframeChecker::
- CheckEncodedFrame(webrtc::VideoCodecType codec,
- const EncodedImage& encoded_frame) const {
+void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
+ webrtc::VideoCodecType codec,
+ const EncodedImage& encoded_frame) const {
EXPECT_EQ(kVideoCodecH264, codec);
bool contains_sps = false;
bool contains_pps = false;
@@ -390,8 +390,7 @@
decoder_factory_(std::move(decoder_factory)),
config_(config) {}
-VideoCodecTestFixtureImpl::
- ~VideoCodecTestFixtureImpl() = default;
+VideoCodecTestFixtureImpl::~VideoCodecTestFixtureImpl() = default;
// Processes all frames in the clip and verifies the result.
void VideoCodecTestFixtureImpl::RunTest(
diff --git a/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc b/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
index fc49c92..f8580fc 100644
--- a/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_mediacodec.cc
@@ -70,8 +70,8 @@
TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsH264CBP) {
auto config = CreateConfig();
- const auto frame_checker = rtc::MakeUnique<
- VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ const auto frame_checker =
+ rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
config.encoded_frame_checker = frame_checker.get();
config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
352, 288);
@@ -95,8 +95,8 @@
// HW encoders that support CHP.
TEST(VideoCodecTestMediaCodec, DISABLED_ForemanCif500kbpsH264CHP) {
auto config = CreateConfig();
- const auto frame_checker = rtc::MakeUnique<
- VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ const auto frame_checker =
+ rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
config.h264_codec_settings.profile = H264::kProfileConstrainedHigh;
config.encoded_frame_checker = frame_checker.get();
diff --git a/modules/video_coding/codecs/test/videocodec_test_openh264.cc b/modules/video_coding/codecs/test/videocodec_test_openh264.cc
index c8a6ba2..a0743d1 100644
--- a/modules/video_coding/codecs/test/videocodec_test_openh264.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_openh264.cc
@@ -39,8 +39,8 @@
} // namespace
TEST(VideoCodecTestOpenH264, ConstantHighBitrate) {
- auto frame_checker = rtc::MakeUnique<
- VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto frame_checker =
+ rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
auto config = CreateConfig();
config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
kCifWidth, kCifHeight);
@@ -60,8 +60,8 @@
// H264: Enable SingleNalUnit packetization mode. Encoder should split
// large frames into multiple slices and limit length of NAL units.
TEST(VideoCodecTestOpenH264, SingleNalUnit) {
- auto frame_checker = rtc::MakeUnique<
- VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ auto frame_checker =
+ rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
auto config = CreateConfig();
config.h264_codec_settings.packetization_mode =
H264PacketizationMode::SingleNalUnit;
diff --git a/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc b/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
index c91f340..a5667bc 100644
--- a/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_videotoolbox.cc
@@ -37,8 +37,8 @@
VideoCodecTestFixture::Config config) {
auto decoder_factory = CreateObjCDecoderFactory();
auto encoder_factory = CreateObjCEncoderFactory();
- return CreateVideoCodecTestFixture(
- config, std::move(decoder_factory), std::move(encoder_factory));
+ return CreateVideoCodecTestFixture(config, std::move(decoder_factory),
+ std::move(encoder_factory));
}
} // namespace
@@ -53,8 +53,8 @@
// TODO(kthelgason): Use RC Thresholds when the internal bitrateAdjuster is no
// longer in use.
MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CBP) {
- const auto frame_checker = rtc::MakeUnique<
- VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ const auto frame_checker =
+ rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
auto config = CreateConfig();
config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
352, 288);
@@ -69,8 +69,8 @@
}
MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CHP) {
- const auto frame_checker = rtc::MakeUnique<
- VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+ const auto frame_checker =
+ rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
auto config = CreateConfig();
config.h264_codec_settings.profile = H264::kProfileConstrainedHigh;
config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index bd2c992..9df5544 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <algorithm>
#include <string>
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 0f8bc75..522c989 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <algorithm>
#include <string>
#include <vector>
diff --git a/modules/video_coding/codecs/vp8/screenshare_layers.cc b/modules/video_coding/codecs/vp8/screenshare_layers.cc
index cd24490..f7f1019 100644
--- a/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -37,8 +37,7 @@
// been exceeded. This prevents needless keyframe requests.
const int ScreenshareLayers::kMaxFrameIntervalMs = 2750;
-ScreenshareLayers::ScreenshareLayers(int num_temporal_layers,
- Clock* clock)
+ScreenshareLayers::ScreenshareLayers(int num_temporal_layers, Clock* clock)
: clock_(clock),
number_of_temporal_layers_(
std::min(kMaxNumTemporalLayers, num_temporal_layers)),
diff --git a/modules/video_coding/codecs/vp8/screenshare_layers.h b/modules/video_coding/codecs/vp8/screenshare_layers.h
index c1b5fa7..5185b45 100644
--- a/modules/video_coding/codecs/vp8/screenshare_layers.h
+++ b/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -28,8 +28,7 @@
static const double kAcceptableTargetOvershoot;
static const int kMaxFrameIntervalMs;
- ScreenshareLayers(int num_temporal_layers,
- Clock* clock);
+ ScreenshareLayers(int num_temporal_layers, Clock* clock);
virtual ~ScreenshareLayers();
// Returns the recommended VP8 encode flags needed. May refresh the decoder
diff --git a/modules/video_coding/codecs/vp8/simulcast_rate_allocator.cc b/modules/video_coding/codecs/vp8/simulcast_rate_allocator.cc
index 6eea837..f8cfe88 100644
--- a/modules/video_coding/codecs/vp8/simulcast_rate_allocator.cc
+++ b/modules/video_coding/codecs/vp8/simulcast_rate_allocator.cc
@@ -12,8 +12,8 @@
#include <algorithm>
#include <memory>
-#include <vector>
#include <utility>
+#include <vector>
#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "rtc_base/checks.h"
diff --git a/modules/video_coding/codecs/vp9/include/vp9_globals.h b/modules/video_coding/codecs/vp9/include/vp9_globals.h
index 3ee9952..aa532a6 100644
--- a/modules/video_coding/codecs/vp9/include/vp9_globals.h
+++ b/modules/video_coding/codecs/vp9/include/vp9_globals.h
@@ -181,12 +181,12 @@
bool beginning_of_frame; // True if this packet is the first in a VP9 layer
// frame.
bool end_of_frame; // True if this packet is the last in a VP9 layer frame.
- bool ss_data_available; // True if SS data is available in this payload
- // descriptor.
+ bool ss_data_available; // True if SS data is available in this payload
+ // descriptor.
bool non_ref_for_inter_layer_pred; // True for frame which is not used as
// reference for inter-layer prediction.
- int16_t picture_id; // PictureID index, 15 bits;
- // kNoPictureId if PictureID does not exist.
+ int16_t picture_id; // PictureID index, 15 bits;
+ // kNoPictureId if PictureID does not exist.
int16_t max_picture_id; // Maximum picture ID index; either 0x7F or 0x7FFF;
int16_t tl0_pic_idx; // TL0PIC_IDX, 8 bits;
// kNoTl0PicIdx means no value provided.
diff --git a/modules/video_coding/codecs/vp9/svc_config.cc b/modules/video_coding/codecs/vp9/svc_config.cc
index c4ec288..22902fe 100644
--- a/modules/video_coding/codecs/vp9/svc_config.cc
+++ b/modules/video_coding/codecs/vp9/svc_config.cc
@@ -85,7 +85,7 @@
spatial_layer.minBitrate =
std::max(static_cast<size_t>(min_bitrate), kMinVp9SvcBitrateKbps);
spatial_layer.maxBitrate =
- static_cast<int>((1.6 * num_pixels + 50 * 1000) / 1000);
+ static_cast<int>((1.6 * num_pixels + 50 * 1000) / 1000);
spatial_layer.targetBitrate =
(spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2;
spatial_layers.push_back(spatial_layer);
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index 7a1e2ab..24f86aa 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -15,10 +15,10 @@
#include <limits>
#include <vector>
-#include "vpx/vpx_encoder.h"
-#include "vpx/vpx_decoder.h"
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_encoder.h"
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
@@ -164,9 +164,8 @@
RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
return false;
}
- rate_ratio[i] =
- static_cast<float>(svc_params_.scaling_factor_num[i]) /
- svc_params_.scaling_factor_den[i];
+ rate_ratio[i] = static_cast<float>(svc_params_.scaling_factor_num[i]) /
+ svc_params_.scaling_factor_den[i];
total += rate_ratio[i];
}
@@ -409,7 +408,7 @@
} else if (width * height >= 640 * 360 && number_of_cores > 2) {
return 2;
} else {
- // Use 2 threads for low res on ARM.
+// Use 2 threads for low res on ARM.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
defined(WEBRTC_ANDROID)
if (width * height >= 320 * 180 && number_of_cores > 2) {
@@ -531,7 +530,7 @@
vpx_codec_control(encoder_, VP9E_SET_ROW_MT, 1);
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) && \
- !defined(ANDROID)
+ !defined(ANDROID)
// Do not enable the denoiser on ARM since optimization is pending.
// Denoiser is on by default on other platforms.
vpx_codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
@@ -716,11 +715,9 @@
if (vp9_info->ss_data_available) {
vp9_info->spatial_layer_resolution_present = true;
for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
- vp9_info->width[i] = codec_.width *
- svc_params_.scaling_factor_num[i] /
+ vp9_info->width[i] = codec_.width * svc_params_.scaling_factor_num[i] /
svc_params_.scaling_factor_den[i];
- vp9_info->height[i] = codec_.height *
- svc_params_.scaling_factor_num[i] /
+ vp9_info->height[i] = codec_.height * svc_params_.scaling_factor_num[i] /
svc_params_.scaling_factor_den[i];
}
if (!vp9_info->flexible_mode) {
diff --git a/modules/video_coding/encoded_frame.h b/modules/video_coding/encoded_frame.h
index 80f0dcc..17c61d7 100644
--- a/modules/video_coding/encoded_frame.h
+++ b/modules/video_coding/encoded_frame.h
@@ -29,19 +29,19 @@
~VCMEncodedFrame();
/**
- * Delete VideoFrame and resets members to zero
- */
+ * Delete VideoFrame and resets members to zero
+ */
void Free();
/**
- * Set render time in milliseconds
- */
+ * Set render time in milliseconds
+ */
void SetRenderTime(const int64_t renderTimeMs) {
_renderTimeMs = renderTimeMs;
}
/**
- * Set the encoded frame size
- */
+ * Set the encoded frame size
+ */
void SetEncodedSize(uint32_t width, uint32_t height) {
_encodedWidth = width;
_encodedHeight = height;
@@ -52,34 +52,34 @@
}
/**
- * Get the encoded image
- */
+ * Get the encoded image
+ */
const webrtc::EncodedImage& EncodedImage() const {
return static_cast<const webrtc::EncodedImage&>(*this);
}
/**
- * Get pointer to frame buffer
- */
+ * Get pointer to frame buffer
+ */
const uint8_t* Buffer() const { return _buffer; }
/**
- * Get frame length
- */
+ * Get frame length
+ */
size_t Length() const { return _length; }
/**
- * Get frame timestamp (90kHz)
- */
+ * Get frame timestamp (90kHz)
+ */
uint32_t TimeStamp() const { return _timeStamp; }
/**
- * Get render time in milliseconds
- */
+ * Get render time in milliseconds
+ */
int64_t RenderTimeMs() const { return _renderTimeMs; }
/**
- * Get frame type
- */
+ * Get frame type
+ */
webrtc::FrameType FrameType() const { return _frameType; }
/**
- * Get frame rotation
- */
+ * Get frame rotation
+ */
VideoRotation rotation() const { return rotation_; }
/**
* Get video content type
@@ -94,30 +94,30 @@
*/
bool Complete() const { return _completeFrame; }
/**
- * True if there's a frame missing before this frame
- */
+ * True if there's a frame missing before this frame
+ */
bool MissingFrame() const { return _missingFrame; }
/**
- * Payload type of the encoded payload
- */
+ * Payload type of the encoded payload
+ */
uint8_t PayloadType() const { return _payloadType; }
/**
- * Get codec specific info.
- * The returned pointer is only valid as long as the VCMEncodedFrame
- * is valid. Also, VCMEncodedFrame owns the pointer and will delete
- * the object.
- */
+ * Get codec specific info.
+ * The returned pointer is only valid as long as the VCMEncodedFrame
+ * is valid. Also, VCMEncodedFrame owns the pointer and will delete
+ * the object.
+ */
const CodecSpecificInfo* CodecSpecific() const { return &_codecSpecificInfo; }
protected:
/**
- * Verifies that current allocated buffer size is larger than or equal to the
- * input size.
- * If the current buffer size is smaller, a new allocation is made and the old
- * buffer data
- * is copied to the new buffer.
- * Buffer size is updated to minimumSize.
- */
+ * Verifies that current allocated buffer size is larger than or equal to the
+ * input size.
+ * If the current buffer size is smaller, a new allocation is made and the old
+ * buffer data
+ * is copied to the new buffer.
+ * Buffer size is updated to minimumSize.
+ */
void VerifyAndAllocate(size_t minimumSize);
void Reset();
diff --git a/modules/video_coding/frame_buffer.h b/modules/video_coding/frame_buffer.h
index 2fc2d21..66f338a 100644
--- a/modules/video_coding/frame_buffer.h
+++ b/modules/video_coding/frame_buffer.h
@@ -14,8 +14,8 @@
#include <vector>
#include "modules/include/module_common_types.h"
-#include "modules/video_coding/include/video_coding.h"
#include "modules/video_coding/encoded_frame.h"
+#include "modules/video_coding/include/video_coding.h"
#include "modules/video_coding/jitter_buffer_common.h"
#include "modules/video_coding/session_info.h"
#include "typedefs.h" // NOLINT(build/include)
@@ -71,7 +71,6 @@
webrtc::FrameType FrameType() const;
-
private:
void SetState(VCMFrameBufferStateEnum state); // Set state of frame
diff --git a/modules/video_coding/generic_decoder.cc b/modules/video_coding/generic_decoder.cc
index 580bfc9..01500e7 100644
--- a/modules/video_coding/generic_decoder.cc
+++ b/modules/video_coding/generic_decoder.cc
@@ -32,8 +32,7 @@
_clock->CurrentNtpInMilliseconds() - _clock->TimeInMilliseconds();
}
-VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {
-}
+VCMDecodedFrameCallback::~VCMDecodedFrameCallback() {}
void VCMDecodedFrameCallback::SetUserReceiveCallback(
VCMReceiveCallback* receiveCallback) {
@@ -141,8 +140,8 @@
_timing->SetTimingFrameInfo(timing_frame_info);
}
- decodedImage.set_timestamp_us(
- frameInfo->renderTimeMs * rtc::kNumMicrosecsPerMillisec);
+ decodedImage.set_timestamp_us(frameInfo->renderTimeMs *
+ rtc::kNumMicrosecsPerMillisec);
decodedImage.set_rotation(frameInfo->rotation);
_receiveCallback->FrameToRender(decodedImage, qp, frameInfo->content_type);
}
@@ -211,39 +210,39 @@
}
int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
- TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
- frame.EncodedImage()._timeStamp);
- _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
- _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
- _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
- _frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
- // Set correctly only for key frames. Thus, use latest key frame
- // content type. If the corresponding key frame was lost, decode will fail
- // and content type will be ignored.
- if (frame.FrameType() == kVideoFrameKey) {
- _frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
- _last_keyframe_content_type = frame.contentType();
- } else {
- _frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
- }
- _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
+ TRACE_EVENT1("webrtc", "VCMGenericDecoder::Decode", "timestamp",
+ frame.EncodedImage()._timeStamp);
+ _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
+ _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
+ _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
+ _frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
+ // Set correctly only for key frames. Thus, use latest key frame
+ // content type. If the corresponding key frame was lost, decode will fail
+ // and content type will be ignored.
+ if (frame.FrameType() == kVideoFrameKey) {
+ _frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
+ _last_keyframe_content_type = frame.contentType();
+ } else {
+ _frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
+ }
+ _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);
- _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
- int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
- frame.CodecSpecific(), frame.RenderTimeMs());
+ _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
+ int32_t ret = decoder_->Decode(frame.EncodedImage(), frame.MissingFrame(),
+ frame.CodecSpecific(), frame.RenderTimeMs());
- _callback->OnDecoderImplementationName(decoder_->ImplementationName());
- if (ret < WEBRTC_VIDEO_CODEC_OK) {
- RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
- << frame.TimeStamp() << ", error code: " << ret;
- _callback->Pop(frame.TimeStamp());
- return ret;
- } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
- ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
- // No output
- _callback->Pop(frame.TimeStamp());
- }
+ _callback->OnDecoderImplementationName(decoder_->ImplementationName());
+ if (ret < WEBRTC_VIDEO_CODEC_OK) {
+ RTC_LOG(LS_WARNING) << "Failed to decode frame with timestamp "
+ << frame.TimeStamp() << ", error code: " << ret;
+ _callback->Pop(frame.TimeStamp());
return ret;
+ } else if (ret == WEBRTC_VIDEO_CODEC_NO_OUTPUT ||
+ ret == WEBRTC_VIDEO_CODEC_REQUEST_SLI) {
+ // No output
+ _callback->Pop(frame.TimeStamp());
+ }
+ return ret;
}
int32_t VCMGenericDecoder::RegisterDecodeCompleteCallback(
diff --git a/modules/video_coding/generic_decoder.h b/modules/video_coding/generic_decoder.h
index 8137b20..17d2897 100644
--- a/modules/video_coding/generic_decoder.h
+++ b/modules/video_coding/generic_decoder.h
@@ -81,20 +81,20 @@
~VCMGenericDecoder();
/**
- * Initialize the decoder with the information from the VideoCodec
- */
+ * Initialize the decoder with the information from the VideoCodec
+ */
int32_t InitDecode(const VideoCodec* settings, int32_t numberOfCores);
/**
- * Decode to a raw I420 frame,
- *
- * inputVideoBuffer reference to encoded video frame
- */
+ * Decode to a raw I420 frame,
+ *
+ * inputVideoBuffer reference to encoded video frame
+ */
int32_t Decode(const VCMEncodedFrame& inputFrame, int64_t nowMs);
/**
- * Set decode callback. Deregistering while decoding is illegal.
- */
+ * Set decode callback. Deregistering while decoding is illegal.
+ */
int32_t RegisterDecodeCompleteCallback(VCMDecodedFrameCallback* callback);
bool External() const;
diff --git a/modules/video_coding/generic_encoder.cc b/modules/video_coding/generic_encoder.cc
index 202cc86..7eb35e7 100644
--- a/modules/video_coding/generic_encoder.cc
+++ b/modules/video_coding/generic_encoder.cc
@@ -1,12 +1,12 @@
/*
-* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
-*
-* Use of this source code is governed by a BSD-style license
-* that can be found in the LICENSE file in the root of the source
-* tree. An additional intellectual property rights grant can be found
-* in the file PATENTS. All contributing project authors may
-* be found in the AUTHORS file in the root of the source tree.
-*/
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
#include "modules/video_coding/generic_encoder.h"
@@ -70,8 +70,8 @@
if (encoder_->InitEncode(settings, number_of_cores, max_payload_size) != 0) {
RTC_LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
"codec type: "
- << CodecTypeToPayloadString(settings->codecType)
- << " (" << settings->codecType <<")";
+ << CodecTypeToPayloadString(settings->codecType) << " ("
+ << settings->codecType << ")";
return -1;
}
vcm_encoded_frame_callback_->Reset();
@@ -151,9 +151,9 @@
// VideoSendStreamTest.VideoSendStreamStopSetEncoderRateToZero, set
// internal_source to true and use FakeEncoder. And the latter will
// happily encode this 1x1 frame and pass it on down the pipeline.
- return encoder_->Encode(VideoFrame(I420Buffer::Create(1, 1),
- kVideoRotation_0, 0),
- NULL, &frame_types);
+ return encoder_->Encode(
+ VideoFrame(I420Buffer::Create(1, 1), kVideoRotation_0, 0), NULL,
+ &frame_types);
return 0;
}
diff --git a/modules/video_coding/h264_sps_pps_tracker_unittest.cc b/modules/video_coding/h264_sps_pps_tracker_unittest.cc
index f88992f..0ad85ac 100644
--- a/modules/video_coding/h264_sps_pps_tracker_unittest.cc
+++ b/modules/video_coding/h264_sps_pps_tracker_unittest.cc
@@ -12,9 +12,9 @@
#include <vector>
+#include "common_video/h264/h264_common.h"
#include "modules/video_coding/packet.h"
#include "test/gtest.h"
-#include "common_video/h264/h264_common.h"
namespace webrtc {
namespace video_coding {
diff --git a/modules/video_coding/include/video_codec_interface.h b/modules/video_coding/include/video_codec_interface.h
index d61a926..1b5e155 100644
--- a/modules/video_coding/include/video_codec_interface.h
+++ b/modules/video_coding/include/video_codec_interface.h
@@ -37,8 +37,8 @@
struct CodecSpecificInfoVP9 {
bool first_frame_in_picture; // First frame, increment picture_id.
- bool inter_pic_predicted; // This layer frame is dependent on previously
- // coded frame(s).
+ bool inter_pic_predicted; // This layer frame is dependent on previously
+ // coded frame(s).
bool flexible_mode;
bool ss_data_available;
bool non_ref_for_inter_layer_pred;
diff --git a/modules/video_coding/include/video_coding.h b/modules/video_coding/include/video_coding.h
index 61ce66a..e5c30eb 100644
--- a/modules/video_coding/include/video_coding.h
+++ b/modules/video_coding/include/video_coding.h
@@ -72,8 +72,8 @@
static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
/*
- * Sender
- */
+ * Sender
+ */
// Registers a codec to be used for encoding. Calling this
// API multiple times overwrites any previously registered codecs.
@@ -182,8 +182,8 @@
virtual int32_t EnableFrameDropper(bool enable) = 0;
/*
- * Receiver
- */
+ * Receiver
+ */
// Register possible receive codecs, can be called multiple times for
// different codecs.
diff --git a/modules/video_coding/jitter_buffer.cc b/modules/video_coding/jitter_buffer.cc
index 6e688fd..5f45ece 100644
--- a/modules/video_coding/jitter_buffer.cc
+++ b/modules/video_coding/jitter_buffer.cc
@@ -761,9 +761,8 @@
} else {
incomplete_frames_.InsertFrame(frame);
// If NACKs are enabled, keyframes are triggered by |GetNackList|.
- if (nack_mode_ == kNoNack &&
- NonContinuousOrIncompleteDuration() >
- 90 * kMaxDiscontinuousFramesTime) {
+ if (nack_mode_ == kNoNack && NonContinuousOrIncompleteDuration() >
+ 90 * kMaxDiscontinuousFramesTime) {
return kFlushIndicator;
}
}
@@ -777,9 +776,8 @@
} else {
incomplete_frames_.InsertFrame(frame);
// If NACKs are enabled, keyframes are triggered by |GetNackList|.
- if (nack_mode_ == kNoNack &&
- NonContinuousOrIncompleteDuration() >
- 90 * kMaxDiscontinuousFramesTime) {
+ if (nack_mode_ == kNoNack && NonContinuousOrIncompleteDuration() >
+ 90 * kMaxDiscontinuousFramesTime) {
return kFlushIndicator;
}
}
diff --git a/modules/video_coding/jitter_buffer_unittest.cc b/modules/video_coding/jitter_buffer_unittest.cc
index a1447c0..71c17ea 100644
--- a/modules/video_coding/jitter_buffer_unittest.cc
+++ b/modules/video_coding/jitter_buffer_unittest.cc
@@ -214,8 +214,7 @@
clock_.reset(new SimulatedClock(0));
jitter_buffer_.reset(new VCMJitterBuffer(
clock_.get(),
- std::unique_ptr<EventWrapper>(event_factory_.CreateEvent()),
- this,
+ std::unique_ptr<EventWrapper>(event_factory_.CreateEvent()), this,
this));
jitter_buffer_->Start();
seq_num_ = 1234;
@@ -333,8 +332,8 @@
oldest_packet_to_nack_ = 250;
jitter_buffer_ = new VCMJitterBuffer(
clock_.get(),
- std::unique_ptr<EventWrapper>(event_factory_.CreateEvent()),
- this, this);
+ std::unique_ptr<EventWrapper>(event_factory_.CreateEvent()), this,
+ this);
stream_generator_ = new StreamGenerator(0, clock_->TimeInMilliseconds());
jitter_buffer_->Start();
jitter_buffer_->SetNackSettings(max_nack_list_size_, oldest_packet_to_nack_,
diff --git a/modules/video_coding/jitter_estimator.cc b/modules/video_coding/jitter_estimator.cc
index 41db158..5e754f1 100644
--- a/modules/video_coding/jitter_estimator.cc
+++ b/modules/video_coding/jitter_estimator.cc
@@ -139,10 +139,10 @@
// Update the variance anyway since we want to capture cases where we only
// get
// key frames.
- _varFrameSize = VCM_MAX(_phi * _varFrameSize +
- (1 - _phi) * (frameSizeBytes - avgFrameSize) *
- (frameSizeBytes - avgFrameSize),
- 1.0);
+ _varFrameSize = VCM_MAX(
+ _phi * _varFrameSize + (1 - _phi) * (frameSizeBytes - avgFrameSize) *
+ (frameSizeBytes - avgFrameSize),
+ 1.0);
}
// Update max frameSize estimate
diff --git a/modules/video_coding/media_opt_util.cc b/modules/video_coding/media_opt_util.cc
index e3083b0..aea35b0 100644
--- a/modules/video_coding/media_opt_util.cc
+++ b/modules/video_coding/media_opt_util.cc
@@ -19,8 +19,8 @@
#include "modules/include/module_common_types.h"
#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
-#include "modules/video_coding/include/video_coding_defines.h"
#include "modules/video_coding/fec_rate_table.h"
+#include "modules/video_coding/include/video_coding_defines.h"
#include "modules/video_coding/nack_fec_tables.h"
namespace webrtc {
@@ -285,9 +285,8 @@
// Average number of packets per frame (source and fec):
const uint8_t avgTotPackets = static_cast<uint8_t>(
std::min(static_cast<float>(std::numeric_limits<uint8_t>::max()),
- 1.5f +
- static_cast<float>(bitRatePerFrame) * 1000.0f /
- static_cast<float>(8.0 * _maxPayloadSize)));
+ 1.5f + static_cast<float>(bitRatePerFrame) * 1000.0f /
+ static_cast<float>(8.0 * _maxPayloadSize)));
// FEC rate parameters: for P and I frame
uint8_t codeRateDelta = 0;
diff --git a/modules/video_coding/media_optimization.cc b/modules/video_coding/media_optimization.cc
index ea70f3f..5433edb 100644
--- a/modules/video_coding/media_optimization.cc
+++ b/modules/video_coding/media_optimization.cc
@@ -30,8 +30,7 @@
memset(incoming_frame_times_, -1, sizeof(incoming_frame_times_));
}
-MediaOptimization::~MediaOptimization(void) {
-}
+MediaOptimization::~MediaOptimization(void) {}
void MediaOptimization::Reset() {
rtc::CritScope lock(&crit_sect_);
diff --git a/modules/video_coding/nack_module.h b/modules/video_coding/nack_module.h
index 5640ea8..fc2f2a7 100644
--- a/modules/video_coding/nack_module.h
+++ b/modules/video_coding/nack_module.h
@@ -12,8 +12,8 @@
#define MODULES_VIDEO_CODING_NACK_MODULE_H_
#include <map>
-#include <vector>
#include <set>
+#include <vector>
#include "modules/include/module.h"
#include "modules/include/module_common_types.h"
diff --git a/modules/video_coding/rtp_frame_reference_finder.cc b/modules/video_coding/rtp_frame_reference_finder.cc
index 9401243..396935c 100644
--- a/modules/video_coding/rtp_frame_reference_finder.cc
+++ b/modules/video_coding/rtp_frame_reference_finder.cc
@@ -572,7 +572,7 @@
last_picture_id = Add<kPicIdLength>(last_picture_id, 1);
while (last_picture_id != picture_id) {
- gof_idx = (gof_idx + 1) % gof_size;
+ gof_idx = (gof_idx + 1) % gof_size;
RTC_CHECK(gof_idx < kMaxVp9FramesInGof);
size_t temporal_idx = info->gof->temporal_idx[gof_idx];
diff --git a/modules/video_coding/rtp_frame_reference_finder.h b/modules/video_coding/rtp_frame_reference_finder.h
index 00e638d..d7d1c12 100644
--- a/modules/video_coding/rtp_frame_reference_finder.h
+++ b/modules/video_coding/rtp_frame_reference_finder.h
@@ -12,9 +12,9 @@
#define MODULES_VIDEO_CODING_RTP_FRAME_REFERENCE_FINDER_H_
#include <array>
+#include <deque>
#include <map>
#include <memory>
-#include <deque>
#include <set>
#include <utility>
diff --git a/modules/video_coding/test/test_util.h b/modules/video_coding/test/test_util.h
index 404e49e..a38fc58 100644
--- a/modules/video_coding/test/test_util.h
+++ b/modules/video_coding/test/test_util.h
@@ -18,6 +18,7 @@
virtual ~NullEventFactory() {}
webrtc::EventWrapper* CreateEvent() override { return new NullEvent; }
+
private:
// Private class to avoid more dependencies on it in tests.
class NullEvent : public webrtc::EventWrapper {
diff --git a/modules/video_coding/utility/ivf_file_writer.cc b/modules/video_coding/utility/ivf_file_writer.cc
index 4b2cf3d..d9342f6 100644
--- a/modules/video_coding/utility/ivf_file_writer.cc
+++ b/modules/video_coding/utility/ivf_file_writer.cc
@@ -122,8 +122,7 @@
if (!WriteHeader())
return false;
- const char* codec_name =
- CodecTypeToPayloadString(codec_type_);
+ const char* codec_name = CodecTypeToPayloadString(codec_type_);
RTC_LOG(LS_WARNING) << "Created IVF file for codec data of type "
<< codec_name << " at resolution " << width_ << " x "
<< height_ << ", using "
diff --git a/modules/video_coding/utility/quality_scaler_unittest.cc b/modules/video_coding/utility/quality_scaler_unittest.cc
index 58a381c..b17062d 100644
--- a/modules/video_coding/utility/quality_scaler_unittest.cc
+++ b/modules/video_coding/utility/quality_scaler_unittest.cc
@@ -28,16 +28,16 @@
static const size_t kDefaultTimeoutMs = 150;
} // namespace
-#define DO_SYNC(q, block) do { \
- rtc::Event event(false, false); \
- q->PostTask([this, &event] { \
- block; \
- event.Set(); \
- }); \
- RTC_CHECK(event.Wait(1000)); \
+#define DO_SYNC(q, block) \
+ do { \
+ rtc::Event event(false, false); \
+ q->PostTask([this, &event] { \
+ block; \
+ event.Set(); \
+ }); \
+ RTC_CHECK(event.Wait(1000)); \
} while (0)
-
class MockAdaptationObserver : public AdaptationObserverInterface {
public:
MockAdaptationObserver() : event(false, false) {}
@@ -87,7 +87,7 @@
}
~QualityScalerTest() {
- DO_SYNC(q_, {qs_.reset(nullptr);});
+ DO_SYNC(q_, { qs_.reset(nullptr); });
}
void TriggerScale(ScaleDirection scale_direction) {
diff --git a/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc b/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc
index f8c83d3..345fdcb 100644
--- a/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc
+++ b/modules/video_coding/utility/simulcast_rate_allocator_unittest.cc
@@ -75,8 +75,8 @@
if (layer_bitrate == 0) {
EXPECT_FALSE(actual.IsSpatialLayerUsed(i));
}
- EXPECT_EQ(expected[i] * 1000U, layer_bitrate) << "Mismatch at index "
- << i;
+ EXPECT_EQ(expected[i] * 1000U, layer_bitrate)
+ << "Mismatch at index " << i;
sum += layer_bitrate;
}
EXPECT_EQ(sum, actual.get_sum_bps());
diff --git a/modules/video_coding/video_codec_initializer.cc b/modules/video_coding/video_codec_initializer.cc
index 8be04cd..7ef6cfe 100644
--- a/modules/video_coding/video_codec_initializer.cc
+++ b/modules/video_coding/video_codec_initializer.cc
@@ -33,8 +33,7 @@
if (config.codec_type == kVideoCodecMultiplex) {
VideoEncoderConfig associated_config = config.Copy();
associated_config.codec_type = kVideoCodecVP9;
- if (!SetupCodec(associated_config, streams, codec,
- bitrate_allocator)) {
+ if (!SetupCodec(associated_config, streams, codec, bitrate_allocator)) {
RTC_LOG(LS_ERROR) << "Failed to create stereo encoder configuration.";
return false;
}
@@ -42,8 +41,7 @@
return true;
}
- *codec =
- VideoEncoderConfigToVideoCodec(config, streams);
+ *codec = VideoEncoderConfigToVideoCodec(config, streams);
*bitrate_allocator = CreateBitrateAllocator(*codec);
return true;
diff --git a/modules/video_coding/video_codec_initializer_unittest.cc b/modules/video_coding/video_codec_initializer_unittest.cc
index 104b149..4edf31a 100644
--- a/modules/video_coding/video_codec_initializer_unittest.cc
+++ b/modules/video_coding/video_codec_initializer_unittest.cc
@@ -76,8 +76,7 @@
codec_out_ = VideoCodec();
bitrate_allocator_out_.reset();
temporal_layers_.clear();
- if (!VideoCodecInitializer::SetupCodec(config_, streams_,
- &codec_out_,
+ if (!VideoCodecInitializer::SetupCodec(config_, streams_, &codec_out_,
&bitrate_allocator_out_)) {
return false;
}
diff --git a/modules/video_coding/video_coding_impl.cc b/modules/video_coding/video_coding_impl.cc
index a0a3a7f..1127b0f 100644
--- a/modules/video_coding/video_coding_impl.cc
+++ b/modules/video_coding/video_coding_impl.cc
@@ -100,9 +100,7 @@
return receiver_time;
}
- void Process() override {
- receiver_.Process();
- }
+ void Process() override { receiver_.Process(); }
int32_t RegisterSendCodec(const VideoCodec* sendCodec,
uint32_t numberOfCores,
@@ -125,8 +123,7 @@
int32_t RegisterExternalEncoder(VideoEncoder* externalEncoder,
uint8_t /* payloadType */,
bool internalSource) override {
- sender_.RegisterExternalEncoder(externalEncoder,
- internalSource);
+ sender_.RegisterExternalEncoder(externalEncoder, internalSource);
return 0;
}
diff --git a/modules/video_coding/video_coding_impl.h b/modules/video_coding/video_coding_impl.h
index 4f96ad9..0f3903f 100644
--- a/modules/video_coding/video_coding_impl.h
+++ b/modules/video_coding/video_coding_impl.h
@@ -63,8 +63,7 @@
public:
typedef VideoCodingModule::SenderNackMode SenderNackMode;
- VideoSender(Clock* clock,
- EncodedImageCallback* post_encode_callback);
+ VideoSender(Clock* clock, EncodedImageCallback* post_encode_callback);
~VideoSender();
diff --git a/modules/video_coding/video_receiver.cc b/modules/video_coding/video_receiver.cc
index 25b2908..2ab6987 100644
--- a/modules/video_coding/video_receiver.cc
+++ b/modules/video_coding/video_receiver.cc
@@ -156,8 +156,7 @@
case kProtectionNackFEC: {
RTC_DCHECK(enable);
- _receiver.SetNackMode(kNack,
- media_optimization::kLowRttNackMs,
+ _receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs,
media_optimization::kMaxRttDelayThreshold);
_receiver.SetDecodeErrorMode(kNoErrors);
break;
diff --git a/modules/video_coding/video_receiver_unittest.cc b/modules/video_coding/video_receiver_unittest.cc
index feca48e..2855f7a 100644
--- a/modules/video_coding/video_receiver_unittest.cc
+++ b/modules/video_coding/video_receiver_unittest.cc
@@ -76,7 +76,7 @@
EXPECT_EQ(0, receiver_->IncomingPacket(payload, length, *header));
++header->header.sequenceNumber;
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
- receiver_->Process();;
+ receiver_->Process();
EXPECT_CALL(decoder_, Decode(_, _, _, _)).Times(1);
EXPECT_EQ(0, receiver_->Decode(100));
}
diff --git a/modules/video_coding/video_sender.cc b/modules/video_coding/video_sender.cc
index ec24a97..f10822d 100644
--- a/modules/video_coding/video_sender.cc
+++ b/modules/video_coding/video_sender.cc
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <algorithm> // std::max
#include "common_types.h" // NOLINT(build/include)
@@ -137,8 +136,7 @@
}
return;
}
- _codecDataBase.RegisterExternalEncoder(externalEncoder,
- internalSource);
+ _codecDataBase.RegisterExternalEncoder(externalEncoder, internalSource);
}
EncoderParameters VideoSender::UpdateEncoderParameters(
@@ -291,8 +289,7 @@
RTC_LOG(LS_ERROR) << "Frame conversion failed, dropping frame.";
return VCM_PARAMETER_ERROR;
}
- converted_frame = VideoFrame(converted_buffer,
- converted_frame.timestamp(),
+ converted_frame = VideoFrame(converted_buffer, converted_frame.timestamp(),
converted_frame.render_time_ms(),
converted_frame.rotation());
}
diff --git a/modules/video_coding/video_sender_unittest.cc b/modules/video_coding/video_sender_unittest.cc
index 877eb99..7321a08 100644
--- a/modules/video_coding/video_sender_unittest.cc
+++ b/modules/video_coding/video_sender_unittest.cc
@@ -19,8 +19,8 @@
#include "modules/video_coding/include/mock/mock_vcm_callbacks.h"
#include "modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "modules/video_coding/include/video_coding.h"
-#include "modules/video_coding/video_coding_impl.h"
#include "modules/video_coding/utility/default_video_bitrate_allocator.h"
+#include "modules/video_coding/video_coding_impl.h"
#include "system_wrappers/include/clock.h"
#include "test/frame_generator.h"
#include "test/gtest.h"
@@ -228,17 +228,16 @@
ExpectEncodeWithFrameTypes(stream, false);
}
- void ExpectInitialKeyFrames() {
- ExpectEncodeWithFrameTypes(-1, true);
- }
+ void ExpectInitialKeyFrames() { ExpectEncodeWithFrameTypes(-1, true); }
void ExpectEncodeWithFrameTypes(int intra_request_stream, bool first_frame) {
if (intra_request_stream == -1) {
// No intra request expected, keyframes on first frame.
FrameType frame_type = first_frame ? kVideoFrameKey : kVideoFrameDelta;
- EXPECT_CALL(encoder_,
- Encode(_, _, Pointee(ElementsAre(frame_type, frame_type,
- frame_type))))
+ EXPECT_CALL(
+ encoder_,
+ Encode(_, _,
+ Pointee(ElementsAre(frame_type, frame_type, frame_type))))
.Times(1)
.WillRepeatedly(Return(0));
return;
@@ -248,9 +247,10 @@
ASSERT_LT(intra_request_stream, kNumberOfStreams);
std::vector<FrameType> frame_types(kNumberOfStreams, kVideoFrameDelta);
frame_types[intra_request_stream] = kVideoFrameKey;
- EXPECT_CALL(encoder_,
- Encode(_, _, Pointee(ElementsAreArray(&frame_types[0],
- frame_types.size()))))
+ EXPECT_CALL(
+ encoder_,
+ Encode(_, _,
+ Pointee(ElementsAreArray(&frame_types[0], frame_types.size()))))
.Times(1)
.WillRepeatedly(Return(0));
}