Rename EncodedImage::_length --> size_, and make it private.

Use the size() accessor function. Also replace most nearby uses of
_buffer with data().
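
For reference, the accessor surface these call sites rely on can be
sketched as below. This is a minimal standalone approximation, not the
real api/video/encoded_image.h (which keeps more state, including the
still-public _buffer seen in unchanged lines); the private members and
the capacity bookkeeping are assumptions inferred from the set_size(),
size(), data(), set_buffer(), and capacity() calls in this CL:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  class EncodedImage {
   public:
    uint8_t* data() { return buffer_; }
    const uint8_t* data() const { return buffer_; }
    size_t size() const { return size_; }
    size_t capacity() const { return capacity_; }

    // Set the payload size; it may not exceed the allocation.
    void set_size(size_t new_size) {
      assert(new_size <= capacity_);
      size_ = new_size;
    }

    // Point the image at an externally owned buffer of |capacity| bytes.
    void set_buffer(uint8_t* buffer, size_t capacity) {
      buffer_ = buffer;
      capacity_ = capacity;
    }

   private:
    uint8_t* buffer_ = nullptr;  // Not owned.
    size_t size_ = 0;            // Bytes in use; was the public _length.
    size_t capacity_ = 0;        // Bytes allocated.
  };

The migration at each call site is then mechanical:
image._length = n becomes image.set_size(n), and reads of
image._buffer become image.data() where only the payload is touched.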

Bug: webrtc:9378
Change-Id: I1ac3459612f7c6151bd057d05448da1c4e1c6e3d
Reviewed-on: https://webrtc-review.googlesource.com/c/116783
Commit-Queue: Niels Möller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26273}
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index 89155f2..298485a 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -241,7 +241,7 @@
     ReportError();
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  if (!input_image._buffer || !input_image._length) {
+  if (!input_image.data() || !input_image.size()) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
@@ -254,24 +254,23 @@
   // FFmpeg requires padding due to some optimized bitstream readers reading 32
   // or 64 bits at once and could read over the end. See avcodec_decode_video2.
   RTC_CHECK_GE(input_image.capacity(),
-               input_image._length +
+               input_image.size() +
                    EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
   // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
   // bitstreams could cause overread and segfault." See
   // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
-  memset(input_image._buffer + input_image._length,
-         0,
+  memset(input_image._buffer + input_image.size(), 0,
          EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
 
   AVPacket packet;
   av_init_packet(&packet);
   packet.data = input_image._buffer;
-  if (input_image._length >
+  if (input_image.size() >
       static_cast<size_t>(std::numeric_limits<int>::max())) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  packet.size = static_cast<int>(input_image._length);
+  packet.size = static_cast<int>(input_image.size());
   int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000;  // ms -> μs
   av_context_->reordered_opaque = frame_timestamp_us;
 
@@ -318,8 +317,7 @@
 
   absl::optional<uint8_t> qp;
   // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
-  h264_bitstream_parser_.ParseBitstream(input_image._buffer,
-                                        input_image._length);
+  h264_bitstream_parser_.ParseBitstream(input_image.data(), input_image.size());
   int qp_int;
   if (h264_bitstream_parser_.GetLastSliceQp(&qp_int)) {
     qp.emplace(qp_int);
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index 05b26fe..3eee819b 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -135,7 +135,7 @@
   const uint8_t start_code[4] = {0, 0, 0, 1};
   frag_header->VerifyAndAllocateFragmentationHeader(fragments_count);
   size_t frag = 0;
-  encoded_image->_length = 0;
+  encoded_image->set_size(0);
   for (int layer = 0; layer < info->iLayerNum; ++layer) {
     const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
     // Iterate NAL units making up this layer, noting fragments.
@@ -149,15 +149,15 @@
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
       frag_header->fragmentationOffset[frag] =
-          encoded_image->_length + layer_len + sizeof(start_code);
+          encoded_image->size() + layer_len + sizeof(start_code);
       frag_header->fragmentationLength[frag] =
           layerInfo.pNalLengthInByte[nal] - sizeof(start_code);
       layer_len += layerInfo.pNalLengthInByte[nal];
     }
     // Copy the entire layer's data (including start codes).
-    memcpy(encoded_image->_buffer + encoded_image->_length, layerInfo.pBsBuf,
+    memcpy(encoded_image->data() + encoded_image->size(), layerInfo.pBsBuf,
            layer_len);
-    encoded_image->_length += layer_len;
+    encoded_image->set_size(encoded_image->size() + layer_len);
   }
 }
 
@@ -308,7 +308,7 @@
     encoded_images_[i]._completeFrame = true;
     encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
     encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
-    encoded_images_[i]._length = 0;
+    encoded_images_[i].set_size(0);
   }
 
   SimulcastRateAllocator init_allocator(codec_);
@@ -519,10 +519,10 @@
 
     // Encoder can skip frames to save bandwidth in which case
     // |encoded_images_[i]._length| == 0.
-    if (encoded_images_[i]._length > 0) {
+    if (encoded_images_[i].size() > 0) {
       // Parse QP.
-      h264_bitstream_parser_.ParseBitstream(encoded_images_[i]._buffer,
-                                            encoded_images_[i]._length);
+      h264_bitstream_parser_.ParseBitstream(encoded_images_[i].data(),
+                                            encoded_images_[i].size());
       h264_bitstream_parser_.GetLastSliceQp(&encoded_images_[i].qp_);
 
       // Deliver encoded image.
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
index 65146d7..03f3621 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
@@ -128,7 +128,7 @@
 }
 
 void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
-  memcpy(buffer, image.encoded_image._buffer, image.encoded_image._length);
+  memcpy(buffer, image.encoded_image.data(), image.encoded_image.size());
 }
 
 MultiplexImage::MultiplexImage(uint16_t picture_index,
@@ -170,7 +170,7 @@
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(images[i].codec_type);
     frame_header.bitstream_length =
-        static_cast<uint32_t>(images[i].encoded_image._length + padding);
+        static_cast<uint32_t>(images[i].encoded_image.size() + padding);
     bitstream_offset += frame_header.bitstream_length;
 
     frame_header.codec_type = images[i].codec_type;
@@ -188,9 +188,8 @@
     frame_headers.push_back(frame_header);
   }
 
-  combined_image._length = bitstream_offset;
-  combined_image.set_buffer(new uint8_t[combined_image._length],
-                            combined_image._length);
+  combined_image.set_buffer(new uint8_t[bitstream_offset], bitstream_offset);
+  combined_image.set_size(bitstream_offset);
 
   // header
   header_offset = PackHeader(combined_image._buffer, header);
@@ -268,7 +267,7 @@
         static_cast<size_t>(frame_headers[i].bitstream_length));
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(image_component.codec_type);
-    encoded_image._length = encoded_image.capacity() - padding;
+    encoded_image.set_size(encoded_image.capacity() - padding);
 
     image_component.encoded_image = encoded_image;
 
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
index 450cc4b..158efd3 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -275,9 +275,11 @@
   image_component.codec_type =
       PayloadStringToCodecType(associated_format_.name);
   image_component.encoded_image = encodedImage;
-  image_component.encoded_image._buffer = new uint8_t[encodedImage._length];
-  std::memcpy(image_component.encoded_image._buffer, encodedImage._buffer,
-              encodedImage._length);
+  image_component.encoded_image.set_buffer(new uint8_t[encodedImage.size()],
+                                           encodedImage.size());
+  image_component.encoded_image.set_size(encodedImage.size());
+  std::memcpy(image_component.encoded_image.data(), encodedImage.data(),
+              encodedImage.size());
 
   rtc::CritScope cs(&crit_);
   const auto& stashed_image_itr =
diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
index de3dad3..88e11e9 100644
--- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -320,8 +320,7 @@
   bool contains_pps = false;
   bool contains_idr = false;
   const std::vector<webrtc::H264::NaluIndex> nalu_indices =
-      webrtc::H264::FindNaluIndices(encoded_frame._buffer,
-                                    encoded_frame._length);
+      webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
   for (const webrtc::H264::NaluIndex& index : nalu_indices) {
     webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(
         encoded_frame._buffer[index.payload_start_offset]);
diff --git a/modules/video_coding/codecs/test/videocodec_test_libvpx.cc b/modules/video_coding/codecs/test/videocodec_test_libvpx.cc
index 4f63aa9..ae079c6 100644
--- a/modules/video_coding/codecs/test/videocodec_test_libvpx.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_libvpx.cc
@@ -45,11 +45,9 @@
                          const EncodedImage& encoded_frame) const override {
     int qp;
     if (codec == kVideoCodecVP8) {
-      EXPECT_TRUE(
-          vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+      EXPECT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     } else if (codec == kVideoCodecVP9) {
-      EXPECT_TRUE(
-          vp9::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+      EXPECT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     } else {
       RTC_NOTREACHED();
     }
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index 85e7173..706df79 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -51,8 +51,7 @@
     return 0;
 
   std::vector<webrtc::H264::NaluIndex> nalu_indices =
-      webrtc::H264::FindNaluIndices(encoded_frame._buffer,
-                                    encoded_frame._length);
+      webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
 
   RTC_CHECK(!nalu_indices.empty());
 
@@ -392,7 +391,7 @@
       frame_stat->encode_start_ns, encode_stop_ns - post_encode_time_ns_);
   frame_stat->target_bitrate_kbps =
       bitrate_allocation_.GetTemporalLayerSum(spatial_idx, temporal_idx) / 1000;
-  frame_stat->length_bytes = encoded_image._length;
+  frame_stat->length_bytes = encoded_image.size();
   frame_stat->frame_type = encoded_image._frameType;
   frame_stat->temporal_idx = temporal_idx;
   frame_stat->max_nalu_size_bytes = GetMaxNaluSizeBytes(encoded_image, config_);
@@ -554,7 +553,7 @@
   RTC_CHECK_GT(config_.NumberOfSpatialLayers(), 1);
 
   EncodedImage base_image;
-  RTC_CHECK_EQ(base_image._length, 0);
+  RTC_CHECK_EQ(base_image.size(), 0);
 
   // Each SVC layer is decoded with dedicated decoder. Find the nearest
   // non-dropped base frame and merge it and current frame into superframe.
@@ -568,29 +567,29 @@
       }
     }
   }
-  const size_t payload_size_bytes = base_image._length + encoded_image._length;
+  const size_t payload_size_bytes = base_image.size() + encoded_image.size();
   const size_t buffer_size_bytes =
       payload_size_bytes + EncodedImage::GetBufferPaddingBytes(codec);
 
   uint8_t* copied_buffer = new uint8_t[buffer_size_bytes];
   RTC_CHECK(copied_buffer);
 
-  if (base_image._length) {
+  if (base_image.size()) {
     RTC_CHECK(base_image._buffer);
-    memcpy(copied_buffer, base_image._buffer, base_image._length);
+    memcpy(copied_buffer, base_image.data(), base_image.size());
   }
-  memcpy(copied_buffer + base_image._length, encoded_image._buffer,
-         encoded_image._length);
+  memcpy(copied_buffer + base_image.size(), encoded_image.data(),
+         encoded_image.size());
 
   EncodedImage copied_image = encoded_image;
   copied_image = encoded_image;
   copied_image.set_buffer(copied_buffer, buffer_size_bytes);
-  copied_image._length = payload_size_bytes;
+  copied_image.set_size(payload_size_bytes);
 
   // Replace previous EncodedImage for this spatial layer.
-  uint8_t* old_buffer = merged_encoded_frames_.at(spatial_idx)._buffer;
-  if (old_buffer) {
-    delete[] old_buffer;
+  uint8_t* old_data = merged_encoded_frames_.at(spatial_idx).data();
+  if (old_data) {
+    delete[] old_data;
   }
   merged_encoded_frames_.at(spatial_idx) = copied_image;
 
diff --git a/modules/video_coding/codecs/test/videoprocessor.h b/modules/video_coding/codecs/test/videoprocessor.h
index aaf9ccf..943ca87 100644
--- a/modules/video_coding/codecs/test/videoprocessor.h
+++ b/modules/video_coding/codecs/test/videoprocessor.h
@@ -109,7 +109,7 @@
                          const webrtc::EncodedImage& encoded_image,
                          const webrtc::CodecSpecificInfo* codec_specific_info)
           : video_processor_(video_processor),
-            buffer_(encoded_image._buffer, encoded_image._length),
+            buffer_(encoded_image._buffer, encoded_image.size()),
             encoded_image_(encoded_image),
             codec_specific_info_(*codec_specific_info) {
         encoded_image_._buffer = buffer_.data();
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
index 7b95d2b..158f71b 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc
@@ -161,7 +161,7 @@
   if (decode_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  if (input_image._buffer == NULL && input_image._length > 0) {
+  if (input_image.data() == NULL && input_image.size() > 0) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0)
       propagation_cnt_ = 0;
@@ -249,10 +249,10 @@
   }
 
   uint8_t* buffer = input_image._buffer;
-  if (input_image._length == 0) {
+  if (input_image.size() == 0) {
     buffer = NULL;  // Triggers full frame concealment.
   }
-  if (vpx_codec_decode(decoder_, buffer, input_image._length, 0,
+  if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
                        kDecodeDeadlineRealtime)) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0) {
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 9f2cb8e..57a362c 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -861,7 +861,7 @@
   int qp = 0;
   vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
   temporal_layers_[stream_idx]->OnEncodeDone(
-      timestamp, encoded_images_[encoder_idx]._length,
+      timestamp, encoded_images_[encoder_idx].size(),
       (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0, qp, vp8Info);
 }
 
@@ -871,7 +871,7 @@
   for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
-    encoded_images_[encoder_idx]._length = 0;
+    encoded_images_[encoder_idx].set_size(0);
     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
     CodecSpecificInfo codec_specific;
     const vpx_codec_cx_pkt_t* pkt = NULL;
@@ -879,7 +879,7 @@
            NULL) {
       switch (pkt->kind) {
         case VPX_CODEC_CX_FRAME_PKT: {
-          size_t length = encoded_images_[encoder_idx]._length;
+          size_t length = encoded_images_[encoder_idx].size();
           if (pkt->data.frame.sz + length >
               encoded_images_[encoder_idx].capacity()) {
             uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
@@ -890,8 +890,8 @@
           }
           memcpy(&encoded_images_[encoder_idx]._buffer[length],
                  pkt->data.frame.buf, pkt->data.frame.sz);
-          encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
-          assert(length <= encoded_images_[encoder_idx].capacity());
+          encoded_images_[encoder_idx].set_size(
+              encoded_images_[encoder_idx].size() + pkt->data.frame.sz);
           break;
         }
         default:
@@ -921,9 +921,9 @@
     encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());
 
     if (send_stream_[stream_idx]) {
-      if (encoded_images_[encoder_idx]._length > 0) {
+      if (encoded_images_[encoder_idx].size() > 0) {
         TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
-                          encoded_images_[encoder_idx]._length);
+                          encoded_images_[encoder_idx].size());
         encoded_images_[encoder_idx]._encodedHeight =
             codec_.simulcastStream[stream_idx].height;
         encoded_images_[encoder_idx]._encodedWidth =
@@ -937,7 +937,7 @@
       } else if (!temporal_layers_[stream_idx]
                       ->SupportsEncoderFrameDropping()) {
         result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
-        if (encoded_images_[encoder_idx]._length == 0) {
+        if (encoded_images_[encoder_idx].size() == 0) {
           // Dropped frame that will be re-encoded.
           temporal_layers_[stream_idx]->OnEncodeDone(input_image.timestamp(), 0,
                                                      false, 0, nullptr);
diff --git a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index ccc294c..12ffcb5 100644
--- a/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -98,8 +98,8 @@
 
   void VerifyQpParser(const EncodedImage& encoded_frame) const {
     int qp;
-    EXPECT_GT(encoded_frame._length, 0u);
-    ASSERT_TRUE(vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+    EXPECT_GT(encoded_frame.size(), 0u);
+    ASSERT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
   }
 };
diff --git a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
index 085af97..98e0452 100644
--- a/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
+++ b/modules/video_coding/codecs/vp9/test/vp9_impl_unittest.cc
@@ -243,7 +243,7 @@
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
 
   int qp = 0;
-  ASSERT_TRUE(vp9::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+  ASSERT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
   EXPECT_EQ(encoded_frame.qp_, qp);
 }
 
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index 6f3e979..3b92738 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -1263,7 +1263,7 @@
                               pkt->data.frame.sz);
   }
   memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
-  encoded_image_._length = pkt->data.frame.sz;
+  encoded_image_.set_size(pkt->data.frame.sz);
 
   const bool is_key_frame =
       (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
@@ -1276,7 +1276,7 @@
     encoded_image_._frameType = kVideoFrameKey;
     force_key_frame_ = false;
   }
-  RTC_DCHECK_LE(encoded_image_._length, encoded_image_.capacity());
+  RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());
 
   memset(&codec_specific_, 0, sizeof(codec_specific_));
   absl::optional<int> spatial_index;
@@ -1288,7 +1288,7 @@
     UpdateReferenceBuffers(*pkt, pics_since_key_);
   }
 
-  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
+  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
   encoded_image_.SetTimestamp(input_image_->timestamp());
   encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
   encoded_image_.rotation_ = input_image_->rotation();
@@ -1315,7 +1315,7 @@
 }
 
 void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
-  if (encoded_image_._length > 0) {
+  if (encoded_image_.size() > 0) {
     codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;
 
     // No data partitioning in VP9, so 1 partition only.
@@ -1323,13 +1323,13 @@
     RTPFragmentationHeader frag_info;
     frag_info.VerifyAndAllocateFragmentationHeader(1);
     frag_info.fragmentationOffset[part_idx] = 0;
-    frag_info.fragmentationLength[part_idx] = encoded_image_._length;
+    frag_info.fragmentationLength[part_idx] = encoded_image_.size();
     frag_info.fragmentationPlType[part_idx] = 0;
     frag_info.fragmentationTimeDiff[part_idx] = 0;
 
     encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_,
                                                &frag_info);
-    encoded_image_._length = 0;
+    encoded_image_.set_size(0);
 
     if (codec_.mode == VideoCodecMode::kScreensharing) {
       const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
@@ -1432,13 +1432,13 @@
   vpx_codec_iter_t iter = nullptr;
   vpx_image_t* img;
   uint8_t* buffer = input_image._buffer;
-  if (input_image._length == 0) {
+  if (input_image.size() == 0) {
     buffer = nullptr;  // Triggers full frame concealment.
   }
   // During decode libvpx may get and release buffers from |frame_buffer_pool_|.
   // In practice libvpx keeps a few (~3-4) buffers alive at a time.
   if (vpx_codec_decode(decoder_, buffer,
-                       static_cast<unsigned int>(input_image._length), 0,
+                       static_cast<unsigned int>(input_image.size()), 0,
                        VPX_DL_REALTIME)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
diff --git a/modules/video_coding/encoded_frame.cc b/modules/video_coding/encoded_frame.cc
index e19146d..3c677f7 100644
--- a/modules/video_coding/encoded_frame.cc
+++ b/modules/video_coding/encoded_frame.cc
@@ -52,7 +52,7 @@
   _encodedHeight = 0;
   _completeFrame = false;
   _missingFrame = false;
-  _length = 0;
+  set_size(0);
   _codecSpecificInfo.codecType = kVideoCodecGeneric;
   _codec = kVideoCodecGeneric;
   rotation_ = kVideoRotation_0;
diff --git a/modules/video_coding/frame_buffer.cc b/modules/video_coding/frame_buffer.cc
index 9be2ef0..908a2a2 100644
--- a/modules/video_coding/frame_buffer.cc
+++ b/modules/video_coding/frame_buffer.cc
@@ -138,8 +138,8 @@
   } else if (retVal == -3) {
     return kOutOfBoundsPacket;
   }
-  // update length
-  _length = size() + static_cast<uint32_t>(retVal);
+  // update size
+  set_size(size() + static_cast<uint32_t>(retVal));
 
   _latestPacketTimeMs = timeInMs;
 
@@ -216,7 +216,7 @@
 
 void VCMFrameBuffer::Reset() {
   TRACE_EVENT0("webrtc", "VCMFrameBuffer::Reset");
-  _length = 0;
+  set_size(0);
   _sessionInfo.Reset();
   _payloadType = 0;
   _nackCount = 0;
@@ -265,7 +265,7 @@
 void VCMFrameBuffer::PrepareForDecode(bool continuous) {
   TRACE_EVENT0("webrtc", "VCMFrameBuffer::PrepareForDecode");
   size_t bytes_removed = _sessionInfo.MakeDecodable();
-  _length -= bytes_removed;
+  set_size(size() - bytes_removed);
   // Transfer frame information to EncodedFrame and create any codec
   // specific information.
   _frameType = _sessionInfo.FrameType();
diff --git a/modules/video_coding/frame_buffer2_unittest.cc b/modules/video_coding/frame_buffer2_unittest.cc
index 321281f..94a5551 100644
--- a/modules/video_coding/frame_buffer2_unittest.cc
+++ b/modules/video_coding/frame_buffer2_unittest.cc
@@ -91,10 +91,6 @@
   int64_t ReceivedTime() const override { return 0; }
 
   int64_t RenderTime() const override { return _renderTimeMs; }
-
-  // In EncodedImage |_length| is used to descibe its size and |_size| to
-  // describe its capacity.
-  void SetSize(int size) { _length = size; }
 };
 
 class VCMReceiveStatisticsCallbackMock : public VCMReceiveStatisticsCallback {
@@ -168,7 +164,7 @@
     frame->is_last_spatial_layer = last_spatial_layer;
     // Add some data to buffer.
     frame->VerifyAndAllocate(kFrameSize);
-    frame->SetSize(kFrameSize);
+    frame->set_size(kFrameSize);
     for (size_t r = 0; r < references.size(); ++r)
       frame->references[r] = references[r];
 
@@ -491,7 +487,7 @@
   {
     std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
     frame->VerifyAndAllocate(kFrameSize);
-    frame->SetSize(kFrameSize);
+    frame->set_size(kFrameSize);
     frame->id.picture_id = pid;
     frame->id.spatial_layer = 0;
     frame->SetTimestamp(ts);
diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc
index 8fd8983..6fcb573 100644
--- a/modules/video_coding/frame_object.cc
+++ b/modules/video_coding/frame_object.cc
@@ -139,11 +139,6 @@
   return _renderTimeMs;
 }
 
-void RtpFrameObject::SetSize(size_t size) {
-  RTC_DCHECK_LE(size, capacity());
-  _length = size;
-}
-
 bool RtpFrameObject::delayed_by_retransmission() const {
   return times_nacked() > 0;
 }
@@ -188,7 +183,7 @@
     set_buffer(new uint8_t[new_size], new_size);
   }
 
-  _length = frame_size;
+  set_size(frame_size);
 }
 
 }  // namespace video_coding
diff --git a/modules/video_coding/frame_object.h b/modules/video_coding/frame_object.h
index 5a3efd9..8b9ad92 100644
--- a/modules/video_coding/frame_object.h
+++ b/modules/video_coding/frame_object.h
@@ -40,7 +40,6 @@
   VideoCodecType codec_type() const;
   int64_t ReceivedTime() const override;
   int64_t RenderTime() const override;
-  void SetSize(size_t size);
   bool delayed_by_retransmission() const override;
   absl::optional<RTPVideoHeader> GetRtpVideoHeader() const;
   absl::optional<RtpGenericFrameDescriptor> GetGenericFrameDescriptor() const;
diff --git a/modules/video_coding/generic_encoder.cc b/modules/video_coding/generic_encoder.cc
index 221a8b2..52a82f3 100644
--- a/modules/video_coding/generic_encoder.cc
+++ b/modules/video_coding/generic_encoder.cc
@@ -327,7 +327,7 @@
 
     // Outliers trigger timing frames, but do not affect scheduled timing
     // frames.
-    if (outlier_frame_size && encoded_image->_length >= *outlier_frame_size) {
+    if (outlier_frame_size && encoded_image->size() >= *outlier_frame_size) {
       timing_flags |= VideoSendTiming::kTriggeredBySize;
     }
 
diff --git a/modules/video_coding/generic_encoder_unittest.cc b/modules/video_coding/generic_encoder_unittest.cc
index 66d7873..4ea2506 100644
--- a/modules/video_coding/generic_encoder_unittest.cc
+++ b/modules/video_coding/generic_encoder_unittest.cc
@@ -82,6 +82,7 @@
       {delay_ms, kDefaultOutlierFrameSizePercent});
   callback.OnFrameRateChanged(kFramerate);
   int s, i;
+  std::vector<uint8_t> frame_data(max_frame_size);
   std::vector<std::vector<FrameType>> result(num_streams);
   for (s = 0; s < num_streams; ++s)
     callback.OnTargetBitrateChanged(average_frame_sizes[s] * kFramerate, s);
@@ -94,7 +95,8 @@
 
       EncodedImage image;
       CodecSpecificInfo codec_specific;
-      image._length = FrameSize(min_frame_size, max_frame_size, s, i);
+      image.set_buffer(frame_data.data(), frame_data.size());
+      image.set_size(FrameSize(min_frame_size, max_frame_size, s, i));
       image.capture_time_ms_ = current_timestamp;
       image.SetTimestamp(static_cast<uint32_t>(current_timestamp * 90));
       image.SetSpatialIndex(s);
@@ -187,7 +189,9 @@
   EncodedImage image;
   CodecSpecificInfo codec_specific;
   int64_t timestamp = 1;
-  image._length = 500;
+  uint8_t frame_data[500];
+  image.set_buffer(frame_data, sizeof(frame_data));
+  image.set_size(sizeof(frame_data));
   image.capture_time_ms_ = timestamp;
   image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   codec_specific.codecType = kVideoCodecGeneric;
@@ -218,7 +222,9 @@
   const int64_t kEncodeStartDelayMs = 2;
   const int64_t kEncodeFinishDelayMs = 10;
   int64_t timestamp = 1;
-  image._length = 500;
+  uint8_t frame_data[500];
+  image.set_buffer(frame_data, sizeof(frame_data));
+  image.set_size(sizeof(frame_data));
   image.capture_time_ms_ = timestamp;
   image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   codec_specific.codecType = kVideoCodecGeneric;
diff --git a/modules/video_coding/utility/ivf_file_writer.cc b/modules/video_coding/utility/ivf_file_writer.cc
index d9917c8..225996a 100644
--- a/modules/video_coding/utility/ivf_file_writer.cc
+++ b/modules/video_coding/utility/ivf_file_writer.cc
@@ -160,7 +160,7 @@
 
   const size_t kFrameHeaderSize = 12;
   if (byte_limit_ != 0 &&
-      bytes_written_ + kFrameHeaderSize + encoded_image._length > byte_limit_) {
+      bytes_written_ + kFrameHeaderSize + encoded_image.size() > byte_limit_) {
     RTC_LOG(LS_WARNING) << "Closing IVF file due to reaching size limit: "
                         << byte_limit_ << " bytes.";
     Close();
@@ -168,16 +168,16 @@
   }
   uint8_t frame_header[kFrameHeaderSize] = {};
   ByteWriter<uint32_t>::WriteLittleEndian(
-      &frame_header[0], static_cast<uint32_t>(encoded_image._length));
+      &frame_header[0], static_cast<uint32_t>(encoded_image.size()));
   ByteWriter<uint64_t>::WriteLittleEndian(&frame_header[4], timestamp);
   if (file_.Write(frame_header, kFrameHeaderSize) < kFrameHeaderSize ||
-      file_.Write(encoded_image._buffer, encoded_image._length) <
-          encoded_image._length) {
+      file_.Write(encoded_image.data(), encoded_image.size()) <
+          encoded_image.size()) {
     RTC_LOG(LS_ERROR) << "Unable to write frame to file.";
     return false;
   }
 
-  bytes_written_ += kFrameHeaderSize + encoded_image._length;
+  bytes_written_ += kFrameHeaderSize + encoded_image.size();
 
   ++num_frames_;
   return true;
diff --git a/modules/video_coding/utility/ivf_file_writer_unittest.cc b/modules/video_coding/utility/ivf_file_writer_unittest.cc
index 6f71496..82d604a 100644
--- a/modules/video_coding/utility/ivf_file_writer_unittest.cc
+++ b/modules/video_coding/utility/ivf_file_writer_unittest.cc
@@ -41,11 +41,11 @@
                             int num_frames,
                             bool use_capture_tims_ms) {
     EncodedImage frame;
-    frame._buffer = dummy_payload;
+    frame.set_buffer(dummy_payload, sizeof(dummy_payload));
     frame._encodedWidth = width;
     frame._encodedHeight = height;
     for (int i = 1; i <= num_frames; ++i) {
-      frame._length = i % sizeof(dummy_payload);
+      frame.set_size(i % sizeof(dummy_payload));
       if (use_capture_tims_ms) {
         frame.capture_time_ms_ = i;
       } else {
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
index 0230316..6700e87 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -85,18 +85,18 @@
         delete[] encoded_key_frame_._buffer;
         encoded_key_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                       encoded_image.capacity());
-        encoded_key_frame_._length = encoded_image._length;
+        encoded_key_frame_.set_size(encoded_image.size());
         encoded_key_frame_._frameType = kVideoFrameKey;
         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
         memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
-               encoded_image._length);
+               encoded_image.size());
       } else {
         delete[] encoded_frame_._buffer;
         encoded_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                   encoded_image.capacity());
-        encoded_frame_._length = encoded_image._length;
+        encoded_frame_.set_size(encoded_image.size());
         memcpy(encoded_frame_._buffer, encoded_image._buffer,
-               encoded_image._length);
+               encoded_image.size());
       }
     }
     if (is_vp8) {
@@ -858,11 +858,11 @@
             encoded_frame[index].set_buffer(
                 new uint8_t[encoded_image.capacity()],
                 encoded_image.capacity());
-            encoded_frame[index]._length = encoded_image._length;
+            encoded_frame[index].set_size(encoded_image.size());
             encoded_frame[index]._frameType = encoded_image._frameType;
             encoded_frame[index]._completeFrame = encoded_image._completeFrame;
             memcpy(encoded_frame[index]._buffer, encoded_image._buffer,
-                   encoded_image._length);
+                   encoded_image.size());
             return EncodedImageCallback::Result(
                 EncodedImageCallback::Result::OK, 0);
           }));
diff --git a/modules/video_coding/video_packet_buffer_unittest.cc b/modules/video_coding/video_packet_buffer_unittest.cc
index 49b1c26..190e0e7 100644
--- a/modules/video_coding/video_packet_buffer_unittest.cc
+++ b/modules/video_coding/video_packet_buffer_unittest.cc
@@ -643,7 +643,7 @@
   packet_buffer_->InsertPacket(&packet);
 
   ASSERT_EQ(1UL, frames_from_callback_.size());
-  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage()._length,
+  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().size(),
             sizeof(data_data));
   EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().capacity(),
             sizeof(data_data) + EncodedImage::kBufferPaddingBytesH264);
diff --git a/modules/video_coding/video_sender_unittest.cc b/modules/video_coding/video_sender_unittest.cc
index 44e9c34..11a69be 100644
--- a/modules/video_coding/video_sender_unittest.cc
+++ b/modules/video_coding/video_sender_unittest.cc
@@ -104,7 +104,7 @@
                         const RTPFragmentationHeader* fragmentation) override {
     assert(codec_specific_info);
     frame_data_.push_back(
-        FrameData(encoded_image._length, *codec_specific_info));
+        FrameData(encoded_image.size(), *codec_specific_info));
     return Result(Result::OK, encoded_image.Timestamp());
   }