Add owned data buffer to EncodedImage
Bug: webrtc:9378
Change-Id: I6a66b9301cbadf1d6517bf7a96028099970a20a3
Reviewed-on: https://webrtc-review.googlesource.com/c/117964
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26585}
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index 9f4c696..73190d1 100644
--- a/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -259,12 +259,12 @@
// "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
// bitstreams could cause overread and segfault." See
// AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
- memset(input_image.data() + input_image.size(), 0,
+ memset(input_image.mutable_data() + input_image.size(), 0,
EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
AVPacket packet;
av_init_packet(&packet);
- packet.data = input_image.data();
+ packet.data = input_image.mutable_data();
if (input_image.size() >
static_cast<size_t>(std::numeric_limits<int>::max())) {
ReportError();
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
index af1c8a6..1afdf28 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.cc
@@ -221,7 +221,7 @@
for (size_t i = 0; i < images.size(); i++) {
PackBitstream(combined_image.data() + frame_headers[i].bitstream_offset,
images[i]);
- delete[] images[i].encoded_image.data();
+ delete[] images[i].encoded_image.buffer();
}
return combined_image;
@@ -263,7 +263,7 @@
encoded_image.SetTimestamp(combined_image.Timestamp());
encoded_image._frameType = frame_headers[i].frame_type;
encoded_image.set_buffer(
- combined_image.data() + frame_headers[i].bitstream_offset,
+ combined_image.mutable_data() + frame_headers[i].bitstream_offset,
static_cast<size_t>(frame_headers[i].bitstream_length));
const size_t padding =
EncodedImage::GetBufferPaddingBytes(image_component.codec_type);
diff --git a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
index 637bce5..fb588eb 100644
--- a/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
+++ b/modules/video_coding/codecs/multiplex/multiplex_encoder_adapter.cc
@@ -253,8 +253,8 @@
}
}
stashed_images_.clear();
- if (combined_image_.data()) {
- delete[] combined_image_.data();
+ if (combined_image_.buffer()) {
+ delete[] combined_image_.buffer();
combined_image_.set_buffer(nullptr, 0);
}
return WEBRTC_VIDEO_CODEC_OK;
@@ -302,8 +302,8 @@
// We have to send out those stashed frames, otherwise the delta frame
// dependency chain is broken.
- if (combined_image_.data())
- delete[] combined_image_.data();
+ if (combined_image_.buffer())
+ delete[] combined_image_.buffer();
combined_image_ =
MultiplexEncodedImagePacker::PackAndRelease(iter->second);
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index b30c032..c64d290 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -587,9 +587,9 @@
copied_image.set_size(payload_size_bytes);
// Replace previous EncodedImage for this spatial layer.
- uint8_t* old_data = merged_encoded_frames_.at(spatial_idx).data();
- if (old_data) {
- delete[] old_data;
+ uint8_t* old_buffer = merged_encoded_frames_.at(spatial_idx).buffer();
+ if (old_buffer) {
+ delete[] old_buffer;
}
merged_encoded_frames_.at(spatial_idx) = copied_image;
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index 5e6a402..42e57a6 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -888,7 +888,7 @@
encoded_images_[encoder_idx].capacity()) {
uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
memcpy(buffer, encoded_images_[encoder_idx].data(), length);
- delete[] encoded_images_[encoder_idx].data();
+ delete[] encoded_images_[encoder_idx].buffer();
encoded_images_[encoder_idx].set_buffer(
buffer, pkt->data.frame.sz + length);
}
diff --git a/modules/video_coding/codecs/vp9/vp9_impl.cc b/modules/video_coding/codecs/vp9/vp9_impl.cc
index 3bf03fa..5feca0b 100644
--- a/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -185,8 +185,8 @@
int VP9EncoderImpl::Release() {
int ret_val = WEBRTC_VIDEO_CODEC_OK;
- if (encoded_image_.data() != nullptr) {
- delete[] encoded_image_.data();
+ if (encoded_image_.buffer() != nullptr) {
+ delete[] encoded_image_.buffer();
encoded_image_.set_buffer(nullptr, 0);
}
if (encoder_ != nullptr) {
@@ -1266,7 +1266,7 @@
}
if (pkt->data.frame.sz > encoded_image_.capacity()) {
- delete[] encoded_image_.data();
+ delete[] encoded_image_.buffer();
encoded_image_.set_buffer(new uint8_t[pkt->data.frame.sz],
pkt->data.frame.sz);
}
diff --git a/modules/video_coding/encoded_frame.cc b/modules/video_coding/encoded_frame.cc
index aff9c5a..c18ef13 100644
--- a/modules/video_coding/encoded_frame.cc
+++ b/modules/video_coding/encoded_frame.cc
@@ -31,15 +31,7 @@
}
VCMEncodedFrame::~VCMEncodedFrame() {
- Free();
-}
-
-void VCMEncodedFrame::Free() {
Reset();
- if (data() != nullptr) {
- delete[] data();
- set_buffer(nullptr, 0);
- }
}
void VCMEncodedFrame::Reset() {
@@ -156,15 +148,10 @@
void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
size_t old_capacity = capacity();
if (minimumSize > old_capacity) {
- // create buffer of sufficient size
- uint8_t* old_data = data();
-
- set_buffer(new uint8_t[minimumSize], minimumSize);
- if (old_data) {
- // copy old data
- memcpy(data(), old_data, old_capacity);
- delete[] old_data;
- }
+ // TODO(nisse): EncodedImage::Allocate is implemented as
+  // std::vector::resize, which means that old contents are kept. Find out if
+ // any code depends on that behavior.
+ Allocate(minimumSize);
}
}
diff --git a/modules/video_coding/encoded_frame.h b/modules/video_coding/encoded_frame.h
index 2cdcff9..eeaea15 100644
--- a/modules/video_coding/encoded_frame.h
+++ b/modules/video_coding/encoded_frame.h
@@ -28,10 +28,6 @@
~VCMEncodedFrame();
/**
- * Delete VideoFrame and resets members to zero
- */
- void Free();
- /**
* Set render time in milliseconds
*/
void SetRenderTime(const int64_t renderTimeMs) {
diff --git a/modules/video_coding/frame_object.cc b/modules/video_coding/frame_object.cc
index 1e0b647..c3c9f23 100644
--- a/modules/video_coding/frame_object.cc
+++ b/modules/video_coding/frame_object.cc
@@ -170,15 +170,11 @@
// Since FFmpeg use an optimized bitstream reader that reads in chunks of
// 32/64 bits we have to add at least that much padding to the buffer
// to make sure the decoder doesn't read out of bounds.
- // NOTE! EncodedImage::_size is the size of the buffer (think capacity of
- // an std::vector) and EncodedImage::_length is the actual size of
- // the bitstream (think size of an std::vector).
size_t new_size = frame_size + (codec_type_ == kVideoCodecH264
? EncodedImage::kBufferPaddingBytesH264
: 0);
if (capacity() < new_size) {
- delete[] data();
- set_buffer(new uint8_t[new_size], new_size);
+ Allocate(new_size);
}
set_size(frame_size);
diff --git a/modules/video_coding/utility/simulcast_test_fixture_impl.cc b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
index c65941a..5af14cc 100644
--- a/modules/video_coding/utility/simulcast_test_fixture_impl.cc
+++ b/modules/video_coding/utility/simulcast_test_fixture_impl.cc
@@ -82,7 +82,7 @@
// Only store the base layer.
if (encoded_image.SpatialIndex().value_or(0) == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
- delete[] encoded_key_frame_.data();
+ delete[] encoded_key_frame_.buffer();
encoded_key_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
encoded_image.capacity());
encoded_key_frame_.set_size(encoded_image.size());
@@ -91,7 +91,7 @@
memcpy(encoded_key_frame_.data(), encoded_image.data(),
encoded_image.size());
} else {
- delete[] encoded_frame_.data();
+ delete[] encoded_frame_.buffer();
encoded_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
encoded_image.capacity());
encoded_frame_.set_size(encoded_image.size());
@@ -905,7 +905,7 @@
EXPECT_EQ(0, decoder_->Decode(encoded_frame[2], false, NULL, 0));
for (int i = 0; i < 3; ++i) {
- delete[] encoded_frame[i].data();
+ delete[] encoded_frame[i].buffer();
}
}