Use size_t more consistently for packet/payload lengths.
See design doc at https://docs.google.com/a/chromium.org/document/d/1I6nmE9D_BmCY-IoV6MDPY2V6WYpEI-dg2apWXTfZyUI/edit?usp=sharing for more information.
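For reference, the representative shape of the change, taken from the encoder interfaces touched below (an illustrative excerpt of the pattern only; see the full diff for the complete set of signatures):

    virtual int InitEncode(const VideoCodec* codecSettings,
                           int /*numberOfCores*/,
-                          uint32_t /*maxPayloadSize*/) OVERRIDE;
+                          size_t /*maxPayloadSize*/) OVERRIDE;

Members and locals holding byte counts (_encodedBytes, frame_length_in_bytes, sizeBytes, and similar) move from int/uint32_t to size_t in the same way.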
This CL was reviewed and approved in pieces in the following CLs:
https://webrtc-codereview.appspot.com/24209004/
https://webrtc-codereview.appspot.com/24229004/
https://webrtc-codereview.appspot.com/24259004/
https://webrtc-codereview.appspot.com/25109004/
https://webrtc-codereview.appspot.com/26099004/
https://webrtc-codereview.appspot.com/27069004/
https://webrtc-codereview.appspot.com/27969004/
https://webrtc-codereview.appspot.com/27989004/
https://webrtc-codereview.appspot.com/29009004/
https://webrtc-codereview.appspot.com/30929004/
https://webrtc-codereview.appspot.com/30939004/
https://webrtc-codereview.appspot.com/31999004/
Committing as TBR to the original reviewers.
BUG=chromium:81439
TEST=none
TBR=pthatcher,henrik.lundin,tina.legrand,stefan,tkchin,glaznev,kjellander,perkj,mflodman,henrika,asapersson,niklas.enbom
Review URL: https://webrtc-codereview.appspot.com/23129004
git-svn-id: http://webrtc.googlecode.com/svn/trunk@7726 4adac7df-926f-26a2-2b94-8c16560cd09d
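Note on the printf call sites in the diff below: lengths that become size_t are printed with the PRIuS macro from webrtc/base/format_macros.h instead of %d. A minimal sketch of the usage pattern being assumed (illustrative only, not code from this CL):

  #include <stdio.h>

  #include "webrtc/base/format_macros.h"  // Defines PRIuS for size_t.

  static void LogFrameLength(size_t frame_length_in_bytes) {
    // PRIuS expands to the platform-appropriate size_t conversion specifier.
    printf("Frame length: %" PRIuS " bytes\n", frame_length_in_bytes);
  }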
diff --git a/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h b/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h
index 1e99e69..2d41fd0 100644
--- a/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h
+++ b/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h
@@ -18,8 +18,6 @@
namespace webrtc {
-enum { kI420HeaderSize = 4 };
-
class I420Encoder : public VideoEncoder {
public:
I420Encoder();
@@ -38,7 +36,7 @@
// <0 - Error
virtual int InitEncode(const VideoCodec* codecSettings,
int /*numberOfCores*/,
- uint32_t /*maxPayloadSize*/) OVERRIDE;
+ size_t /*maxPayloadSize*/) OVERRIDE;
// "Encode" an I420 image (as a part of a video stream). The encoded image
// will be returned to the user via the encode complete callback.
diff --git a/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc b/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc
index 69cc9e2..bb61f5e 100644
--- a/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc
+++ b/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc
@@ -15,6 +15,10 @@
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+namespace {
+const size_t kI420HeaderSize = 4;
+}
+
namespace webrtc {
I420Encoder::I420Encoder() : _inited(false), _encodedImage(),
@@ -39,7 +43,7 @@
int I420Encoder::InitEncode(const VideoCodec* codecSettings,
int /*numberOfCores*/,
- uint32_t /*maxPayloadSize */) {
+ size_t /*maxPayloadSize */) {
if (codecSettings == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -53,10 +57,9 @@
_encodedImage._buffer = NULL;
_encodedImage._size = 0;
}
- const uint32_t newSize = CalcBufferSize(kI420,
- codecSettings->width,
- codecSettings->height)
- + kI420HeaderSize;
+ const size_t newSize =
+ CalcBufferSize(kI420, codecSettings->width, codecSettings->height) +
+ kI420HeaderSize;
uint8_t* newBuffer = new uint8_t[newSize];
if (newBuffer == NULL) {
return WEBRTC_VIDEO_CODEC_MEMORY;
@@ -95,9 +98,10 @@
return WEBRTC_VIDEO_CODEC_ERR_SIZE;
}
- int req_length = CalcBufferSize(kI420, inputImage.width(),
- inputImage.height()) + kI420HeaderSize;
- if (_encodedImage._size > static_cast<unsigned int>(req_length)) {
+ size_t req_length =
+ CalcBufferSize(kI420, inputImage.width(), inputImage.height()) +
+ kI420HeaderSize;
+ if (_encodedImage._size > req_length) {
// Reallocate buffer.
delete [] _encodedImage._buffer;
@@ -194,8 +198,7 @@
_height = height;
// Verify that the available length is sufficient:
- uint32_t req_length = CalcBufferSize(kI420, _width, _height)
- + kI420HeaderSize;
+ size_t req_length = CalcBufferSize(kI420, _width, _height) + kI420HeaderSize;
if (req_length > inputImage._length) {
return WEBRTC_VIDEO_CODEC_ERROR;
diff --git a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
index 69e99ae..18bf5b8 100644
--- a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
@@ -31,7 +31,7 @@
MOCK_CONST_METHOD2(Version, int32_t(int8_t *version, int32_t length));
MOCK_METHOD3(InitEncode, int32_t(const VideoCodec* codecSettings,
int32_t numberOfCores,
- uint32_t maxPayloadSize));
+ size_t maxPayloadSize));
MOCK_METHOD3(Encode, int32_t(const I420VideoFrame& inputImage,
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<VideoFrameType>* frame_types));
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
index f4ca92a..6e7139e 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.cc
@@ -13,6 +13,8 @@
#include <assert.h>
#include <stdio.h>
+#include "webrtc/base/format_macros.h"
+
namespace webrtc {
namespace test {
@@ -72,7 +74,7 @@
// Must set completeFrame to false to inform the decoder about this:
encoded_image->_completeFrame = false;
if (verbose_) {
- printf("Dropped %d packets for frame %d (frame length: %d)\n",
+ printf("Dropped %d packets for frame %d (frame length: %" PRIuS ")\n",
nbr_packets_dropped, encoded_image->_timeStamp,
encoded_image->_length);
}
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
index 0fafa22..69bc35b 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator.h
@@ -42,11 +42,11 @@
}
// Packet size in bytes. Default: 1500 bytes.
- int packet_size_in_bytes;
+ size_t packet_size_in_bytes;
// Encoder specific setting of maximum size in bytes of each payload.
// Default: 1440 bytes.
- int max_payload_size_in_bytes;
+ size_t max_payload_size_in_bytes;
// Packet loss mode. Two different packet loss models are supported:
// uniform or burst. This setting has no effect unless
diff --git a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
index 576d005..ace7bc0 100644
--- a/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/test/packet_manipulator_unittest.cc
@@ -60,11 +60,11 @@
void VerifyPacketLoss(int expected_nbr_packets_dropped,
int actual_nbr_packets_dropped,
- int expected_packet_data_length,
+ size_t expected_packet_data_length,
uint8_t* expected_packet_data,
EncodedImage& actual_image) {
EXPECT_EQ(expected_nbr_packets_dropped, actual_nbr_packets_dropped);
- EXPECT_EQ(expected_packet_data_length, static_cast<int>(image_._length));
+ EXPECT_EQ(expected_packet_data_length, image_._length);
EXPECT_EQ(0, memcmp(expected_packet_data, actual_image._buffer,
expected_packet_data_length));
}
@@ -82,7 +82,7 @@
}
TEST_F(PacketManipulatorTest, UniformDropNoneSmallFrame) {
- int data_length = 400; // smaller than the packet size
+ size_t data_length = 400; // smaller than the packet size
image_._length = data_length;
PacketManipulatorImpl manipulator(&packet_reader_, no_drop_config_, false);
int nbr_packets_dropped = manipulator.ManipulatePackets(&image_);
@@ -120,7 +120,7 @@
TEST_F(PacketManipulatorTest, BurstDropNinePackets) {
// Create a longer packet data structure (10 packets)
const int kNbrPackets = 10;
- const int kDataLength = kPacketSizeInBytes * kNbrPackets;
+ const size_t kDataLength = kPacketSizeInBytes * kNbrPackets;
uint8_t data[kDataLength];
uint8_t* data_pointer = data;
// Fill with 0s, 1s and so on to be able to easily verify which were dropped:
diff --git a/webrtc/modules/video_coding/codecs/test/stats.cc b/webrtc/modules/video_coding/codecs/test/stats.cc
index f6605f9..91a2f3c 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.cc
+++ b/webrtc/modules/video_coding/codecs/test/stats.cc
@@ -15,6 +15,8 @@
#include <algorithm> // min_element, max_element
+#include "webrtc/base/format_macros.h"
+
namespace webrtc {
namespace test {
@@ -70,11 +72,11 @@
// Calculate min, max, average and total encoding time
int total_encoding_time_in_us = 0;
int total_decoding_time_in_us = 0;
- int total_encoded_frames_lengths = 0;
- int total_encoded_key_frames_lengths = 0;
- int total_encoded_nonkey_frames_lengths = 0;
- int nbr_keyframes = 0;
- int nbr_nonkeyframes = 0;
+ size_t total_encoded_frames_lengths = 0;
+ size_t total_encoded_key_frames_lengths = 0;
+ size_t total_encoded_nonkey_frames_lengths = 0;
+ size_t nbr_keyframes = 0;
+ size_t nbr_nonkeyframes = 0;
for (FrameStatisticsIterator it = stats_.begin();
it != stats_.end(); ++it) {
@@ -141,23 +143,24 @@
printf("Frame sizes:\n");
frame = std::min_element(stats_.begin(),
stats_.end(), LessForEncodedSize);
- printf(" Min : %7d bytes (frame %d)\n",
+ printf(" Min : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
frame = std::max_element(stats_.begin(),
stats_.end(), LessForEncodedSize);
- printf(" Max : %7d bytes (frame %d)\n",
+ printf(" Max : %7" PRIuS " bytes (frame %d)\n",
frame->encoded_frame_length_in_bytes, frame->frame_number);
- printf(" Average : %7d bytes\n",
- static_cast<int>(total_encoded_frames_lengths / stats_.size()));
+ printf(" Average : %7" PRIuS " bytes\n",
+ total_encoded_frames_lengths / stats_.size());
if (nbr_keyframes > 0) {
- printf(" Average key frame size : %7d bytes (%d keyframes)\n",
- total_encoded_key_frames_lengths / nbr_keyframes,
- nbr_keyframes);
+ printf(" Average key frame size : %7" PRIuS " bytes (%" PRIuS
+ " keyframes)\n",
+ total_encoded_key_frames_lengths / nbr_keyframes, nbr_keyframes);
}
if (nbr_nonkeyframes > 0) {
- printf(" Average non-key frame size: %7d bytes (%d frames)\n",
+ printf(" Average non-key frame size: %7" PRIuS " bytes (%" PRIuS
+ " frames)\n",
total_encoded_nonkey_frames_lengths / nbr_nonkeyframes,
nbr_nonkeyframes);
}
diff --git a/webrtc/modules/video_coding/codecs/test/stats.h b/webrtc/modules/video_coding/codecs/test/stats.h
index 2998773..8dc8f15 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.h
+++ b/webrtc/modules/video_coding/codecs/test/stats.h
@@ -31,14 +31,14 @@
int frame_number;
// How many packets were discarded of the encoded frame data (if any).
int packets_dropped;
- int total_packets;
+ size_t total_packets;
// Current bit rate. Calculated out of the size divided with the time
// interval per frame.
int bit_rate_in_kbps;
// Copied from EncodedImage
- int encoded_frame_length_in_bytes;
+ size_t encoded_frame_length_in_bytes;
webrtc::VideoFrameType frame_type;
};
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index 93738ca..412ec10 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -30,7 +30,7 @@
output_dir("out"),
networking_config(),
exclude_frame_types(kExcludeOnlyFirstKeyFrame),
- frame_length_in_bytes(-1),
+ frame_length_in_bytes(0),
use_single_core(false),
keyframe_interval(0),
codec_settings(NULL),
@@ -157,7 +157,7 @@
num_spatial_resizes_ = 0;
}
-int VideoProcessorImpl::EncodedFrameSize() {
+size_t VideoProcessorImpl::EncodedFrameSize() {
return encoded_frame_size_;
}
@@ -330,11 +330,12 @@
frame_number, ret_val);
}
// TODO(mikhal): Extracting the buffer for now - need to update test.
- int length = CalcBufferSize(kI420, up_image.width(), up_image.height());
+ size_t length = CalcBufferSize(kI420, up_image.width(), up_image.height());
scoped_ptr<uint8_t[]> image_buffer(new uint8_t[length]);
- length = ExtractBuffer(up_image, length, image_buffer.get());
+ int extracted_length = ExtractBuffer(up_image, length, image_buffer.get());
+ assert(extracted_length > 0);
// Update our copy of the last successful frame:
- memcpy(last_successful_frame_buffer_, image_buffer.get(), length);
+ memcpy(last_successful_frame_buffer_, image_buffer.get(), extracted_length);
bool write_success = frame_writer_->WriteFrame(image_buffer.get());
assert(write_success);
if (!write_success) {
@@ -343,11 +344,11 @@
} else { // No resize.
// Update our copy of the last successful frame:
// TODO(mikhal): Add as a member function, so won't be allocated per frame.
- int length = CalcBufferSize(kI420, image.width(), image.height());
+ size_t length = CalcBufferSize(kI420, image.width(), image.height());
scoped_ptr<uint8_t[]> image_buffer(new uint8_t[length]);
- length = ExtractBuffer(image, length, image_buffer.get());
- assert(length > 0);
- memcpy(last_successful_frame_buffer_, image_buffer.get(), length);
+ int extracted_length = ExtractBuffer(image, length, image_buffer.get());
+ assert(extracted_length > 0);
+ memcpy(last_successful_frame_buffer_, image_buffer.get(), extracted_length);
bool write_success = frame_writer_->WriteFrame(image_buffer.get());
assert(write_success);
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
index 20bcab5..2cfde52 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.h
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -76,7 +76,7 @@
// The length of a single frame of the input video file. This value is
// calculated out of the width and height according to the video format
// specification. Must be set before processing.
- int frame_length_in_bytes;
+ size_t frame_length_in_bytes;
// Force the encoder and decoder to use a single core for processing.
// Using a single core is necessary to get a deterministic behavior for the
@@ -144,7 +144,7 @@
// Return the size of the encoded frame in bytes. Dropped frames by the
// encoder are regarded as zero size.
- virtual int EncodedFrameSize() = 0;
+ virtual size_t EncodedFrameSize() = 0;
// Return the number of dropped frames.
virtual int NumberDroppedFrames() = 0;
@@ -178,7 +178,7 @@
// Updates the encoder with the target bit rate and the frame rate.
virtual void SetRates(int bit_rate, int frame_rate) OVERRIDE;
// Return the size of the encoded frame in bytes.
- virtual int EncodedFrameSize() OVERRIDE;
+ virtual size_t EncodedFrameSize() OVERRIDE;
// Return the number of dropped frames.
virtual int NumberDroppedFrames() OVERRIDE;
// Return the number of spatial resizes.
@@ -206,7 +206,7 @@
bool last_frame_missing_;
// If Init() has executed successfully.
bool initialized_;
- int encoded_frame_size_;
+ size_t encoded_frame_size_;
int prev_time_stamp_;
int num_dropped_frames_;
int num_spatial_resizes_;
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index 420ef59..0c423a7 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -266,8 +266,7 @@
// For every encoded frame, update the rate control metrics.
void UpdateRateControlMetrics(int frame_num, VideoFrameType frame_type) {
- int encoded_frame_size = processor_->EncodedFrameSize();
- float encoded_size_kbits = encoded_frame_size * 8.0f / 1000.0f;
+ float encoded_size_kbits = processor_->EncodedFrameSize() * 8.0f / 1000.0f;
// Update layer data.
// Update rate mismatch relative to per-frame bandwidth for delta frames.
if (frame_type == kDeltaFrame) {
diff --git a/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.cc b/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.cc
index dcd7479..3ad6ed7 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.cc
+++ b/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.cc
@@ -218,7 +218,7 @@
return _frameBufferQueue.empty();
}
-uint32_t VideoEncodeCompleteCallback::EncodedBytes()
+size_t VideoEncodeCompleteCallback::EncodedBytes()
{
return _encodedBytes;
}
@@ -251,7 +251,7 @@
return 0;
}
-uint32_t VideoDecodeCompleteCallback::DecodedBytes()
+size_t VideoDecodeCompleteCallback::DecodedBytes()
{
return _decodedBytes;
}
diff --git a/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.h b/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.h
index 1e62534..63ac0bf 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.h
+++ b/webrtc/modules/video_coding/codecs/test_framework/normal_async_test.h
@@ -153,12 +153,12 @@
Encoded(webrtc::EncodedImage& encodedImage,
const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
const webrtc::RTPFragmentationHeader* fragmentation = NULL);
- uint32_t EncodedBytes();
+ size_t EncodedBytes();
private:
FILE* _encodedFile;
FrameQueue* _frameQueue;
NormalAsyncTest& _test;
- uint32_t _encodedBytes;
+ size_t _encodedBytes;
};
class VideoDecodeCompleteCallback : public webrtc::DecodedImageCallback
@@ -176,11 +176,11 @@
ReceivedDecodedReferenceFrame(const uint64_t pictureId);
virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId);
- uint32_t DecodedBytes();
+ size_t DecodedBytes();
private:
FILE* _decodedFile;
NormalAsyncTest& _test;
- uint32_t _decodedBytes;
+ size_t _decodedBytes;
};
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_TEST_FRAMEWORK_NORMAL_ASYNC_TEST_H_
diff --git a/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.cc b/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.cc
index 6bb7bbe..c6315a7 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.cc
+++ b/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.cc
@@ -92,8 +92,8 @@
_frameQueue.pop_front();
// save image for future freeze-frame
- unsigned int length = CalcBufferSize(kI420, decodedImage.width(),
- decodedImage.height());
+ size_t length =
+ CalcBufferSize(kI420, decodedImage.width(), decodedImage.height());
if (_lastFrameLength < length)
{
if (_lastFrame) delete [] _lastFrame;
@@ -189,7 +189,7 @@
newEncBuf.VerifyAndAllocate(_lengthSourceFrame);
_inBufIdx = 0;
_outBufIdx = 0;
- int size = 1;
+ size_t size = 1;
int kept = 0;
int thrown = 0;
while ((size = NextPacket(1500, &packet)) > 0)
@@ -204,7 +204,7 @@
// Use the ByteLoss function if you want to lose only
// parts of a packet, and not the whole packet.
- //int size2 = ByteLoss(size, packet, 15);
+ //size_t size2 = ByteLoss(size, packet, 15);
thrown++;
//if (size2 != size)
//{
@@ -227,28 +227,27 @@
//printf("Encoded left: %d bytes\n", _encodedVideoBuffer.Length());
}
-int PacketLossTest::NextPacket(int mtu, unsigned char **pkg)
+size_t PacketLossTest::NextPacket(size_t mtu, unsigned char **pkg)
{
unsigned char *buf = _frameToDecode->_frame->Buffer();
*pkg = buf + _inBufIdx;
- if (static_cast<long>(_frameToDecode->_frame->Length()) - _inBufIdx <= mtu)
- {
- int size = _frameToDecode->_frame->Length() - _inBufIdx;
- _inBufIdx = _frameToDecode->_frame->Length();
- return size;
- }
- _inBufIdx += mtu;
- return mtu;
+ size_t old_idx = _inBufIdx;
+ _inBufIdx = std::min(_inBufIdx + mtu, _frameToDecode->_frame->Length());
+ return _inBufIdx - old_idx;
}
-int PacketLossTest::ByteLoss(int size, unsigned char *pkg, int bytesToLose)
+size_t PacketLossTest::ByteLoss(size_t size,
+ unsigned char *pkg,
+ size_t bytesToLose)
{
return size;
}
-void PacketLossTest::InsertPacket(VideoFrame *buf, unsigned char *pkg, int size)
+void PacketLossTest::InsertPacket(VideoFrame *buf,
+ unsigned char *pkg,
+ size_t size)
{
- if (static_cast<long>(buf->Size()) - _outBufIdx < size)
+ if ((_outBufIdx + size) > buf->Size())
{
printf("InsertPacket error!\n");
return;
diff --git a/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.h b/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.h
index e917054..48a67a2 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.h
+++ b/webrtc/modules/video_coding/codecs/test_framework/packet_loss_test.h
@@ -34,12 +34,15 @@
virtual void Teardown();
virtual void CodecSpecific_InitBitrate();
virtual int DoPacketLoss();
- virtual int NextPacket(int size, unsigned char **pkg);
- virtual int ByteLoss(int size, unsigned char *pkg, int bytesToLose);
- virtual void InsertPacket(webrtc::VideoFrame *buf, unsigned char *pkg,
- int size);
- int _inBufIdx;
- int _outBufIdx;
+ virtual size_t NextPacket(size_t mtu, unsigned char **pkg);
+ virtual size_t ByteLoss(size_t size,
+ unsigned char *pkg,
+ size_t bytesToLose);
+ virtual void InsertPacket(webrtc::VideoFrame *buf,
+ unsigned char *pkg,
+ size_t size);
+ size_t _inBufIdx;
+ size_t _outBufIdx;
// When NACK is being simulated _lossProbabilty is zero,
// otherwise it is set equal to _lossRate.
@@ -50,10 +53,10 @@
int _totalKept;
int _totalThrown;
- int _sumChannelBytes;
+ size_t _sumChannelBytes;
std::list<uint32_t> _frameQueue;
uint8_t* _lastFrame;
- uint32_t _lastFrameLength;
+ size_t _lastFrameLength;
};
diff --git a/webrtc/modules/video_coding/codecs/test_framework/test.h b/webrtc/modules/video_coding/codecs/test_framework/test.h
index 7558abe..db891ca 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/test.h
+++ b/webrtc/modules/video_coding/codecs/test_framework/test.h
@@ -48,8 +48,8 @@
webrtc::VideoEncoder* _encoder;
webrtc::VideoDecoder* _decoder;
- uint32_t _bitRate;
- unsigned int _lengthSourceFrame;
+ uint32_t _bitRate;
+ size_t _lengthSourceFrame;
unsigned char* _sourceBuffer;
webrtc::I420VideoFrame _inputVideoBuffer;
// TODO(mikhal): For now using VideoFrame for encodedBuffer, should use a
@@ -61,7 +61,7 @@
std::string _inname;
std::string _outname;
std::string _encodedName;
- int _sumEncBytes;
+ size_t _sumEncBytes;
int _width;
int _halfWidth;
int _height;
diff --git a/webrtc/modules/video_coding/codecs/test_framework/unit_test.cc b/webrtc/modules/video_coding/codecs/test_framework/unit_test.cc
index ab8d4d2..1af462c 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/unit_test.cc
+++ b/webrtc/modules/video_coding/codecs/test_framework/unit_test.cc
@@ -146,7 +146,7 @@
return false;
}
-uint32_t
+size_t
UnitTest::WaitForEncodedFrame() const
{
int64_t startTime = TickTime::MillisecondTimestamp();
@@ -160,7 +160,7 @@
return 0;
}
-uint32_t
+size_t
UnitTest::WaitForDecodedFrame() const
{
int64_t startTime = TickTime::MillisecondTimestamp();
@@ -225,8 +225,8 @@
_inst.codecSpecific.VP8.denoisingOn = true;
// Get input frame.
- ASSERT_TRUE(fread(_refFrame, 1, _lengthSourceFrame, _sourceFile)
- == _lengthSourceFrame);
+ ASSERT_EQ(_lengthSourceFrame,
+ fread(_refFrame, 1, _lengthSourceFrame, _sourceFile));
int size_y = _inst.width * _inst.height;
int size_uv = ((_inst.width + 1) / 2) * ((_inst.height + 1) / 2);
_inputVideoBuffer.CreateFrame(size_y, _refFrame,
@@ -244,7 +244,7 @@
EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
_encoder->Encode(_inputVideoBuffer, NULL, NULL);
_refEncFrameLength = WaitForEncodedFrame();
- ASSERT_TRUE(_refEncFrameLength > 0);
+ ASSERT_GT(_refEncFrameLength, 0u);
_refEncFrame = new unsigned char[_refEncFrameLength];
memcpy(_refEncFrame, _encodedVideoBuffer.Buffer(), _refEncFrameLength);
@@ -255,7 +255,7 @@
EXPECT_TRUE(_decoder->InitDecode(&_inst, 1) == WEBRTC_VIDEO_CODEC_OK);
ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
- unsigned int frameLength = 0;
+ size_t frameLength = 0;
int i = 0;
_inputVideoBuffer.CreateEmptyFrame(_inst.width, _inst.height, _inst.width,
(_inst.width + 1) / 2,
@@ -266,12 +266,12 @@
if (i > 0)
{
// Insert yet another frame.
- ASSERT_TRUE(fread(_refFrame, 1, _lengthSourceFrame,
- _sourceFile) == _lengthSourceFrame);
+ ASSERT_EQ(_lengthSourceFrame,
+ fread(_refFrame, 1, _lengthSourceFrame, _sourceFile));
EXPECT_EQ(0, ConvertToI420(kI420, _refFrame, 0, 0, _width, _height,
0, kRotateNone, &_inputVideoBuffer));
_encoder->Encode(_inputVideoBuffer, NULL, NULL);
- ASSERT_TRUE(WaitForEncodedFrame() > 0);
+ ASSERT_GT(WaitForEncodedFrame(), 0u);
} else {
// The first frame is always a key frame.
encodedImage._frameType = kKeyFrame;
@@ -285,7 +285,7 @@
i++;
}
rewind(_sourceFile);
- EXPECT_TRUE(frameLength == _lengthSourceFrame);
+ EXPECT_EQ(_lengthSourceFrame, frameLength);
ExtractBuffer(_decodedVideoBuffer, _lengthSourceFrame, _refDecFrame);
}
@@ -324,9 +324,9 @@
EncodedImage encodedImage;
VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
int ret = _decoder->Decode(encodedImage, 0, NULL);
- int frameLength = WaitForDecodedFrame();
+ size_t frameLength = WaitForDecodedFrame();
_encodedVideoBuffer.SetLength(0);
- return ret == WEBRTC_VIDEO_CODEC_OK ? frameLength : ret;
+ return ret == WEBRTC_VIDEO_CODEC_OK ? static_cast<int>(frameLength) : ret;
}
int
@@ -343,13 +343,11 @@
}
int ret = _decoder->Decode(encodedImage, 0, NULL);
- unsigned int frameLength = WaitForDecodedFrame();
- assert(ret == WEBRTC_VIDEO_CODEC_OK && (frameLength == 0 || frameLength
- == _lengthSourceFrame));
- EXPECT_TRUE(ret == WEBRTC_VIDEO_CODEC_OK && (frameLength == 0 || frameLength
- == _lengthSourceFrame));
+ size_t frameLength = WaitForDecodedFrame();
+ EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, ret);
+ EXPECT_TRUE(frameLength == 0 || frameLength == _lengthSourceFrame);
_encodedVideoBuffer.SetLength(0);
- return ret == WEBRTC_VIDEO_CODEC_OK ? frameLength : ret;
+ return ret == WEBRTC_VIDEO_CODEC_OK ? static_cast<int>(frameLength) : ret;
}
// Test pure virtual VideoEncoder and VideoDecoder APIs.
@@ -357,7 +355,7 @@
UnitTest::Perform()
{
UnitTest::Setup();
- int frameLength;
+ size_t frameLength;
I420VideoFrame inputImage;
EncodedImage encodedImage;
@@ -448,21 +446,21 @@
std::vector<VideoFrameType> frame_types(1, frame_type);
EXPECT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, &frame_types) ==
WEBRTC_VIDEO_CODEC_OK);
- EXPECT_TRUE(WaitForEncodedFrame() > 0);
+ EXPECT_GT(WaitForEncodedFrame(), 0u);
}
// Init then encode.
_encodedVideoBuffer.SetLength(0);
EXPECT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, NULL) ==
WEBRTC_VIDEO_CODEC_OK);
- EXPECT_TRUE(WaitForEncodedFrame() > 0);
+ EXPECT_GT(WaitForEncodedFrame(), 0u);
EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
_encoder->Encode(_inputVideoBuffer, NULL, NULL);
frameLength = WaitForEncodedFrame();
- EXPECT_TRUE(frameLength > 0);
+ EXPECT_GT(frameLength, 0u);
EXPECT_TRUE(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
- _encodedVideoBuffer.Buffer(), frameLength) == true);
+ _encodedVideoBuffer.Buffer(), frameLength));
// Reset then encode.
_encodedVideoBuffer.SetLength(0);
@@ -472,9 +470,9 @@
EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
_encoder->Encode(_inputVideoBuffer, NULL, NULL);
frameLength = WaitForEncodedFrame();
- EXPECT_TRUE(frameLength > 0);
+ EXPECT_GT(frameLength, 0u);
EXPECT_TRUE(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
- _encodedVideoBuffer.Buffer(), frameLength) == true);
+ _encodedVideoBuffer.Buffer(), frameLength));
// Release then encode.
_encodedVideoBuffer.SetLength(0);
@@ -485,9 +483,9 @@
EXPECT_TRUE(_encoder->InitEncode(&_inst, 1, 1440) == WEBRTC_VIDEO_CODEC_OK);
_encoder->Encode(_inputVideoBuffer, NULL, NULL);
frameLength = WaitForEncodedFrame();
- EXPECT_TRUE(frameLength > 0);
+ EXPECT_GT(frameLength, 0u);
EXPECT_TRUE(CheckIfBitExact(_refEncFrame, _refEncFrameLength,
- _encodedVideoBuffer.Buffer(), frameLength) == true);
+ _encodedVideoBuffer.Buffer(), frameLength));
//----- Decoder parameter tests -----
@@ -522,8 +520,8 @@
ASSERT_FALSE(SetCodecSpecificParameters() != WEBRTC_VIDEO_CODEC_OK);
for (int i = 0; i < 100; i++)
{
- ASSERT_TRUE(fread(tmpBuf, 1, _refEncFrameLength, _sourceFile)
- == _refEncFrameLength);
+ ASSERT_EQ(_refEncFrameLength,
+ fread(tmpBuf, 1, _refEncFrameLength, _sourceFile));
_encodedVideoBuffer.CopyFrame(_refEncFrameLength, tmpBuf);
VideoEncodedBufferToEncodedImage(_encodedVideoBuffer, encodedImage);
int ret = _decoder->Decode(encodedImage, false, NULL);
@@ -564,12 +562,12 @@
_decoder->Decode(encodedImage, false, NULL);
frameLength = WaitForDecodedFrame();
}
- unsigned int length = CalcBufferSize(kI420, width, height);
+ size_t length = CalcBufferSize(kI420, width, height);
scoped_ptr<uint8_t[]> decoded_buffer(new uint8_t[length]);
ExtractBuffer(_decodedVideoBuffer, _lengthSourceFrame,
decoded_buffer.get());
EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), frameLength, _refDecFrame,
- _lengthSourceFrame) == true);
+ _lengthSourceFrame));
// Reset then decode.
EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
@@ -583,7 +581,7 @@
ExtractBuffer(_decodedVideoBuffer, _lengthSourceFrame,
decoded_buffer.get());
EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), frameLength,
- _refDecFrame, _lengthSourceFrame) == true);
+ _refDecFrame, _lengthSourceFrame));
// Decode with other size, reset, then decode with original size again
// to verify that decoder is reset to a "fresh" state upon Reset().
@@ -614,7 +612,7 @@
tempInst.width, tmpHalfWidth, tmpHalfWidth);
_encoder->Encode(tempInput, NULL, NULL);
frameLength = WaitForEncodedFrame();
- EXPECT_TRUE(frameLength > 0);
+ EXPECT_GT(frameLength, 0u);
// Reset then decode.
EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
frameLength = 0;
@@ -631,7 +629,7 @@
WEBRTC_VIDEO_CODEC_OK);
_encoder->Encode(_inputVideoBuffer, NULL, NULL);
frameLength = WaitForEncodedFrame();
- EXPECT_TRUE(frameLength > 0);
+ EXPECT_GT(frameLength, 0u);
// Reset then decode original frame again.
EXPECT_TRUE(_decoder->Reset() == WEBRTC_VIDEO_CODEC_OK);
@@ -644,11 +642,11 @@
}
// check that decoded frame matches with reference
- unsigned int length = CalcBufferSize(kI420, width, height);
+ size_t length = CalcBufferSize(kI420, width, height);
scoped_ptr<uint8_t[]> decoded_buffer(new uint8_t[length]);
ExtractBuffer(_decodedVideoBuffer, length, decoded_buffer.get());
EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), length,
- _refDecFrame, _lengthSourceFrame) == true);
+ _refDecFrame, _lengthSourceFrame));
}
// Release then decode.
@@ -664,7 +662,7 @@
}
ExtractBuffer(_decodedVideoBuffer, length, decoded_buffer.get());
EXPECT_TRUE(CheckIfBitExact(decoded_buffer.get(), frameLength,
- _refDecFrame, _lengthSourceFrame) == true);
+ _refDecFrame, _lengthSourceFrame));
_encodedVideoBuffer.SetLength(0);
delete [] tmpBuf;
@@ -697,8 +695,7 @@
ASSERT_TRUE(_encoder->Encode(_inputVideoBuffer, NULL, NULL) ==
WEBRTC_VIDEO_CODEC_OK);
frameLength = WaitForEncodedFrame();
- //ASSERT_TRUE(frameLength);
- EXPECT_TRUE(frameLength > 0);
+ EXPECT_GT(frameLength, 0u);
encTimeStamp = _encodedVideoBuffer.TimeStamp();
EXPECT_TRUE(_inputVideoBuffer.timestamp() ==
static_cast<unsigned>(encTimeStamp));
@@ -707,8 +704,7 @@
is_key_frame_ = true;
}
- frameLength = Decode();
- if (frameLength == 0)
+ if (Decode() == 0)
{
frameDelay++;
}
@@ -735,7 +731,7 @@
{
int frames = 0;
VideoFrame inputImage;
- uint32_t frameLength;
+ size_t frameLength;
// Do not specify maxBitRate (as in ViE).
_inst.maxBitrate = 0;
@@ -754,7 +750,7 @@
for (int i = 0; i < nBitrates; i++)
{
_bitRate = bitRate[i];
- int totalBytes = 0;
+ size_t totalBytes = 0;
_inst.startBitrate = _bitRate;
_encoder->InitEncode(&_inst, 4, 1440);
_decoder->Reset();
@@ -789,27 +785,26 @@
ASSERT_EQ(_encoder->Encode(_inputVideoBuffer, NULL, NULL),
WEBRTC_VIDEO_CODEC_OK);
frameLength = WaitForEncodedFrame();
- ASSERT_GE(frameLength, 0u);
totalBytes += frameLength;
frames++;
_encodedVideoBuffer.SetLength(0);
}
- uint32_t actualBitrate =
- (totalBytes / frames * _inst.maxFramerate * 8)/1000;
- printf("Target bitrate: %d kbps, actual bitrate: %d kbps\n", _bitRate,
- actualBitrate);
+ uint32_t actualBitrate = static_cast<uint32_t>(
+ (totalBytes / frames * _inst.maxFramerate * 8) / 1000);
+ printf("Target bitrate: %u kbps, actual bitrate: %u kbps\n", _bitRate,
+ actualBitrate);
// Test for close match over reasonable range.
- EXPECT_TRUE(abs(int32_t(actualBitrate - _bitRate)) <
- 0.12 * _bitRate);
+ EXPECT_LT(abs(static_cast<int32_t>(actualBitrate - _bitRate)),
+ 0.12 * _bitRate);
ASSERT_TRUE(feof(_sourceFile) != 0);
rewind(_sourceFile);
}
}
bool
-UnitTest::CheckIfBitExact(const void* ptrA, unsigned int aLengthBytes,
- const void* ptrB, unsigned int bLengthBytes)
+UnitTest::CheckIfBitExact(const void* ptrA, size_t aLengthBytes,
+ const void* ptrB, size_t bLengthBytes)
{
if (aLengthBytes != bLengthBytes)
{
diff --git a/webrtc/modules/video_coding/codecs/test_framework/unit_test.h b/webrtc/modules/video_coding/codecs/test_framework/unit_test.h
index 4e2fea0..7e55a90 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/unit_test.h
+++ b/webrtc/modules/video_coding/codecs/test_framework/unit_test.h
@@ -48,11 +48,11 @@
virtual int DecodeWithoutAssert();
virtual int SetCodecSpecificParameters() {return 0;};
- virtual bool CheckIfBitExact(const void *ptrA, unsigned int aLengthBytes,
- const void *ptrB, unsigned int bLengthBytes);
+ virtual bool CheckIfBitExact(const void *ptrA, size_t aLengthBytes,
+ const void *ptrB, size_t bLengthBytes);
- uint32_t WaitForEncodedFrame() const;
- uint32_t WaitForDecodedFrame() const;
+ size_t WaitForEncodedFrame() const;
+ size_t WaitForDecodedFrame() const;
int _tests;
int _errors;
@@ -61,7 +61,7 @@
unsigned char* _refFrame;
unsigned char* _refEncFrame;
unsigned char* _refDecFrame;
- unsigned int _refEncFrameLength;
+ size_t _refEncFrameLength;
FILE* _sourceFile;
bool is_key_frame_;
diff --git a/webrtc/modules/video_coding/codecs/test_framework/video_source.cc b/webrtc/modules/video_coding/codecs/test_framework/video_source.cc
index 23fbaa8..7092e45 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/video_source.cc
+++ b/webrtc/modules/video_coding/codecs/test_framework/video_source.cc
@@ -116,7 +116,7 @@
return kUndefined;
}
-unsigned int
+size_t
VideoSource::GetFrameLength() const
{
return webrtc::CalcBufferSize(_type, _width, _height);
diff --git a/webrtc/modules/video_coding/codecs/test_framework/video_source.h b/webrtc/modules/video_coding/codecs/test_framework/video_source.h
index b3c4e79..44f56ae 100644
--- a/webrtc/modules/video_coding/codecs/test_framework/video_source.h
+++ b/webrtc/modules/video_coding/codecs/test_framework/video_source.h
@@ -71,7 +71,7 @@
VideoSize GetSize() const;
static VideoSize GetSize(uint16_t width, uint16_t height);
- unsigned int GetFrameLength() const;
+ size_t GetFrameLength() const;
// Returns a human-readable size string.
static const char* GetSizeString(VideoSize size);
diff --git a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
index 7a12446..ced92bc 100644
--- a/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
+++ b/webrtc/modules/video_coding/codecs/tools/video_quality_measurement.cc
@@ -20,6 +20,7 @@
#endif
#include "gflags/gflags.h"
+#include "webrtc/base/format_macros.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
#include "webrtc/modules/video_coding/codecs/test/stats.h"
@@ -204,7 +205,8 @@
FLAGS_packet_size);
return 7;
}
- config->networking_config.packet_size_in_bytes = FLAGS_packet_size;
+ config->networking_config.packet_size_in_bytes =
+ static_cast<size_t>(FLAGS_packet_size);
if (FLAGS_max_payload_size <= 0) {
fprintf(stderr, "Max payload size must be >0 bytes, was: %d\n",
@@ -212,7 +214,7 @@
return 8;
}
config->networking_config.max_payload_size_in_bytes =
- FLAGS_max_payload_size;
+ static_cast<size_t>(FLAGS_max_payload_size);
// Check the width and height
if (FLAGS_width <= 0 || FLAGS_height <= 0) {
@@ -290,10 +292,10 @@
Log(" Input filename : %s\n", config.input_filename.c_str());
Log(" Output directory : %s\n", config.output_dir.c_str());
Log(" Output filename : %s\n", config.output_filename.c_str());
- Log(" Frame length : %d bytes\n", config.frame_length_in_bytes);
- Log(" Packet size : %d bytes\n",
+ Log(" Frame length : %" PRIuS " bytes\n", config.frame_length_in_bytes);
+ Log(" Packet size : %" PRIuS " bytes\n",
config.networking_config.packet_size_in_bytes);
- Log(" Max payload size : %d bytes\n",
+ Log(" Max payload size : %" PRIuS " bytes\n",
config.networking_config.max_payload_size_in_bytes);
Log(" Packet loss:\n");
Log(" Mode : %s\n",
@@ -320,8 +322,8 @@
const webrtc::test::FrameStatistic& f = stats.stats_[i];
const webrtc::test::FrameResult& ssim = ssim_result.frames[i];
const webrtc::test::FrameResult& psnr = psnr_result.frames[i];
- printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7d, %d, %2d, %2d, "
- "%5.3f, %5.2f\n",
+ printf("%4d, %d, %d, %2d, %2d, %6d, %6d, %5d, %7" PRIuS ", %d, %2d, %2"
+ PRIuS ", %5.3f, %5.2f\n",
f.frame_number,
f.encoding_successful,
f.decoding_successful,
@@ -352,13 +354,13 @@
"{'name': 'input_filename', 'value': '%s'},\n"
"{'name': 'output_filename', 'value': '%s'},\n"
"{'name': 'output_dir', 'value': '%s'},\n"
- "{'name': 'packet_size_in_bytes', 'value': '%d'},\n"
- "{'name': 'max_payload_size_in_bytes', 'value': '%d'},\n"
+ "{'name': 'packet_size_in_bytes', 'value': '%" PRIuS "'},\n"
+ "{'name': 'max_payload_size_in_bytes', 'value': '%" PRIuS "'},\n"
"{'name': 'packet_loss_mode', 'value': '%s'},\n"
"{'name': 'packet_loss_probability', 'value': '%f'},\n"
"{'name': 'packet_loss_burst_length', 'value': '%d'},\n"
"{'name': 'exclude_frame_types', 'value': '%s'},\n"
- "{'name': 'frame_length_in_bytes', 'value': '%d'},\n"
+ "{'name': 'frame_length_in_bytes', 'value': '%" PRIuS "'},\n"
"{'name': 'use_single_core', 'value': '%s'},\n"
"{'name': 'keyframe_interval;', 'value': '%d'},\n"
"{'name': 'video_codec_type', 'value': '%s'},\n"
@@ -411,9 +413,9 @@
"'encoding_successful': %s, 'decoding_successful': %s, "
"'encode_time': %d, 'decode_time': %d, "
"'encode_return_code': %d, 'decode_return_code': %d, "
- "'bit_rate': %d, 'encoded_frame_length': %d, 'frame_type': %s, "
- "'packets_dropped': %d, 'total_packets': %d, "
- "'ssim': %f, 'psnr': %f},\n",
+ "'bit_rate': %d, 'encoded_frame_length': %" PRIuS ", "
+ "'frame_type': %s, 'packets_dropped': %d, "
+ "'total_packets': %" PRIuS ", 'ssim': %f, 'psnr': %f},\n",
f.frame_number,
f.encoding_successful ? "True " : "False",
f.decoding_successful ? "True " : "False",
diff --git a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
index 5e0bfc8..6666bab 100644
--- a/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/test/vp8_impl_unittest.cc
@@ -148,7 +148,7 @@
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->InitDecode(&codec_inst_, 1));
}
- int WaitForEncodedFrame() const {
+ size_t WaitForEncodedFrame() const {
int64_t startTime = TickTime::MillisecondTimestamp();
while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitEncTimeMs) {
if (encode_complete_callback_->EncodeComplete()) {
@@ -158,7 +158,7 @@
return 0;
}
- int WaitForDecodedFrame() const {
+ size_t WaitForDecodedFrame() const {
int64_t startTime = TickTime::MillisecondTimestamp();
while (TickTime::MillisecondTimestamp() - startTime < kMaxWaitDecTimeMs) {
if (decode_complete_callback_->DecodeComplete()) {
@@ -188,7 +188,7 @@
scoped_ptr<VideoDecoder> decoder_;
VideoFrame encoded_video_frame_;
I420VideoFrame decoded_video_frame_;
- unsigned int length_source_frame_;
+ size_t length_source_frame_;
VideoCodec codec_inst_;
};
@@ -239,14 +239,14 @@
TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
- EXPECT_GT(WaitForEncodedFrame(), 0);
+ EXPECT_GT(WaitForEncodedFrame(), 0u);
EncodedImage encodedImage;
VideoFrameToEncodedImage(encoded_video_frame_, encodedImage);
// First frame should be a key frame.
encodedImage._frameType = kKeyFrame;
encodedImage.ntp_time_ms_ = kTestNtpTimeMs;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encodedImage, false, NULL));
- EXPECT_GT(WaitForDecodedFrame(), 0);
+ EXPECT_GT(WaitForDecodedFrame(), 0u);
// Compute PSNR on all planes (faster than SSIM).
EXPECT_GT(I420PSNR(&input_frame_, &decoded_video_frame_), 36);
EXPECT_EQ(kTestTimestamp, decoded_video_frame_.timestamp());
@@ -256,7 +256,7 @@
TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) {
SetUpEncodeDecode();
encoder_->Encode(input_frame_, NULL, NULL);
- EXPECT_GT(WaitForEncodedFrame(), 0);
+ EXPECT_GT(WaitForEncodedFrame(), 0u);
EncodedImage encodedImage;
VideoFrameToEncodedImage(encoded_video_frame_, encodedImage);
// Setting complete to false -> should return an error.
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 2a2a9d0..5345c80 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -116,7 +116,7 @@
int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
int number_of_cores,
- uint32_t /*max_payload_size*/) {
+ size_t /*max_payload_size*/) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -791,7 +791,7 @@
for (int i = 0; i < fragmentation->fragmentationVectorSize; ++i) {
const uint8_t* partition = input_image._buffer +
fragmentation->fragmentationOffset[i];
- const uint32_t partition_length =
+ const size_t partition_length =
fragmentation->fragmentationLength[i];
if (vpx_codec_decode(decoder_,
partition,
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
index fec53d5..06f2a26 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
@@ -39,7 +39,7 @@
virtual int InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
- uint32_t max_payload_size);
+ size_t max_payload_size);
virtual int Encode(const I420VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
index ffa0bcc..992f089 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_sequence_coder.cc
@@ -30,11 +30,11 @@
const webrtc::RTPFragmentationHeader*);
// Returns the encoded image.
webrtc::EncodedImage encoded_image() { return encoded_image_; }
- int encoded_bytes() { return encoded_bytes_; }
+ size_t encoded_bytes() { return encoded_bytes_; }
private:
webrtc::EncodedImage encoded_image_;
FILE* encoded_file_;
- int encoded_bytes_;
+ size_t encoded_bytes_;
};
Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
@@ -141,7 +141,7 @@
}
EXPECT_EQ(0, decoder->InitDecode(&inst, 1));
webrtc::I420VideoFrame input_frame;
- unsigned int length = webrtc::CalcBufferSize(webrtc::kI420, width, height);
+ size_t length = webrtc::CalcBufferSize(webrtc::kI420, width, height);
webrtc::scoped_ptr<uint8_t[]> frame_buffer(new uint8_t[length]);
int half_width = (width + 1) / 2;
@@ -175,9 +175,8 @@
int64_t totalExecutionTime = endtime - starttime;
printf("Total execution time: %.2lf ms\n",
static_cast<double>(totalExecutionTime));
- int sum_enc_bytes = encoder_callback.encoded_bytes();
- double actual_bit_rate = 8.0 * sum_enc_bytes /
- (frame_cnt / inst.maxFramerate);
+ double actual_bit_rate =
+ 8.0 * encoder_callback.encoded_bytes() / (frame_cnt / inst.maxFramerate);
printf("Actual bitrate: %f kbps\n", actual_bit_rate / 1000);
webrtc::test::QualityMetricsResult psnr_result, ssim_result;
EXPECT_EQ(0, webrtc::test::I420MetricsFromFiles(
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index 734e73d..fa5b05b 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -103,7 +103,7 @@
int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
int number_of_cores,
- uint32_t /*max_payload_size*/) {
+ size_t /*max_payload_size*/) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -428,7 +428,7 @@
}
if (vpx_codec_decode(decoder_,
buffer,
- input_image._length,
+ static_cast<unsigned int>(input_image._length),
0,
VPX_DL_REALTIME)) {
return WEBRTC_VIDEO_CODEC_ERROR;
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index 94788db..355aadf 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -34,7 +34,7 @@
virtual int InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
- uint32_t max_payload_size) OVERRIDE;
+ size_t max_payload_size) OVERRIDE;
virtual int Encode(const I420VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
diff --git a/webrtc/modules/video_coding/main/interface/video_coding.h b/webrtc/modules/video_coding/main/interface/video_coding.h
index ef9209a..d46ac15 100644
--- a/webrtc/modules/video_coding/main/interface/video_coding.h
+++ b/webrtc/modules/video_coding/main/interface/video_coding.h
@@ -467,8 +467,8 @@
// Return value : VCM_OK, on success.
// < 0, on error.
virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
- uint32_t payloadLength,
- const WebRtcRTPHeader& rtpInfo) = 0;
+ size_t payloadLength,
+ const WebRtcRTPHeader& rtpInfo) = 0;
// Minimum playout delay (Used for lip-sync). This is the minimum delay required
// to sync with audio. Not included in VideoCodingModule::Delay()
diff --git a/webrtc/modules/video_coding/main/interface/video_coding_defines.h b/webrtc/modules/video_coding/main/interface/video_coding_defines.h
index efdc41b..72658a3 100644
--- a/webrtc/modules/video_coding/main/interface/video_coding_defines.h
+++ b/webrtc/modules/video_coding/main/interface/video_coding_defines.h
@@ -75,7 +75,7 @@
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- uint32_t payloadSize,
+ size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* rtpVideoHdr) = 0;
protected:
diff --git a/webrtc/modules/video_coding/main/source/codec_database.cc b/webrtc/modules/video_coding/main/source/codec_database.cc
index 2fc9246..3bd65d6 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.cc
+++ b/webrtc/modules/video_coding/main/source/codec_database.cc
@@ -25,6 +25,10 @@
#include "webrtc/modules/video_coding/main/source/internal_defines.h"
#include "webrtc/system_wrappers/interface/logging.h"
+namespace {
+const size_t kDefaultPayloadSize = 1440;
+}
+
namespace webrtc {
VideoCodecVP8 VideoEncoder::GetDefaultVp8Settings() {
@@ -227,12 +231,12 @@
bool VCMCodecDataBase::SetSendCodec(
const VideoCodec* send_codec,
int number_of_cores,
- int max_payload_size,
+ size_t max_payload_size,
VCMEncodedFrameCallback* encoded_frame_callback) {
if (!send_codec) {
return false;
}
- if (max_payload_size <= 0) {
+ if (max_payload_size == 0) {
max_payload_size = kDefaultPayloadSize;
}
if (number_of_cores <= 0) {
diff --git a/webrtc/modules/video_coding/main/source/codec_database.h b/webrtc/modules/video_coding/main/source/codec_database.h
index f27218f..a31decb 100644
--- a/webrtc/modules/video_coding/main/source/codec_database.h
+++ b/webrtc/modules/video_coding/main/source/codec_database.h
@@ -22,10 +22,6 @@
namespace webrtc {
-enum VCMCodecDBProperties {
- kDefaultPayloadSize = 1440
-};
-
struct VCMDecoderMapItem {
public:
VCMDecoderMapItem(VideoCodec* settings,
@@ -70,7 +66,7 @@
// Returns true if the codec was successfully registered, false otherwise.
bool SetSendCodec(const VideoCodec* send_codec,
int number_of_cores,
- int max_payload_size,
+ size_t max_payload_size,
VCMEncodedFrameCallback* encoded_frame_callback);
// Gets the current send codec. Relevant for internal codecs only.
@@ -175,7 +171,7 @@
uint8_t payload_type) const;
int number_of_cores_;
- int max_payload_size_;
+ size_t max_payload_size_;
bool periodic_key_frames_;
bool pending_encoder_reset_;
bool current_enc_is_external_;
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.h b/webrtc/modules/video_coding/main/source/encoded_frame.h
index dd0f843..4be4e6b 100644
--- a/webrtc/modules/video_coding/main/source/encoded_frame.h
+++ b/webrtc/modules/video_coding/main/source/encoded_frame.h
@@ -56,7 +56,7 @@
/**
* Get frame length
*/
- uint32_t Length() const {return _length;}
+ size_t Length() const {return _length;}
/**
* Get frame timestamp (90kHz)
*/
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.cc b/webrtc/modules/video_coding/main/source/frame_buffer.cc
index fce68fb..6dd3554 100644
--- a/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ b/webrtc/modules/video_coding/main/source/frame_buffer.cc
@@ -268,11 +268,11 @@
_sessionInfo.BuildVP8FragmentationHeader(_buffer, _length,
&_fragmentation);
} else {
- int bytes_removed = _sessionInfo.MakeDecodable();
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
_length -= bytes_removed;
}
#else
- int bytes_removed = _sessionInfo.MakeDecodable();
+ size_t bytes_removed = _sessionInfo.MakeDecodable();
_length -= bytes_removed;
#endif
// Transfer frame information to EncodedFrame and create any codec
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.cc b/webrtc/modules/video_coding/main/source/generic_encoder.cc
index 655f7ac..d6a7bbb 100644
--- a/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -82,7 +82,7 @@
int32_t
VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
- uint32_t maxPayloadSize)
+ size_t maxPayloadSize)
{
_bitRate = settings->startBitrate * 1000;
_frameRate = settings->maxFramerate;
@@ -218,7 +218,7 @@
FrameType frameType = VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);
- uint32_t encodedBytes = 0;
+ size_t encodedBytes = 0;
if (_sendCallback != NULL)
{
encodedBytes = encodedImage._length;
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.h b/webrtc/modules/video_coding/main/source/generic_encoder.h
index 9277260..8eb1480 100644
--- a/webrtc/modules/video_coding/main/source/generic_encoder.h
+++ b/webrtc/modules/video_coding/main/source/generic_encoder.h
@@ -84,7 +84,7 @@
*/
int32_t InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
- uint32_t maxPayloadSize);
+ size_t maxPayloadSize);
/**
* Encode raw image
* inputFrame : Frame containing raw image
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.cc b/webrtc/modules/video_coding/main/source/media_optimization.cc
index 5789480..630f013 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization.cc
+++ b/webrtc/modules/video_coding/main/source/media_optimization.cc
@@ -62,14 +62,14 @@
} // namespace
struct MediaOptimization::EncodedFrameSample {
- EncodedFrameSample(int size_bytes,
+ EncodedFrameSample(size_t size_bytes,
uint32_t timestamp,
int64_t time_complete_ms)
: size_bytes(size_bytes),
timestamp(timestamp),
time_complete_ms(time_complete_ms) {}
- uint32_t size_bytes;
+ size_t size_bytes;
uint32_t timestamp;
int64_t time_complete_ms;
};
@@ -369,7 +369,7 @@
return count;
}
-int32_t MediaOptimization::UpdateWithEncodedData(int encoded_length,
+int32_t MediaOptimization::UpdateWithEncodedData(size_t encoded_length,
uint32_t timestamp,
FrameType encoded_frame_type) {
CriticalSectionScoped lock(crit_sect_.get());
@@ -532,7 +532,7 @@
avg_sent_bit_rate_bps_ = 0;
return;
}
- int framesize_sum = 0;
+ size_t framesize_sum = 0;
for (FrameSampleList::iterator it = encoded_frame_samples_.begin();
it != encoded_frame_samples_.end();
++it) {
diff --git a/webrtc/modules/video_coding/main/source/media_optimization.h b/webrtc/modules/video_coding/main/source/media_optimization.h
index df3fbb6..af35f01 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization.h
+++ b/webrtc/modules/video_coding/main/source/media_optimization.h
@@ -77,7 +77,7 @@
void UpdateContentData(const VideoContentMetrics* content_metrics);
// Informs Media Optimization of encoding output: Length and frame type.
- int32_t UpdateWithEncodedData(int encoded_length,
+ int32_t UpdateWithEncodedData(size_t encoded_length,
uint32_t timestamp,
FrameType encoded_frame_type);
diff --git a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc b/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
index bacfdc6..df79fb7 100644
--- a/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/media_optimization_unittest.cc
@@ -30,12 +30,11 @@
next_timestamp_(0) {}
// This method mimics what happens in VideoSender::AddVideoFrame.
- void AddFrameAndAdvanceTime(int bitrate_bps, bool expect_frame_drop) {
- ASSERT_GE(bitrate_bps, 0);
+ void AddFrameAndAdvanceTime(uint32_t bitrate_bps, bool expect_frame_drop) {
bool frame_dropped = media_opt_.DropFrame();
EXPECT_EQ(expect_frame_drop, frame_dropped);
if (!frame_dropped) {
- int bytes_per_frame = bitrate_bps * frame_time_ms_ / (8 * 1000);
+ size_t bytes_per_frame = bitrate_bps * frame_time_ms_ / (8 * 1000);
ASSERT_EQ(VCM_OK, media_opt_.UpdateWithEncodedData(
bytes_per_frame, next_timestamp_, kVideoFrameDelta));
}
@@ -54,14 +53,14 @@
// Enable video suspension with these limits.
// Suspend the video when the rate is below 50 kbps and resume when it gets
// above 50 + 10 kbps again.
- const int kThresholdBps = 50000;
- const int kWindowBps = 10000;
+ const uint32_t kThresholdBps = 50000;
+ const uint32_t kWindowBps = 10000;
media_opt_.SuspendBelowMinBitrate(kThresholdBps, kWindowBps);
// The video should not be suspended from the start.
EXPECT_FALSE(media_opt_.IsVideoSuspended());
- int target_bitrate_kbps = 100;
+ uint32_t target_bitrate_kbps = 100;
media_opt_.SetTargetRates(target_bitrate_kbps * 1000,
0, // Lossrate.
100,
diff --git a/webrtc/modules/video_coding/main/source/packet.cc b/webrtc/modules/video_coding/main/source/packet.cc
index 63dcd63..dd3743f 100644
--- a/webrtc/modules/video_coding/main/source/packet.cc
+++ b/webrtc/modules/video_coding/main/source/packet.cc
@@ -35,7 +35,7 @@
}
VCMPacket::VCMPacket(const uint8_t* ptr,
- const uint32_t size,
+ const size_t size,
const WebRtcRTPHeader& rtpHeader) :
payloadType(rtpHeader.header.payloadType),
timestamp(rtpHeader.header.timestamp),
@@ -57,7 +57,11 @@
CopyCodecSpecifics(rtpHeader.type.Video);
}
-VCMPacket::VCMPacket(const uint8_t* ptr, uint32_t size, uint16_t seq, uint32_t ts, bool mBit) :
+VCMPacket::VCMPacket(const uint8_t* ptr,
+ size_t size,
+ uint16_t seq,
+ uint32_t ts,
+ bool mBit) :
payloadType(0),
timestamp(ts),
ntp_time_ms_(0),
diff --git a/webrtc/modules/video_coding/main/source/packet.h b/webrtc/modules/video_coding/main/source/packet.h
index 242d3a4..d98b6f6 100644
--- a/webrtc/modules/video_coding/main/source/packet.h
+++ b/webrtc/modules/video_coding/main/source/packet.h
@@ -21,10 +21,10 @@
public:
VCMPacket();
VCMPacket(const uint8_t* ptr,
- const uint32_t size,
+ const size_t size,
const WebRtcRTPHeader& rtpHeader);
VCMPacket(const uint8_t* ptr,
- uint32_t size,
+ size_t size,
uint16_t seqNum,
uint32_t timestamp,
bool markerBit);
@@ -37,7 +37,7 @@
int64_t ntp_time_ms_;
uint16_t seqNum;
const uint8_t* dataPtr;
- uint32_t sizeBytes;
+ size_t sizeBytes;
bool markerBit;
FrameType frameType;
diff --git a/webrtc/modules/video_coding/main/source/qm_select.cc b/webrtc/modules/video_coding/main/source/qm_select.cc
index 85c5f36..0df61b5 100644
--- a/webrtc/modules/video_coding/main/source/qm_select.cc
+++ b/webrtc/modules/video_coding/main/source/qm_select.cc
@@ -239,11 +239,11 @@
}
// Update rate data after every encoded frame.
-void VCMQmResolution::UpdateEncodedSize(int encoded_size,
+void VCMQmResolution::UpdateEncodedSize(size_t encoded_size,
FrameType encoded_frame_type) {
frame_cnt_++;
// Convert to Kbps.
- float encoded_size_kbits = static_cast<float>((encoded_size * 8.0) / 1000.0);
+ float encoded_size_kbits = 8.0f * static_cast<float>(encoded_size) / 1000.0f;
// Update the buffer level:
// Note this is not the actual encoder buffer level.
diff --git a/webrtc/modules/video_coding/main/source/qm_select.h b/webrtc/modules/video_coding/main/source/qm_select.h
index ce57236..a87d502 100644
--- a/webrtc/modules/video_coding/main/source/qm_select.h
+++ b/webrtc/modules/video_coding/main/source/qm_select.h
@@ -216,7 +216,7 @@
// Update with actual bit rate (size of the latest encoded frame)
// and frame type, after every encoded frame.
- void UpdateEncodedSize(int encoded_size,
+ void UpdateEncodedSize(size_t encoded_size,
FrameType encoded_frame_type);
// Update with new target bitrate, actual encoder sent rate, frame_rate,
diff --git a/webrtc/modules/video_coding/main/source/qm_select_unittest.cc b/webrtc/modules/video_coding/main/source/qm_select_unittest.cc
index 5a7daed..0120f20 100644
--- a/webrtc/modules/video_coding/main/source/qm_select_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/qm_select_unittest.cc
@@ -46,7 +46,7 @@
int native_height,
int num_layers);
- void UpdateQmEncodedFrame(int* encoded_size, int num_updates);
+ void UpdateQmEncodedFrame(size_t* encoded_size, size_t num_updates);
void UpdateQmRateData(int* target_rate,
int* encoder_sent_rate,
@@ -315,8 +315,8 @@
// Update with encoded size over a number of frames.
// per-frame bandwidth = 15 = 450/30: simulate (decoder) buffer underflow:
- int encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
- UpdateQmEncodedFrame(encoded_size, 10);
+ size_t encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
+ UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
// Update rates for a sequence of intervals.
int target_rate[] = {300, 300, 300};
@@ -359,8 +359,8 @@
// Update with encoded size over a number of frames.
// per-frame bandwidth = 15 = 450/30: simulate stable (decoder) buffer levels.
- int32_t encoded_size[] = {40, 10, 10, 16, 18, 20, 17, 20, 16, 15};
- UpdateQmEncodedFrame(encoded_size, 10);
+ size_t encoded_size[] = {40, 10, 10, 16, 18, 20, 17, 20, 16, 15};
+ UpdateQmEncodedFrame(encoded_size, GTEST_ARRAY_SIZE_(encoded_size));
// Update rates for a sequence of intervals.
int target_rate[] = {350, 350, 350};
@@ -1262,11 +1262,12 @@
qm_resolution_->UpdateContent(content_metrics_);
}
-void QmSelectTest::UpdateQmEncodedFrame(int* encoded_size, int num_updates) {
+void QmSelectTest::UpdateQmEncodedFrame(size_t* encoded_size,
+ size_t num_updates) {
FrameType frame_type = kVideoFrameDelta;
- for (int i = 0; i < num_updates; ++i) {
+ for (size_t i = 0; i < num_updates; ++i) {
// Convert to bytes.
- int32_t encoded_size_update = 1000 * encoded_size[i] / 8;
+ size_t encoded_size_update = 1000 * encoded_size[i] / 8;
qm_resolution_->UpdateEncodedSize(encoded_size_update, frame_type);
}
}
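Deriving the update count from the array itself, rather than hard-coding 10, keeps the loop bound in sync with the initializer list; GTEST_ARRAY_SIZE_ is gtest's compile-time element-count helper. The same effect without the macro, as a minimal sketch:

    // Equivalent element-count idiom, sketch only.
    size_t encoded_size[] = {200, 100, 50, 30, 60, 40, 20, 30, 20, 40};
    const size_t kNumUpdates = sizeof(encoded_size) / sizeof(encoded_size[0]);  // 10
    UpdateQmEncodedFrame(encoded_size, kNumUpdates);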
diff --git a/webrtc/modules/video_coding/main/source/session_info.cc b/webrtc/modules/video_coding/main/source/session_info.cc
index d7d576d..b165d7c 100644
--- a/webrtc/modules/video_coding/main/source/session_info.cc
+++ b/webrtc/modules/video_coding/main/source/session_info.cc
@@ -110,8 +110,8 @@
last_packet_seq_num_ = -1;
}
-int VCMSessionInfo::SessionLength() const {
- int length = 0;
+size_t VCMSessionInfo::SessionLength() const {
+ size_t length = 0;
for (PacketIteratorConst it = packets_.begin(); it != packets_.end(); ++it)
length += (*it).sizeBytes;
return length;
@@ -121,13 +121,13 @@
return packets_.size();
}
-int VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
- PacketIterator packet_it) {
+size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
+ PacketIterator packet_it) {
VCMPacket& packet = *packet_it;
PacketIterator it;
// Calculate the offset into the frame buffer for this packet.
- int offset = 0;
+ size_t offset = 0;
for (it = packets_.begin(); it != packet_it; ++it)
offset += (*it).sizeBytes;
@@ -145,7 +145,7 @@
size_t required_length = 0;
const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
- uint32_t length = BufferToUWord16(nalu_ptr);
+ size_t length = BufferToUWord16(nalu_ptr);
required_length +=
length + (packet.insertStartCode ? kH264StartCodeLengthBytes : 0);
nalu_ptr += kLengthFieldLength + length;
@@ -154,7 +154,7 @@
nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
uint8_t* frame_buffer_ptr = frame_buffer + offset;
while (nalu_ptr < packet_buffer + packet.sizeBytes) {
- uint32_t length = BufferToUWord16(nalu_ptr);
+ size_t length = BufferToUWord16(nalu_ptr);
nalu_ptr += kLengthFieldLength;
frame_buffer_ptr += Insert(nalu_ptr,
length,
@@ -276,9 +276,9 @@
return --packet_it;
}
-int VCMSessionInfo::DeletePacketData(PacketIterator start,
- PacketIterator end) {
- int bytes_to_delete = 0; // The number of bytes to delete.
+size_t VCMSessionInfo::DeletePacketData(PacketIterator start,
+ PacketIterator end) {
+ size_t bytes_to_delete = 0; // The number of bytes to delete.
PacketIterator packet_after_end = end;
++packet_after_end;
@@ -290,20 +290,20 @@
(*it).dataPtr = NULL;
}
if (bytes_to_delete > 0)
- ShiftSubsequentPackets(end, -bytes_to_delete);
+ ShiftSubsequentPackets(end, -static_cast<int>(bytes_to_delete));
return bytes_to_delete;
}
-int VCMSessionInfo::BuildVP8FragmentationHeader(
+size_t VCMSessionInfo::BuildVP8FragmentationHeader(
uint8_t* frame_buffer,
- int frame_buffer_length,
+ size_t frame_buffer_length,
RTPFragmentationHeader* fragmentation) {
- int new_length = 0;
+ size_t new_length = 0;
// Allocate space for max number of partitions
fragmentation->VerifyAndAllocateFragmentationHeader(kMaxVP8Partitions);
fragmentation->fragmentationVectorSize = 0;
memset(fragmentation->fragmentationLength, 0,
- kMaxVP8Partitions * sizeof(uint32_t));
+ kMaxVP8Partitions * sizeof(size_t));
if (packets_.empty())
return new_length;
PacketIterator it = FindNextPartitionBeginning(packets_.begin());
@@ -314,11 +314,11 @@
fragmentation->fragmentationOffset[partition_id] =
(*it).dataPtr - frame_buffer;
assert(fragmentation->fragmentationOffset[partition_id] <
- static_cast<uint32_t>(frame_buffer_length));
+ frame_buffer_length);
fragmentation->fragmentationLength[partition_id] =
(*partition_end).dataPtr + (*partition_end).sizeBytes - (*it).dataPtr;
assert(fragmentation->fragmentationLength[partition_id] <=
- static_cast<uint32_t>(frame_buffer_length));
+ frame_buffer_length);
new_length += fragmentation->fragmentationLength[partition_id];
++partition_end;
it = FindNextPartitionBeginning(partition_end);
@@ -385,8 +385,8 @@
(*packet_it).seqNum));
}
-int VCMSessionInfo::MakeDecodable() {
- int return_length = 0;
+size_t VCMSessionInfo::MakeDecodable() {
+ size_t return_length = 0;
if (packets_.empty()) {
return 0;
}
@@ -511,13 +511,13 @@
// The insert operation invalidates the iterator |rit|.
PacketIterator packet_list_it = packets_.insert(rit.base(), packet);
- int returnLength = InsertBuffer(frame_buffer, packet_list_it);
+ size_t returnLength = InsertBuffer(frame_buffer, packet_list_it);
UpdateCompleteSession();
if (decode_error_mode == kWithErrors)
decodable_ = true;
else if (decode_error_mode == kSelectiveErrors)
UpdateDecodableSession(frame_data);
- return returnLength;
+ return static_cast<int>(returnLength);
}
void VCMSessionInfo::InformOfEmptyPacket(uint16_t seq_num) {
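The cast in ShiftSubsequentPackets(end, -static_cast<int>(bytes_to_delete)) is needed because negating a size_t directly would wrap around to a huge positive value instead of producing a negative shift. A standalone illustration (hypothetical values):

    size_t bytes_to_delete = 100;
    // -bytes_to_delete would wrap to 18446744073709551516 with a 64-bit size_t.
    int shift = -static_cast<int>(bytes_to_delete);  // correct: -100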
diff --git a/webrtc/modules/video_coding/main/source/session_info.h b/webrtc/modules/video_coding/main/source/session_info.h
index 25216c7..cd55130 100644
--- a/webrtc/modules/video_coding/main/source/session_info.h
+++ b/webrtc/modules/video_coding/main/source/session_info.h
@@ -56,15 +56,15 @@
// Builds fragmentation headers for VP8, each fragment being a decodable
// VP8 partition. Returns the total number of bytes which are decodable. Is
// used instead of MakeDecodable for VP8.
- int BuildVP8FragmentationHeader(uint8_t* frame_buffer,
- int frame_buffer_length,
- RTPFragmentationHeader* fragmentation);
+ size_t BuildVP8FragmentationHeader(uint8_t* frame_buffer,
+ size_t frame_buffer_length,
+ RTPFragmentationHeader* fragmentation);
// Makes the frame decodable. I.e., only contain decodable NALUs. All
// non-decodable NALUs will be deleted and packets will be moved to in
// memory to remove any empty space.
// Returns the number of bytes deleted from the session.
- int MakeDecodable();
+ size_t MakeDecodable();
// Sets decodable_ to false.
// Used by the dual decoder. After the mode is changed to kNoErrors from
@@ -72,7 +72,7 @@
// decodable and are not complete are marked as non-decodable.
void SetNotDecodableIfIncomplete();
- int SessionLength() const;
+ size_t SessionLength() const;
int NumPackets() const;
bool HaveFirstPacket() const;
bool HaveLastPacket() const;
@@ -114,8 +114,8 @@
PacketIterator FindPartitionEnd(PacketIterator it) const;
static bool InSequence(const PacketIterator& it,
const PacketIterator& prev_it);
- int InsertBuffer(uint8_t* frame_buffer,
- PacketIterator packetIterator);
+ size_t InsertBuffer(uint8_t* frame_buffer,
+ PacketIterator packetIterator);
size_t Insert(const uint8_t* buffer,
size_t length,
bool insert_start_code,
@@ -124,8 +124,8 @@
PacketIterator FindNaluEnd(PacketIterator packet_iter) const;
// Deletes the data of all packets between |start| and |end|, inclusively.
// Note that this function doesn't delete the actual packets.
- int DeletePacketData(PacketIterator start,
- PacketIterator end);
+ size_t DeletePacketData(PacketIterator start,
+ PacketIterator end);
void UpdateCompleteSession();
// When enabled, determine if session is decodable, i.e. incomplete but
diff --git a/webrtc/modules/video_coding/main/source/session_info_unittest.cc b/webrtc/modules/video_coding/main/source/session_info_unittest.cc
index 2fab94d..fae55f4 100644
--- a/webrtc/modules/video_coding/main/source/session_info_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/session_info_unittest.cc
@@ -34,20 +34,20 @@
}
void FillPacket(uint8_t start_value) {
- for (int i = 0; i < packet_buffer_size(); ++i)
+ for (size_t i = 0; i < packet_buffer_size(); ++i)
packet_buffer_[i] = start_value + i;
}
void VerifyPacket(uint8_t* start_ptr, uint8_t start_value) {
- for (int j = 0; j < packet_buffer_size(); ++j) {
+ for (size_t j = 0; j < packet_buffer_size(); ++j) {
ASSERT_EQ(start_value + j, start_ptr[j]);
}
}
- int packet_buffer_size() const {
+ size_t packet_buffer_size() const {
return sizeof(packet_buffer_) / sizeof(packet_buffer_[0]);
}
- int frame_buffer_size() const {
+ size_t frame_buffer_size() const {
return sizeof(frame_buffer_) / sizeof(frame_buffer_[0]);
}
@@ -77,10 +77,10 @@
bool VerifyPartition(int partition_id,
int packets_expected,
int start_value) {
- EXPECT_EQ(static_cast<uint32_t>(packets_expected * packet_buffer_size()),
+ EXPECT_EQ(packets_expected * packet_buffer_size(),
fragmentation_.fragmentationLength[partition_id]);
for (int i = 0; i < packets_expected; ++i) {
- int packet_index = fragmentation_.fragmentationOffset[partition_id] +
+ size_t packet_index = fragmentation_.fragmentationOffset[partition_id] +
i * packet_buffer_size();
if (packet_index + packet_buffer_size() > frame_buffer_size())
return false;
@@ -154,10 +154,8 @@
packet_.frameType = kVideoFrameKey;
FillPacket(0);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
EXPECT_FALSE(session_.HaveLastPacket());
EXPECT_EQ(kVideoFrameKey, session_.FrameType());
@@ -165,10 +163,8 @@
packet_.markerBit = true;
packet_.seqNum += 1;
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
EXPECT_TRUE(session_.HaveLastPacket());
EXPECT_EQ(packet_.seqNum, session_.HighSequenceNumber());
EXPECT_EQ(0xFFFE, session_.LowSequenceNumber());
@@ -193,31 +189,26 @@
packet_.isFirstPacket = true;
packet_.markerBit = false;
FillPacket(0);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.isFirstPacket = false;
for (int i = 1; i < 9; ++i) {
packet_.seqNum += 1;
FillPacket(i);
- ASSERT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ ASSERT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors,
+ frame_data)));
}
packet_.seqNum += 1;
packet_.markerBit = true;
FillPacket(9);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
EXPECT_EQ(10 * packet_buffer_size(), session_.SessionLength());
for (int i = 0; i < 10; ++i) {
@@ -231,11 +222,10 @@
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(3);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kWithErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kWithErrors,
+ frame_data)));
EXPECT_TRUE(session_.decodable());
}
@@ -246,21 +236,19 @@
FillPacket(1);
frame_data.rolling_average_packets_per_frame = 11;
frame_data.rtt_ms = 150;
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kSelectiveErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
EXPECT_FALSE(session_.decodable());
packet_.seqNum -= 1;
FillPacket(0);
packet_.isFirstPacket = true;
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kSelectiveErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
EXPECT_TRUE(session_.decodable());
packet_.isFirstPacket = false;
@@ -268,21 +256,19 @@
for (int i = 2; i < 8; ++i) {
packet_.seqNum += 1;
FillPacket(i);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kSelectiveErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
EXPECT_TRUE(session_.decodable());
}
packet_.seqNum += 1;
FillPacket(8);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kSelectiveErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kSelectiveErrors,
+ frame_data)));
EXPECT_TRUE(session_.decodable());
}
@@ -291,11 +277,9 @@
packet_.isFirstPacket = true;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0x0004;
packet_.isFirstPacket = true;
@@ -320,11 +304,9 @@
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
++packet_.seqNum;
packet_.isFirstPacket = true;
packet_.markerBit = true;
@@ -342,10 +324,8 @@
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
// Insert an older packet with a first packet set.
packet_.seqNum = 0x0004;
packet_.isFirstPacket = true;
@@ -360,10 +340,8 @@
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0x0008;
packet_.isFirstPacket = false;
packet_.markerBit = true;
@@ -380,29 +358,23 @@
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0x0004;
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0x0002;
packet_.isFirstPacket = false;
packet_.markerBit = false;
FillPacket(1);
ASSERT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0xFFF0;
packet_.isFirstPacket = false;
packet_.markerBit = false;
@@ -431,20 +403,16 @@
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
// Insert an older packet with a first packet set.
packet_.seqNum = 0x0005;
packet_.isFirstPacket = true;
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0x0004;
packet_.isFirstPacket = false;
packet_.markerBit = false;
@@ -458,19 +426,15 @@
packet_.markerBit = false;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0x0008;
packet_.isFirstPacket = false;
packet_.markerBit = true;
FillPacket(1);
EXPECT_EQ(packet_buffer_size(),
- session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data));
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum = 0x0009;
packet_.isFirstPacket = false;
@@ -493,11 +457,9 @@
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -507,11 +469,9 @@
packet_header_.header.sequenceNumber += 2;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -521,18 +481,15 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
// One packet should be removed (end of partition 0).
- EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
- frame_buffer_size(),
- &fragmentation_),
- 2 * packet_buffer_size());
+ EXPECT_EQ(2 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 1, 0));
SCOPED_TRACE("Calling VerifyPartition");
@@ -550,11 +507,9 @@
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -564,11 +519,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -578,11 +531,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -592,18 +543,15 @@
packet_header_.header.sequenceNumber += 2;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
// One packet should be removed (end of partition 2), 3 left.
- EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
- frame_buffer_size(),
- &fragmentation_),
- 3 * packet_buffer_size());
+ EXPECT_EQ(3 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
@@ -621,11 +569,9 @@
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -635,11 +581,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -649,11 +593,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -663,18 +605,15 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
// No packet should be removed.
- EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
- frame_buffer_size(),
- &fragmentation_),
- 4 * packet_buffer_size());
+ EXPECT_EQ(4 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
SCOPED_TRACE("Calling VerifyPartition");
@@ -692,11 +631,9 @@
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -706,11 +643,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -720,11 +655,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -734,18 +667,15 @@
packet_header_.header.sequenceNumber += 2;
FillPacket(3);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
// One packet should be removed from the last partition
- EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
- frame_buffer_size(),
- &fragmentation_),
- 3 * packet_buffer_size());
+ EXPECT_EQ(3 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
SCOPED_TRACE("Calling VerifyPartition");
@@ -764,11 +694,9 @@
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -778,11 +706,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -792,11 +718,9 @@
packet_header_.header.sequenceNumber += 3;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -806,18 +730,15 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
// No packet should be removed.
- EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
- frame_buffer_size(),
- &fragmentation_),
- 4 * packet_buffer_size());
+ EXPECT_EQ(4 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
@@ -835,11 +756,9 @@
FillPacket(1);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -850,11 +769,9 @@
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -864,11 +781,9 @@
packet_header_.header.sequenceNumber += 2;
FillPacket(4);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -878,11 +793,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(5);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -892,11 +805,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(6);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -906,18 +817,15 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(7);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
// 2 partitions left. 2 packets removed from second partition
- EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
- frame_buffer_size(),
- &fragmentation_),
- 4 * packet_buffer_size());
+ EXPECT_EQ(4 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 1));
SCOPED_TRACE("Calling VerifyPartition");
@@ -935,11 +843,9 @@
FillPacket(0);
VCMPacket* packet = new VCMPacket(packet_buffer_, packet_buffer_size(),
packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -949,11 +855,9 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(1);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
packet_header_.type.Video.isFirstPacket = false;
@@ -963,18 +867,15 @@
packet_header_.header.sequenceNumber += 1;
FillPacket(2);
packet = new VCMPacket(packet_buffer_, packet_buffer_size(), packet_header_);
- EXPECT_EQ(session_.InsertPacket(*packet,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(*packet, frame_buffer_,
+ kNoErrors, frame_data)));
delete packet;
// No packets removed.
- EXPECT_EQ(session_.BuildVP8FragmentationHeader(frame_buffer_,
- frame_buffer_size(),
- &fragmentation_),
- 3 * packet_buffer_size());
+ EXPECT_EQ(3 * packet_buffer_size(),
+ session_.BuildVP8FragmentationHeader(
+ frame_buffer_, frame_buffer_size(), &fragmentation_));
SCOPED_TRACE("Calling VerifyPartition");
EXPECT_TRUE(VerifyPartition(0, 2, 0));
// This partition is aggregated in partition 0
@@ -996,8 +897,8 @@
kNoErrors,
frame_data));
- EXPECT_EQ(0, session_.MakeDecodable());
- EXPECT_EQ(0, session_.SessionLength());
+ EXPECT_EQ(0U, session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.SessionLength());
}
TEST_F(TestNalUnits, OneIsolatedNaluLoss) {
@@ -1006,24 +907,20 @@
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluComplete;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
- EXPECT_EQ(0, session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(2 * packet_buffer_size(), session_.SessionLength());
SCOPED_TRACE("Calling VerifyNalu");
EXPECT_TRUE(VerifyNalu(0, 1, 0));
@@ -1037,22 +934,18 @@
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluEnd;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
@@ -1066,22 +959,18 @@
packet_.seqNum = 0;
packet_.markerBit = false;
FillPacket(0);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluIncomplete;
packet_.seqNum += 2;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
EXPECT_EQ(packet_buffer_size(), session_.MakeDecodable());
EXPECT_EQ(packet_buffer_size(), session_.SessionLength());
@@ -1096,35 +985,29 @@
packet_.seqNum += 1;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.isFirstPacket = true;
packet_.completeNALU = kNaluComplete;
packet_.seqNum -= 1;
packet_.markerBit = false;
FillPacket(0);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluEnd;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
- EXPECT_EQ(0, session_.MakeDecodable());
+ EXPECT_EQ(0U, session_.MakeDecodable());
EXPECT_EQ(3 * packet_buffer_size(), session_.SessionLength());
SCOPED_TRACE("Calling VerifyNalu");
EXPECT_TRUE(VerifyNalu(0, 1, 0));
@@ -1136,25 +1019,21 @@
packet_.completeNALU = kNaluIncomplete;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluEnd;
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
- EXPECT_EQ(0, session_.SessionLength());
+ EXPECT_EQ(0U, session_.SessionLength());
}
TEST_F(TestNalUnits, ReorderWrapLosses) {
@@ -1165,25 +1044,21 @@
packet_.seqNum += 2;
packet_.markerBit = true;
FillPacket(2);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
packet_.seqNum -= 2;
packet_.isFirstPacket = false;
packet_.completeNALU = kNaluIncomplete;
packet_.markerBit = false;
FillPacket(1);
- EXPECT_EQ(session_.InsertPacket(packet_,
- frame_buffer_,
- kNoErrors,
- frame_data),
- packet_buffer_size());
+ EXPECT_EQ(packet_buffer_size(),
+ static_cast<size_t>(session_.InsertPacket(packet_, frame_buffer_,
+ kNoErrors, frame_data)));
EXPECT_EQ(2 * packet_buffer_size(), session_.MakeDecodable());
- EXPECT_EQ(0, session_.SessionLength());
+ EXPECT_EQ(0U, session_.SessionLength());
}
} // namespace webrtc
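A recurring pattern in these tests: packet_buffer_size() is now size_t while InsertPacket() still returns int (note the static_cast<int> at its return site in session_info.cc), so the expected value moves to the first EXPECT_EQ argument and the actual return is cast to size_t; plain 0 literals likewise become 0U to match the unsigned returns of MakeDecodable() and SessionLength(). A minimal sketch of the comparison pattern (hypothetical values, illustration only):

    size_t expected_bytes = 100;
    int inserted = 100;  // int return value in this sketch, negative on error
    EXPECT_EQ(expected_bytes, static_cast<size_t>(inserted));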
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.cc b/webrtc/modules/video_coding/main/source/video_coding_impl.cc
index d566731..2dfa99a 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_impl.cc
+++ b/webrtc/modules/video_coding/main/source/video_coding_impl.cc
@@ -301,7 +301,7 @@
}
virtual int32_t IncomingPacket(const uint8_t* incomingPayload,
- uint32_t payloadLength,
+ size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) OVERRIDE {
return receiver_->IncomingPacket(incomingPayload, payloadLength, rtpInfo);
}
diff --git a/webrtc/modules/video_coding/main/source/video_coding_impl.h b/webrtc/modules/video_coding/main/source/video_coding_impl.h
index ac7a1f4..90186c7 100644
--- a/webrtc/modules/video_coding/main/source/video_coding_impl.h
+++ b/webrtc/modules/video_coding/main/source/video_coding_impl.h
@@ -160,7 +160,7 @@
VideoCodecType ReceiveCodec() const;
int32_t IncomingPacket(const uint8_t* incomingPayload,
- uint32_t payloadLength,
+ size_t payloadLength,
const WebRtcRTPHeader& rtpInfo);
int32_t SetMinimumPlayoutDelay(uint32_t minPlayoutDelayMs);
int32_t SetRenderDelay(uint32_t timeMS);
diff --git a/webrtc/modules/video_coding/main/source/video_receiver.cc b/webrtc/modules/video_coding/main/source/video_receiver.cc
index a8de28b..f58d64f 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver.cc
+++ b/webrtc/modules/video_coding/main/source/video_receiver.cc
@@ -631,7 +631,7 @@
// Incoming packet from network parsed and ready for decode, non blocking.
int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
- uint32_t payloadLength,
+ size_t payloadLength,
const WebRtcRTPHeader& rtpInfo) {
if (rtpInfo.frameType == kVideoFrameKey) {
TRACE_EVENT1("webrtc",
diff --git a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc b/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
index 502dfa9..ec5ba93 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
@@ -49,7 +49,6 @@
}
void InsertAndVerifyPaddingFrame(const uint8_t* payload,
- int length,
WebRtcRTPHeader* header) {
ASSERT_TRUE(header != NULL);
for (int j = 0; j < 5; ++j) {
@@ -63,7 +62,7 @@
}
void InsertAndVerifyDecodableFrame(const uint8_t* payload,
- int length,
+ size_t length,
WebRtcRTPHeader* header) {
ASSERT_TRUE(header != NULL);
EXPECT_EQ(0, receiver_->IncomingPacket(payload, length, *header));
@@ -87,7 +86,7 @@
EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
EXPECT_EQ(
0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
- const unsigned int kPaddingSize = 220;
+ const size_t kPaddingSize = 220;
const uint8_t payload[kPaddingSize] = {0};
WebRtcRTPHeader header;
memset(&header, 0, sizeof(header));
@@ -100,7 +99,7 @@
header.type.Video.codec = kRtpVideoVp8;
for (int i = 0; i < 10; ++i) {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
- InsertAndVerifyPaddingFrame(payload, 0, &header);
+ InsertAndVerifyPaddingFrame(payload, &header);
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
}
@@ -110,8 +109,8 @@
EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
EXPECT_EQ(
0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
- const unsigned int kFrameSize = 1200;
- const unsigned int kPaddingSize = 220;
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
const uint8_t payload[kFrameSize] = {0};
WebRtcRTPHeader header;
memset(&header, 0, sizeof(header));
@@ -150,7 +149,7 @@
} else {
EXPECT_CALL(packet_request_callback_, ResendPackets(_, _)).Times(0);
}
- InsertAndVerifyPaddingFrame(payload, 0, &header);
+ InsertAndVerifyPaddingFrame(payload, &header);
}
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
@@ -161,8 +160,8 @@
EXPECT_EQ(0, receiver_->SetVideoProtection(kProtectionNack, true));
EXPECT_EQ(
0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
- const unsigned int kFrameSize = 1200;
- const unsigned int kPaddingSize = 220;
+ const size_t kFrameSize = 1200;
+ const size_t kPaddingSize = 220;
const uint8_t payload[kFrameSize] = {0};
WebRtcRTPHeader header;
memset(&header, 0, sizeof(header));
@@ -195,7 +194,7 @@
header.type.Video.isFirstPacket = false;
header.header.markerBit = false;
for (int j = 0; j < 2; ++j) {
- // InsertAndVerifyPaddingFrame(payload, 0, &header);
+ // InsertAndVerifyPaddingFrame(payload, &header);
clock_.AdvanceTimeMilliseconds(33);
header.header.timestamp += 3000;
}
diff --git a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
index 6bc8b80..f689809 100644
--- a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
@@ -91,7 +91,7 @@
uint32_t timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
- uint32_t payload_size,
+ size_t payload_size,
const RTPFragmentationHeader& fragmentation_header,
const RTPVideoHeader* rtp_video_header) OVERRIDE {
assert(rtp_video_header);
@@ -127,10 +127,10 @@
struct FrameData {
FrameData() {}
- FrameData(uint32_t payload_size, const RTPVideoHeader& rtp_video_header)
+ FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
: payload_size(payload_size), rtp_video_header(rtp_video_header) {}
- uint32_t payload_size;
+ size_t payload_size;
RTPVideoHeader rtp_video_header;
};
@@ -152,8 +152,8 @@
return frames;
}
- int SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
- int payload_size = 0;
+ size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
+ size_t payload_size = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
const uint8_t temporal_idx =
diff --git a/webrtc/modules/video_coding/main/test/generic_codec_test.cc b/webrtc/modules/video_coding/main/test/generic_codec_test.cc
index 7179c80..2848212 100644
--- a/webrtc/modules/video_coding/main/test/generic_codec_test.cc
+++ b/webrtc/modules/video_coding/main/test/generic_codec_test.cc
@@ -279,8 +279,9 @@
const float nBitrates = sizeof(bitRate)/sizeof(*bitRate);
float _bitRate = 0;
int _frameCnt = 0;
- float totalBytesOneSec = 0;//, totalBytesTenSec;
- float totalBytes, actualBitrate;
+ size_t totalBytesOneSec = 0;//, totalBytesTenSec;
+ size_t totalBytes;
+ float actualBitrate;
VCMFrameCount frameCount; // testing frame type counters
// start test
NumberOfCodecs = _vcm->NumberOfCodecs();
@@ -478,7 +479,7 @@
}
}
-float
+size_t
GenericCodecTest::WaitForEncodedFrame() const
{
int64_t startTime = _clock->TimeInMilliseconds();
@@ -499,17 +500,17 @@
}
int
-RTPSendCallback_SizeTest::SendPacket(int channel, const void *data, int len)
+RTPSendCallback_SizeTest::SendPacket(int channel, const void *data, size_t len)
{
_nPackets++;
_payloadSizeSum += len;
// Make sure no payloads (len - header size) are larger than maxPayloadSize
- TEST(len > 0 && static_cast<uint32_t>(len - 12) <= _maxPayloadSize);
+ TEST(len > 0 && len - 12 <= _maxPayloadSize);
return 0;
}
void
-RTPSendCallback_SizeTest::SetMaxPayloadSize(uint32_t maxPayloadSize)
+RTPSendCallback_SizeTest::SetMaxPayloadSize(size_t maxPayloadSize)
{
_maxPayloadSize = maxPayloadSize;
}
@@ -533,12 +534,12 @@
int32_t
VCMEncComplete_KeyReqTest::SendData(
- const FrameType frameType,
- const uint8_t payloadType,
- const uint32_t timeStamp,
+ FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ size_t payloadSize,
const RTPFragmentationHeader& /*fragmentationHeader*/,
const webrtc::RTPVideoHeader* /*videoHdr*/)
{
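With len now a size_t, the old static_cast<uint32_t>(len - 12) is unnecessary; 12 here is the fixed RTP header size, and if a packet ever arrived shorter than that, the unsigned subtraction would wrap to a huge value and the bound check would still fail, which is the intended outcome. A standalone illustration of that wrap (hypothetical values):

    size_t len = 8;            // shorter than the 12-byte RTP header
    size_t max_payload = 1400;
    bool within_bound = (len > 0 && len - 12 <= max_payload);  // wraps, so false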
diff --git a/webrtc/modules/video_coding/main/test/generic_codec_test.h b/webrtc/modules/video_coding/main/test/generic_codec_test.h
index 841662a..9a450de 100644
--- a/webrtc/modules/video_coding/main/test/generic_codec_test.h
+++ b/webrtc/modules/video_coding/main/test/generic_codec_test.h
@@ -41,7 +41,7 @@
~GenericCodecTest();
static int RunTest(CmdArgs& args);
int32_t Perform(CmdArgs& args);
- float WaitForEncodedFrame() const;
+ size_t WaitForEncodedFrame() const;
private:
void Setup(CmdArgs& args);
@@ -75,14 +75,18 @@
public:
// constructor input: (receive side) rtp module to send encoded data to
RTPSendCallback_SizeTest() : _maxPayloadSize(0), _payloadSizeSum(0), _nPackets(0) {}
- virtual int SendPacket(int channel, const void *data, int len) OVERRIDE;
- virtual int SendRTCPPacket(int channel, const void *data, int len) OVERRIDE {return 0;}
- void SetMaxPayloadSize(uint32_t maxPayloadSize);
+ virtual int SendPacket(int channel, const void *data, size_t len) OVERRIDE;
+ virtual int SendRTCPPacket(int channel,
+ const void *data,
+ size_t len) OVERRIDE {
+ return 0;
+ }
+ void SetMaxPayloadSize(size_t maxPayloadSize);
void Reset();
float AveragePayloadSize() const;
private:
- uint32_t _maxPayloadSize;
- uint32_t _payloadSizeSum;
+ size_t _maxPayloadSize;
+ size_t _payloadSizeSum;
uint32_t _nPackets;
};
@@ -91,12 +95,12 @@
public:
VCMEncComplete_KeyReqTest(webrtc::VideoCodingModule &vcm) : _vcm(vcm), _seqNo(0), _timeStamp(0) {}
virtual int32_t SendData(
- const webrtc::FrameType frameType,
- const uint8_t payloadType,
+ webrtc::FrameType frameType,
+ uint8_t payloadType,
uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ size_t payloadSize,
const webrtc::RTPFragmentationHeader& fragmentationHeader,
const webrtc::RTPVideoHeader* videoHdr) OVERRIDE;
private:
diff --git a/webrtc/modules/video_coding/main/test/media_opt_test.cc b/webrtc/modules/video_coding/main/test/media_opt_test.cc
index a8b8f19..f3b1cf0 100644
--- a/webrtc/modules/video_coding/main/test/media_opt_test.cc
+++ b/webrtc/modules/video_coding/main/test/media_opt_test.cc
@@ -308,7 +308,7 @@
_vcm->RegisterReceiveCallback(&receiveCallback);
_frameCnt = 0;
- _sumEncBytes = 0.0;
+ _sumEncBytes = 0;
_numFramesDropped = 0;
int half_width = (_width + 1) / 2;
int half_height = (_height + 1) / 2;
@@ -338,7 +338,7 @@
printf ("Decode error in frame # %d",_frameCnt);
}
- float encBytes = encodeCompleteCallback->EncodedBytes();
+ size_t encBytes = encodeCompleteCallback->EncodedBytes();
if (encBytes == 0)
{
_numFramesDropped += 1;
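Switching _sumEncBytes and encBytes from float to size_t makes the encBytes == 0 drop check an exact integer comparison and lets byte totals accumulate without the precision loss a 32-bit float shows for integers above 2^24 (about 16 MB).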
diff --git a/webrtc/modules/video_coding/main/test/media_opt_test.h b/webrtc/modules/video_coding/main/test/media_opt_test.h
index 5a95276..57398eb 100644
--- a/webrtc/modules/video_coding/main/test/media_opt_test.h
+++ b/webrtc/modules/video_coding/main/test/media_opt_test.h
@@ -80,7 +80,7 @@
double _lossRate;
uint32_t _renderDelayMs;
int32_t _frameCnt;
- float _sumEncBytes;
+ size_t _sumEncBytes;
int32_t _numFramesDropped;
std::string _codecName;
webrtc::VideoCodecType _sendCodecType;
diff --git a/webrtc/modules/video_coding/main/test/mt_test_common.cc b/webrtc/modules/video_coding/main/test/mt_test_common.cc
index 779ef7a..dec649f 100644
--- a/webrtc/modules/video_coding/main/test/mt_test_common.cc
+++ b/webrtc/modules/video_coding/main/test/mt_test_common.cc
@@ -30,7 +30,7 @@
}
int
-TransportCallback::SendPacket(int channel, const void *data, int len)
+TransportCallback::SendPacket(int channel, const void *data, size_t len)
{
_sendCount++;
_totalSentLength += len;
diff --git a/webrtc/modules/video_coding/main/test/mt_test_common.h b/webrtc/modules/video_coding/main/test/mt_test_common.h
index be6d9ea..78d73e2 100644
--- a/webrtc/modules/video_coding/main/test/mt_test_common.h
+++ b/webrtc/modules/video_coding/main/test/mt_test_common.h
@@ -52,7 +52,7 @@
// Add packets to list
// Incorporate network conditions - delay and packet loss
// Actual transmission will occur on a separate thread
- virtual int SendPacket(int channel, const void *data, int len) OVERRIDE;
+ virtual int SendPacket(int channel, const void *data, size_t len) OVERRIDE;
// Send to the receiver packets which are ready to be submitted
int TransportPackets();
};
diff --git a/webrtc/modules/video_coding/main/test/normal_test.cc b/webrtc/modules/video_coding/main/test/normal_test.cc
index 815c3ac..4ab97a1 100644
--- a/webrtc/modules/video_coding/main/test/normal_test.cc
+++ b/webrtc/modules/video_coding/main/test/normal_test.cc
@@ -71,12 +71,12 @@
int32_t
VCMNTEncodeCompleteCallback::SendData(
- const FrameType frameType,
- const uint8_t payloadType,
- const uint32_t timeStamp,
+ FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ size_t payloadSize,
const RTPFragmentationHeader& /*fragmentationHeader*/,
const webrtc::RTPVideoHeader* videoHdr)
@@ -131,7 +131,7 @@
_VCMReceiver = vcm;
return;
}
- int32_t
+ size_t
VCMNTEncodeCompleteCallback::EncodedBytes()
{
return _encodedBytes;
@@ -144,13 +144,13 @@
}
// Decoded Frame Callback Implementation
-VCMNTDecodeCompleCallback::~VCMNTDecodeCompleCallback()
+VCMNTDecodeCompleteCallback::~VCMNTDecodeCompleteCallback()
{
if (_decodedFile)
fclose(_decodedFile);
}
int32_t
-VCMNTDecodeCompleCallback::FrameToRender(webrtc::I420VideoFrame& videoFrame)
+VCMNTDecodeCompleteCallback::FrameToRender(webrtc::I420VideoFrame& videoFrame)
{
if (videoFrame.width() != _currentWidth ||
videoFrame.height() != _currentHeight)
@@ -167,13 +167,13 @@
if (PrintI420VideoFrame(videoFrame, _decodedFile) < 0) {
return -1;
}
- _decodedBytes+= webrtc::CalcBufferSize(webrtc::kI420,
- videoFrame.width(), videoFrame.height());
+ _decodedBytes += webrtc::CalcBufferSize(webrtc::kI420, videoFrame.width(),
+ videoFrame.height());
return VCM_OK;
}
- int32_t
-VCMNTDecodeCompleCallback::DecodedBytes()
+ size_t
+VCMNTDecodeCompleteCallback::DecodedBytes()
{
return _decodedBytes;
}
@@ -260,7 +260,7 @@
// register a decoder (same codec for decoder and encoder )
TEST(_vcm->RegisterReceiveCodec(&_sendCodec, 1) == VCM_OK);
/* Callback Settings */
- VCMNTDecodeCompleCallback _decodeCallback(_outname);
+ VCMNTDecodeCompleteCallback _decodeCallback(_outname);
_vcm->RegisterReceiveCallback(&_decodeCallback);
VCMNTEncodeCompleteCallback _encodeCompleteCallback(_encodedFile, *this);
_vcm->RegisterTransportCallback(&_encodeCompleteCallback);
diff --git a/webrtc/modules/video_coding/main/test/normal_test.h b/webrtc/modules/video_coding/main/test/normal_test.h
index 63e66b3..4d33f3c 100644
--- a/webrtc/modules/video_coding/main/test/normal_test.h
+++ b/webrtc/modules/video_coding/main/test/normal_test.h
@@ -33,12 +33,12 @@
// process encoded data received from the encoder,
// pass stream to the VCMReceiver module
virtual int32_t SendData(
- const webrtc::FrameType frameType,
- const uint8_t payloadType,
- const uint32_t timeStamp,
+ webrtc::FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ size_t payloadSize,
const webrtc::RTPFragmentationHeader& fragmentationHeader,
const webrtc::RTPVideoHeader* videoHdr) OVERRIDE;
@@ -46,15 +46,15 @@
// Currently - encode and decode with the same vcm module.
void RegisterReceiverVCM(webrtc::VideoCodingModule *vcm);
// Return sum of encoded data (all frames in the sequence)
- int32_t EncodedBytes();
+ size_t EncodedBytes();
// return number of encoder-skipped frames
- uint32_t SkipCnt();;
+ uint32_t SkipCnt();
// conversion function for payload type (needed for the callback function)
// RTPVideoVideoCodecTypes ConvertPayloadType(uint8_t payloadType);
private:
FILE* _encodedFile;
- uint32_t _encodedBytes;
+ size_t _encodedBytes;
uint32_t _skipCnt;
webrtc::VideoCodingModule* _VCMReceiver;
webrtc::FrameType _frameType;
@@ -62,29 +62,29 @@
NormalTest& _test;
}; // end of VCMEncodeCompleteCallback
-class VCMNTDecodeCompleCallback: public webrtc::VCMReceiveCallback
+class VCMNTDecodeCompleteCallback: public webrtc::VCMReceiveCallback
{
public:
- VCMNTDecodeCompleCallback(std::string outname): // or should it get a name?
- _decodedFile(NULL),
- _outname(outname),
- _decodedBytes(0),
- _currentWidth(0),
- _currentHeight(0) {}
- virtual ~VCMNTDecodeCompleCallback();
+ VCMNTDecodeCompleteCallback(std::string outname) // or should it get a name?
+ : _decodedFile(NULL),
+ _outname(outname),
+ _decodedBytes(0),
+ _currentWidth(0),
+ _currentHeight(0) {}
+ virtual ~VCMNTDecodeCompleteCallback();
void SetUserReceiveCallback(webrtc::VCMReceiveCallback* receiveCallback);
// will write decoded frame into file
virtual int32_t FrameToRender(webrtc::I420VideoFrame& videoFrame) OVERRIDE;
- int32_t DecodedBytes();
+ size_t DecodedBytes();
private:
FILE* _decodedFile;
std::string _outname;
- int _decodedBytes;
+ size_t _decodedBytes;
int _currentWidth;
int _currentHeight;
-}; // end of VCMDecodeCompleCallback class
+}; // end of VCMNTDecodeCompleteCallback class
class NormalTest
{
@@ -119,7 +119,7 @@
std::string _inname;
std::string _outname;
std::string _encodedName;
- int32_t _sumEncBytes;
+ size_t _sumEncBytes;
FILE* _sourceFile;
FILE* _decodedFile;
FILE* _encodedFile;
diff --git a/webrtc/modules/video_coding/main/test/quality_modes_test.cc b/webrtc/modules/video_coding/main/test/quality_modes_test.cc
index d488fa9..2993e53 100644
--- a/webrtc/modules/video_coding/main/test/quality_modes_test.cc
+++ b/webrtc/modules/video_coding/main/test/quality_modes_test.cc
@@ -212,7 +212,7 @@
// register a decoder (same codec for decoder and encoder )
TEST(_vcm->RegisterReceiveCodec(&codec, 2) == VCM_OK);
/* Callback Settings */
- VCMQMDecodeCompleCallback _decodeCallback(
+ VCMQMDecodeCompleteCallback _decodeCallback(
_decodedFile, _nativeFrameRate, feature_table_name_);
_vcm->RegisterReceiveCallback(&_decodeCallback);
VCMNTEncodeCompleteCallback _encodeCompleteCallback(_encodedFile, *this);
@@ -449,7 +449,7 @@
}
// Decoded Frame Callback Implementation
-VCMQMDecodeCompleCallback::VCMQMDecodeCompleCallback(
+VCMQMDecodeCompleteCallback::VCMQMDecodeCompleteCallback(
FILE* decodedFile, int frame_rate, std::string feature_table_name):
_decodedFile(decodedFile),
_decodedBytes(0),
@@ -468,7 +468,7 @@
//
}
-VCMQMDecodeCompleCallback::~VCMQMDecodeCompleCallback()
+VCMQMDecodeCompleteCallback::~VCMQMDecodeCompleteCallback()
{
// if (_interpolator != NULL)
// {
@@ -483,7 +483,7 @@
}
int32_t
-VCMQMDecodeCompleCallback::FrameToRender(I420VideoFrame& videoFrame)
+VCMQMDecodeCompleteCallback::FrameToRender(I420VideoFrame& videoFrame)
{
++frames_cnt_since_drop_;
@@ -537,19 +537,19 @@
return VCM_OK;
}
-int32_t VCMQMDecodeCompleCallback::DecodedBytes()
+size_t VCMQMDecodeCompleteCallback::DecodedBytes()
{
return _decodedBytes;
}
-void VCMQMDecodeCompleCallback::SetOriginalFrameDimensions(int32_t width,
- int32_t height)
+void VCMQMDecodeCompleteCallback::SetOriginalFrameDimensions(int32_t width,
+ int32_t height)
{
_origWidth = width;
_origHeight = height;
}
-int32_t VCMQMDecodeCompleCallback::buildInterpolator()
+int32_t VCMQMDecodeCompleteCallback::buildInterpolator()
{
uint32_t decFrameLength = _origWidth*_origHeight*3 >> 1;
if (_decBuffer != NULL)
@@ -569,7 +569,7 @@
// frame (or several consecutive frames from the end) must have been dropped. If
// this is the case, the last frame is repeated so that there are as many
// frames rendered as there are number of frames encoded.
-void VCMQMDecodeCompleCallback::WriteEnd(int input_frame_count)
+void VCMQMDecodeCompleteCallback::WriteEnd(int input_frame_count)
{
int num_missing_frames = input_frame_count - _frameCnt;
diff --git a/webrtc/modules/video_coding/main/test/quality_modes_test.h b/webrtc/modules/video_coding/main/test/quality_modes_test.h
index 38da78d..26c8229 100644
--- a/webrtc/modules/video_coding/main/test/quality_modes_test.h
+++ b/webrtc/modules/video_coding/main/test/quality_modes_test.h
@@ -51,18 +51,18 @@
}; // end of QualityModesTest class
-class VCMQMDecodeCompleCallback: public webrtc::VCMReceiveCallback
+class VCMQMDecodeCompleteCallback: public webrtc::VCMReceiveCallback
{
public:
- VCMQMDecodeCompleCallback(
+ VCMQMDecodeCompleteCallback(
FILE* decodedFile,
int frame_rate,
std::string feature_table_name);
- virtual ~VCMQMDecodeCompleCallback();
+ virtual ~VCMQMDecodeCompleteCallback();
void SetUserReceiveCallback(webrtc::VCMReceiveCallback* receiveCallback);
// will write decoded frame into file
int32_t FrameToRender(webrtc::I420VideoFrame& videoFrame);
- int32_t DecodedBytes();
+ size_t DecodedBytes();
void SetOriginalFrameDimensions(int32_t width, int32_t height);
int32_t buildInterpolator();
// Check if last frame is dropped, if so, repeat the last rendered frame.
@@ -70,7 +70,7 @@
private:
FILE* _decodedFile;
- uint32_t _decodedBytes;
+ size_t _decodedBytes;
// QualityModesTest& _test;
int _origWidth;
int _origHeight;
@@ -86,7 +86,7 @@
-}; // end of VCMQMDecodeCompleCallback class
+}; // end of VCMQMDecodeCompleteCallback class
class QMTestVideoSettingsCallback : public webrtc::VCMQMSettingsCallback
{
diff --git a/webrtc/modules/video_coding/main/test/receiver_tests.h b/webrtc/modules/video_coding/main/test/receiver_tests.h
index 91b7f8e..de1eb63 100644
--- a/webrtc/modules/video_coding/main/test/receiver_tests.h
+++ b/webrtc/modules/video_coding/main/test/receiver_tests.h
@@ -29,7 +29,7 @@
virtual int32_t OnReceivedPayloadData(
const uint8_t* payload_data,
- const uint16_t payload_size,
+ const size_t payload_size,
const webrtc::WebRtcRTPHeader* rtp_header) OVERRIDE {
return vcm_->IncomingPacket(payload_data, payload_size, *rtp_header);
}
diff --git a/webrtc/modules/video_coding/main/test/rtp_player.cc b/webrtc/modules/video_coding/main/test/rtp_player.cc
index 1aea7e0..81295ab 100644
--- a/webrtc/modules/video_coding/main/test/rtp_player.cc
+++ b/webrtc/modules/video_coding/main/test/rtp_player.cc
@@ -41,7 +41,7 @@
class RawRtpPacket {
public:
- RawRtpPacket(const uint8_t* data, uint32_t length, uint32_t ssrc,
+ RawRtpPacket(const uint8_t* data, size_t length, uint32_t ssrc,
uint16_t seq_num)
: data_(new uint8_t[length]),
length_(length),
@@ -53,7 +53,7 @@
}
const uint8_t* data() const { return data_.get(); }
- uint32_t length() const { return length_; }
+ size_t length() const { return length_; }
int64_t resend_time_ms() const { return resend_time_ms_; }
void set_resend_time_ms(int64_t timeMs) { resend_time_ms_ = timeMs; }
uint32_t ssrc() const { return ssrc_; }
@@ -61,7 +61,7 @@
private:
scoped_ptr<uint8_t[]> data_;
- uint32_t length_;
+ size_t length_;
int64_t resend_time_ms_;
uint32_t ssrc_;
uint16_t seq_num_;
@@ -251,7 +251,7 @@
return 0;
}
- void IncomingPacket(const uint8_t* data, uint32_t length) {
+ void IncomingPacket(const uint8_t* data, size_t length) {
for (HandlerMapIt it = handlers_.begin(); it != handlers_.end(); ++it) {
if (!it->second->rtp_header_parser_->IsRtcp(data, length)) {
RTPHeader header;
@@ -375,14 +375,10 @@
if (reordering_ && reorder_buffer_.get() == NULL) {
reorder_buffer_.reset(
- new RawRtpPacket(next_packet_.data,
- static_cast<uint32_t>(next_packet_.length),
- 0,
- 0));
+ new RawRtpPacket(next_packet_.data, next_packet_.length, 0, 0));
return 0;
}
- int ret = SendPacket(next_packet_.data,
- static_cast<uint32_t>(next_packet_.length));
+ int ret = SendPacket(next_packet_.data, next_packet_.length);
if (reorder_buffer_.get()) {
SendPacket(reorder_buffer_->data(), reorder_buffer_->length());
reorder_buffer_.reset(NULL);
@@ -421,7 +417,7 @@
}
private:
- int SendPacket(const uint8_t* data, uint32_t length) {
+ int SendPacket(const uint8_t* data, size_t length) {
assert(data);
assert(length > 0);
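The rtp_player.cc hunks above also show the cast-removal side of the migration: once RawRtpPacket and SendPacket take size_t, the caller can forward next_packet_.length directly. A minimal sketch of that pattern, assuming simplified stand-in types rather than the real rtp_player classes:

// Hypothetical sketch of the cast removal: producer and consumer agree on
// size_t, so no static_cast<uint32_t>(...) is needed at the call site.
#include <cstddef>
#include <cstdint>
#include <cstring>

namespace {

struct Packet {
  const uint8_t* data;
  size_t length;  // was narrowed to uint32_t before the size_t migration
};

int SendPacket(const uint8_t* data, size_t length) {
  // The real code hands the buffer to the RTP receiver; here we only check it.
  return (data != nullptr && length > 0) ? 0 : -1;
}

}  // namespace

int main() {
  uint8_t buffer[1500];
  std::memset(buffer, 0, sizeof(buffer));
  Packet next_packet = {buffer, sizeof(buffer)};
  // Types already agree; the length is forwarded as-is.
  return SendPacket(next_packet.data, next_packet.length);
}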
diff --git a/webrtc/modules/video_coding/main/test/test_callbacks.cc b/webrtc/modules/video_coding/main/test/test_callbacks.cc
index d68f994..35aaae1 100644
--- a/webrtc/modules/video_coding/main/test/test_callbacks.cc
+++ b/webrtc/modules/video_coding/main/test/test_callbacks.cc
@@ -57,7 +57,7 @@
const uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ const size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr)
{
@@ -106,7 +106,7 @@
return ret;
}
-float
+size_t
VCMEncodeCompleteCallback::EncodedBytes()
{
return _encodedBytes;
@@ -147,12 +147,12 @@
int32_t
VCMRTPEncodeCompleteCallback::SendData(
- const FrameType frameType,
- const uint8_t payloadType,
- const uint32_t timeStamp,
+ FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr)
{
@@ -169,11 +169,11 @@
videoHdr);
}
-float
+size_t
VCMRTPEncodeCompleteCallback::EncodedBytes()
{
// only good for one call - after which will reset value;
- float tmp = _encodedBytes;
+ size_t tmp = _encodedBytes;
_encodedBytes = 0;
return tmp;
}
@@ -197,12 +197,12 @@
if (PrintI420VideoFrame(videoFrame, _decodedFile) < 0) {
return -1;
}
- _decodedBytes+= CalcBufferSize(kI420, videoFrame.width(),
- videoFrame.height());
+ _decodedBytes += CalcBufferSize(kI420, videoFrame.width(),
+ videoFrame.height());
return VCM_OK;
}
-int32_t
+size_t
VCMDecodeCompleteCallback::DecodedBytes()
{
return _decodedBytes;
@@ -248,7 +248,7 @@
}
int
-RTPSendCompleteCallback::SendPacket(int channel, const void *data, int len)
+RTPSendCompleteCallback::SendPacket(int channel, const void *data, size_t len)
{
_sendCount++;
_totalSentLength += len;
@@ -319,11 +319,13 @@
delete packet;
packet = NULL;
}
- return len; // OK
+ return static_cast<int>(len); // OK
}
int
-RTPSendCompleteCallback::SendRTCPPacket(int channel, const void *data, int len)
+RTPSendCompleteCallback::SendRTCPPacket(int channel,
+ const void *data,
+ size_t len)
{
// Incorporate network conditions
return SendPacket(channel, data, len);
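The one place a cast is added rather than removed is the return in RTPSendCompleteCallback::SendPacket(): the transport callback still returns int, so the size_t length is narrowed explicitly at that single boundary instead of implicitly throughout the call chain. A minimal sketch of that boundary, with an invented function name and an assert documenting the assumption that packet lengths fit in int:

// Hypothetical sketch of the explicit narrowing kept at the int boundary.
#include <cassert>
#include <cstddef>
#include <limits>

namespace {

int SendPacketLikeCallback(const void* data, size_t len) {
  assert(data != nullptr);
  assert(len > 0);
  // Lengths here are RTP-packet sized, so the narrowing is safe; the assert
  // documents that assumption rather than relying on it silently.
  assert(len <= static_cast<size_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(len);  // transport interface still returns int
}

}  // namespace

int main() {
  char payload[1200] = {0};
  return SendPacketLikeCallback(payload, sizeof(payload)) == 1200 ? 0 : 1;
}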
diff --git a/webrtc/modules/video_coding/main/test/test_callbacks.h b/webrtc/modules/video_coding/main/test/test_callbacks.h
index 608d185..fb08e9c 100644
--- a/webrtc/modules/video_coding/main/test/test_callbacks.h
+++ b/webrtc/modules/video_coding/main/test/test_callbacks.h
@@ -44,12 +44,12 @@
void RegisterTransportCallback(VCMPacketizationCallback* transport);
// Process encoded data received from the encoder, pass stream to the
// VCMReceiver module
- virtual int32_t SendData(const FrameType frameType,
- const uint8_t payloadType,
- const uint32_t timeStamp,
+ virtual int32_t SendData(FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr) OVERRIDE;
// Register exisitng VCM. Currently - encode and decode under same module.
@@ -57,7 +57,7 @@
// Return size of last encoded frame data (all frames in the sequence)
// Good for only one call - after which will reset value
// (to allow detection of frame drop)
- float EncodedBytes();
+ size_t EncodedBytes();
// Return encode complete (true/false)
bool EncodeComplete();
// Inform callback of codec used
@@ -77,7 +77,7 @@
private:
FILE* _encodedFile;
- float _encodedBytes;
+ size_t _encodedBytes;
VideoCodingModule* _VCMReceiver;
FrameType _frameType;
uint16_t _seqNo;
@@ -101,17 +101,17 @@
virtual ~VCMRTPEncodeCompleteCallback() {}
// Process encoded data received from the encoder, pass stream to the
// RTP module
- virtual int32_t SendData(const FrameType frameType,
- const uint8_t payloadType,
- const uint32_t timeStamp,
+ virtual int32_t SendData(FrameType frameType,
+ uint8_t payloadType,
+ uint32_t timeStamp,
int64_t capture_time_ms,
const uint8_t* payloadData,
- const uint32_t payloadSize,
+ size_t payloadSize,
const RTPFragmentationHeader& fragmentationHeader,
const RTPVideoHeader* videoHdr) OVERRIDE;
// Return size of last encoded frame. Value good for one call
// (resets to zero after call to inform test of frame drop)
- float EncodedBytes();
+ size_t EncodedBytes();
// Return encode complete (true/false)
bool EncodeComplete();
// Inform callback of codec used
@@ -126,7 +126,7 @@
}
private:
- float _encodedBytes;
+ size_t _encodedBytes;
FrameType _frameType;
bool _encodeComplete;
RtpRtcp* _RTPModule;
@@ -145,10 +145,10 @@
virtual ~VCMDecodeCompleteCallback() {}
// Write decoded frame into file
virtual int32_t FrameToRender(webrtc::I420VideoFrame& videoFrame) OVERRIDE;
- int32_t DecodedBytes();
+ size_t DecodedBytes();
private:
- FILE* _decodedFile;
- uint32_t _decodedBytes;
+ FILE* _decodedFile;
+ size_t _decodedBytes;
}; // end of VCMDecodeCompleCallback class
// Transport callback
@@ -165,9 +165,11 @@
void SetRtpModule(RtpRtcp* rtp_module) { _rtp = rtp_module; }
// Send Packet to receive side RTP module
- virtual int SendPacket(int channel, const void *data, int len) OVERRIDE;
+ virtual int SendPacket(int channel, const void *data, size_t len) OVERRIDE;
// Send RTCP Packet to receive side RTP module
- virtual int SendRTCPPacket(int channel, const void *data, int len) OVERRIDE;
+ virtual int SendRTCPPacket(int channel,
+ const void *data,
+ size_t len) OVERRIDE;
// Set percentage of channel loss in the network
void SetLossPct(double lossPct);
// Set average size of burst loss
@@ -181,7 +183,7 @@
// Return send count
int SendCount() {return _sendCount; }
// Return accumulated length in bytes of transmitted packets
- uint32_t TotalSentLength() {return _totalSentLength;}
+ size_t TotalSentLength() {return _totalSentLength;}
protected:
// Randomly decide whether to drop packets, based on the channel model
bool PacketLoss();
@@ -198,7 +200,7 @@
uint32_t _networkDelayMs;
double _jitterVar;
bool _prevLossState;
- uint32_t _totalSentLength;
+ size_t _totalSentLength;
std::list<RtpPacket*> _rtpPackets;
RtpDump* _rtpDump;
};
diff --git a/webrtc/modules/video_coding/main/test/test_util.h b/webrtc/modules/video_coding/main/test/test_util.h
index 9f8b5a9..b1c156d 100644
--- a/webrtc/modules/video_coding/main/test/test_util.h
+++ b/webrtc/modules/video_coding/main/test/test_util.h
@@ -51,7 +51,7 @@
struct RtpPacket {
uint8_t data[1650]; // max packet size
- int32_t length;
+ size_t length;
int64_t receiveTime;
};
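With RtpPacket::length now size_t, bounds checks against the fixed 1650-byte buffer compare like-for-like unsigned types. A small sketch of how such a struct is typically filled; the helper name is invented and not part of this CL:

// Hypothetical helper: copy a payload into the test-side RtpPacket struct,
// guarding against the fixed buffer size before the memcpy.
#include <cstddef>
#include <cstdint>
#include <cstring>

namespace {

struct RtpPacket {
  uint8_t data[1650];  // max packet size
  size_t length;
  int64_t receiveTime;
};

bool FillRtpPacket(const uint8_t* src, size_t src_length, int64_t receive_time,
                   RtpPacket* out) {
  if (src == nullptr || out == nullptr || src_length > sizeof(out->data))
    return false;  // would overflow the fixed buffer
  std::memcpy(out->data, src, src_length);
  out->length = src_length;
  out->receiveTime = receive_time;
  return true;
}

}  // namespace

int main() {
  uint8_t payload[100] = {0};
  RtpPacket packet;
  return FillRtpPacket(payload, sizeof(payload), 0, &packet) ? 0 : 1;
}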
diff --git a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc b/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
index aa636a0..e0dd509 100644
--- a/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
+++ b/webrtc/modules/video_coding/main/test/vcm_payload_sink_factory.cc
@@ -53,13 +53,13 @@
// PayloadSinkInterface
virtual int32_t OnReceivedPayloadData(
const uint8_t* payload_data,
- const uint16_t payload_size,
+ const size_t payload_size,
const WebRtcRTPHeader* rtp_header) OVERRIDE {
return vcm_->IncomingPacket(payload_data, payload_size, *rtp_header);
}
virtual bool OnRecoveredPacket(const uint8_t* packet,
- int packet_length) OVERRIDE {
+ size_t packet_length) OVERRIDE {
// We currently don't handle FEC.
return true;
}
diff --git a/webrtc/modules/video_coding/main/test/video_source.cc b/webrtc/modules/video_coding/main/test/video_source.cc
index 65ee6a3..0c02e29 100644
--- a/webrtc/modules/video_coding/main/test/video_source.cc
+++ b/webrtc/modules/video_coding/main/test/video_source.cc
@@ -55,7 +55,7 @@
assert(frameRate > 0);
}
-int32_t
+size_t
VideoSource::GetFrameLength() const
{
return webrtc::CalcBufferSize(_type, _width, _height);
diff --git a/webrtc/modules/video_coding/main/test/video_source.h b/webrtc/modules/video_coding/main/test/video_source.h
index 98cc3fe..05deb4a 100644
--- a/webrtc/modules/video_coding/main/test/video_source.h
+++ b/webrtc/modules/video_coding/main/test/video_source.h
@@ -69,7 +69,7 @@
// Returns the filename with the path (including the leading slash) removed.
std::string GetName() const;
- int32_t GetFrameLength() const;
+ size_t GetFrameLength() const;
private:
std::string _fileName;
diff --git a/webrtc/modules/video_coding/utility/frame_dropper.cc b/webrtc/modules/video_coding/utility/frame_dropper.cc
index 54c8cb8..a684af7 100644
--- a/webrtc/modules/video_coding/utility/frame_dropper.cc
+++ b/webrtc/modules/video_coding/utility/frame_dropper.cc
@@ -75,7 +75,7 @@
}
void
-FrameDropper::Fill(uint32_t frameSizeBytes, bool deltaFrame)
+FrameDropper::Fill(size_t frameSizeBytes, bool deltaFrame)
{
if (!_enabled)
{
diff --git a/webrtc/modules/video_coding/utility/include/frame_dropper.h b/webrtc/modules/video_coding/utility/include/frame_dropper.h
index 8eebd78..2b78a72 100644
--- a/webrtc/modules/video_coding/utility/include/frame_dropper.h
+++ b/webrtc/modules/video_coding/utility/include/frame_dropper.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_INCLUDE_FRAME_DROPPER_H_
+#include <cstddef>
+
#include "webrtc/base/exp_filter.h"
#include "webrtc/typedefs.h"
@@ -49,7 +51,7 @@
// returned from the encoder.
// - deltaFrame : True if the encoder returned
// a key frame.
- virtual void Fill(uint32_t frameSizeBytes, bool deltaFrame);
+ virtual void Fill(size_t frameSizeBytes, bool deltaFrame);
virtual void Leak(uint32_t inputFrameRate);
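For context on the Fill(size_t frameSizeBytes, ...) change: the frame dropper follows a leaky-bucket idea, where encoded frame sizes fill the bucket and the target per-frame budget drains it. The toy class below is a simplified stand-in written only to illustrate the size_t signature; it is not the real webrtc::FrameDropper implementation and its drop rule is an assumption for the example.

// Simplified leaky-bucket stand-in; real FrameDropper logic differs.
#include <cstddef>
#include <cstdio>

namespace {

class ToyFrameDropper {
 public:
  ToyFrameDropper(float target_bitrate_kbps, float frame_rate_fps)
      : budget_per_frame_bytes_(target_bitrate_kbps * 1000.0f / 8.0f /
                                frame_rate_fps),
        accumulator_bytes_(0.0f) {}

  // Mirrors Fill(size_t frameSizeBytes, bool deltaFrame): add the encoded
  // frame's byte count to the bucket (key-frame handling omitted for brevity).
  void Fill(size_t frame_size_bytes, bool /*delta_frame*/) {
    accumulator_bytes_ += static_cast<float>(frame_size_bytes);
  }

  // Mirrors Leak(): drain one frame's worth of budget per frame interval.
  void Leak() {
    accumulator_bytes_ -= budget_per_frame_bytes_;
    if (accumulator_bytes_ < 0.0f)
      accumulator_bytes_ = 0.0f;
  }

  // Assumed drop rule: drop the next frame when more than one frame's
  // budget over.
  bool DropFrame() const {
    return accumulator_bytes_ > budget_per_frame_bytes_;
  }

 private:
  const float budget_per_frame_bytes_;
  float accumulator_bytes_;
};

}  // namespace

int main() {
  ToyFrameDropper dropper(500.0f, 30.0f);  // 500 kbps at 30 fps
  const size_t encoded_sizes[] = {12000, 2100, 1900, 9000};  // bytes
  for (size_t size : encoded_sizes) {
    dropper.Fill(size, /*delta_frame=*/true);
    dropper.Leak();
    std::printf("frame of %zu bytes -> drop next: %d\n", size,
                dropper.DropFrame());
  }
  return 0;
}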