Revert "Implement H264 simulcast support and generalize SimulcastEncoderAdapter use for H264 & VP8."
This reverts commit 07efe436c9002e139845f62486e3ee4e29f0d85b.
Reason for revert: Breaks downstream project.
The cricket::GetSimulcastConfig method signature has been updated.
I think you can get away with a default value for temporal_layers_supported (and then remove it after a few days, once downstream projects have been updated).
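For reference, a minimal sketch of that transition, assuming the parameter
list visible at the call site in videocodec_test_fixture_impl.cc below (the
exact types in the real header may differ):

  // Hypothetical transitional declaration: keep the new flag but default it,
  // so callers that still pass eight arguments keep compiling until updated.
  std::vector<webrtc::VideoStream> GetSimulcastConfig(
      size_t max_streams,
      int width,
      int height,
      int max_bitrate_bps,
      double bitrate_priority,
      int max_qp,
      int max_framerate,
      bool is_screenshare,
      bool temporal_layers_supported = true);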
Original change's description:
> Implement H264 simulcast support and generalize SimulcastEncoderAdapter use for H264 & VP8.
>
> * Move SimulcastEncoderAdapter out under modules/video_coding
> * Move SimulcastRateAllocator back out to modules/video_coding/utility
> * Move TemporalLayers and ScreenshareLayers to modules/video_coding/utility
> * Move any VP8 specific code - such as temporal layer bitrate budgeting -
> under codec type dependent conditionals.
> * Plumb the simulcast index for H264 in the codec specific and RTP format data structures.
>
> Bug: webrtc:5840
> Change-Id: Ieced8a00e38f273c1a6cfd0f5431a87d07b8f44e
> Reviewed-on: https://webrtc-review.googlesource.com/64100
> Commit-Queue: Harald Alvestrand <hta@webrtc.org>
> Reviewed-by: Stefan Holmer <stefan@webrtc.org>
> Reviewed-by: Erik Språng <sprang@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#23705}
TBR=sprang@webrtc.org,stefan@webrtc.org,mflodman@webrtc.org,hta@webrtc.org,sergio.garcia.murillo@gmail.com,titovartem@webrtc.org,agouaillard@gmail.com
Change-Id: Ic9d3b1eeaf195bb5ec2063954421f5e77866d663
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:5840
Reviewed-on: https://webrtc-review.googlesource.com/84760
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23710}
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
index cac198e..eee954d 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.cc
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -20,14 +20,10 @@
#include "third_party/openh264/src/codec/api/svc/codec_ver.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
-#include "modules/video_coding/utility/simulcast_rate_allocator.h"
-#include "modules/video_coding/utility/simulcast_utility.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/timeutils.h"
#include "system_wrappers/include/metrics.h"
-#include "third_party/libyuv/include/libyuv/convert.h"
-#include "third_party/libyuv/include/libyuv/scale.h"
namespace webrtc {
@@ -161,7 +157,16 @@
}
H264EncoderImpl::H264EncoderImpl(const cricket::VideoCodec& codec)
- : packetization_mode_(H264PacketizationMode::SingleNalUnit),
+ : openh264_encoder_(nullptr),
+ width_(0),
+ height_(0),
+ max_frame_rate_(0.0f),
+ target_bps_(0),
+ max_bps_(0),
+ mode_(VideoCodecMode::kRealtimeVideo),
+ frame_dropping_on_(false),
+ key_frame_interval_(0),
+ packetization_mode_(H264PacketizationMode::SingleNalUnit),
max_payload_size_(0),
number_of_cores_(0),
encoded_image_callback_(nullptr),
@@ -174,30 +179,25 @@
packetization_mode_string == "1") {
packetization_mode_ = H264PacketizationMode::NonInterleaved;
}
- downscaled_buffers_.reserve(kMaxSimulcastStreams - 1);
- encoded_images_.reserve(kMaxSimulcastStreams);
- encoded_image_buffers_.reserve(kMaxSimulcastStreams);
- encoders_.reserve(kMaxSimulcastStreams);
- configurations_.reserve(kMaxSimulcastStreams);
}
H264EncoderImpl::~H264EncoderImpl() {
Release();
}
-int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
+int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
int32_t number_of_cores,
size_t max_payload_size) {
ReportInit();
- if (!inst || inst->codecType != kVideoCodecH264) {
+ if (!codec_settings || codec_settings->codecType != kVideoCodecH264) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- if (inst->maxFramerate == 0) {
+ if (codec_settings->maxFramerate == 0) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
- if (inst->width < 1 || inst->height < 1) {
+ if (codec_settings->width < 1 || codec_settings->height < 1) {
ReportError();
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -207,134 +207,73 @@
ReportError();
return release_ret;
}
+ RTC_DCHECK(!openh264_encoder_);
- int number_of_streams = SimulcastUtility::NumberOfSimulcastStreams(*inst);
- bool doing_simulcast = (number_of_streams > 1);
-
- if (doing_simulcast && (!SimulcastUtility::ValidSimulcastResolutions(
- *inst, number_of_streams) ||
- !SimulcastUtility::ValidSimulcastTemporalLayers(
- *inst, number_of_streams))) {
- return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
+ // Create encoder.
+ if (WelsCreateSVCEncoder(&openh264_encoder_) != 0) {
+ // Failed to create encoder.
+ RTC_LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
+ RTC_DCHECK(!openh264_encoder_);
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
}
- downscaled_buffers_.resize(number_of_streams - 1);
- encoded_images_.resize(number_of_streams);
- encoded_image_buffers_.resize(number_of_streams);
- encoders_.resize(number_of_streams);
- pictures_.resize(number_of_streams);
- configurations_.resize(number_of_streams);
+ RTC_DCHECK(openh264_encoder_);
+ if (kOpenH264EncoderDetailedLogging) {
+ int trace_level = WELS_LOG_DETAIL;
+ openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
+ }
+ // else WELS_LOG_DEFAULT is used by default.
number_of_cores_ = number_of_cores;
+ // Set internal settings from codec_settings
+ width_ = codec_settings->width;
+ height_ = codec_settings->height;
+ max_frame_rate_ = static_cast<float>(codec_settings->maxFramerate);
+ mode_ = codec_settings->mode;
+ frame_dropping_on_ = codec_settings->H264().frameDroppingOn;
+ key_frame_interval_ = codec_settings->H264().keyFrameInterval;
max_payload_size_ = max_payload_size;
- codec_ = *inst;
- // Code expects simulcastStream resolutions to be correct, make sure they are
- // filled even when there are no simulcast layers.
- if (codec_.numberOfSimulcastStreams == 0) {
- codec_.simulcastStream[0].width = codec_.width;
- codec_.simulcastStream[0].height = codec_.height;
+ // Codec_settings uses kbits/second; encoder uses bits/second.
+ max_bps_ = codec_settings->maxBitrate * 1000;
+ if (codec_settings->targetBitrate == 0)
+ target_bps_ = codec_settings->startBitrate * 1000;
+ else
+ target_bps_ = codec_settings->targetBitrate * 1000;
+
+ SEncParamExt encoder_params = CreateEncoderParams();
+
+ // Initialize.
+ if (openh264_encoder_->InitializeExt(&encoder_params) != 0) {
+ RTC_LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
+ Release();
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
}
+ // TODO(pbos): Base init params on these values before submitting.
+ int video_format = EVideoFormatType::videoFormatI420;
+ openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
- for (int i = 0, idx = number_of_streams - 1; i < number_of_streams;
- ++i, --idx) {
- // Temporal layers still not supported.
- if (inst->simulcastStream[i].numberOfTemporalLayers > 1) {
- Release();
- return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
- }
- ISVCEncoder* openh264_encoder;
- // Create encoder.
- if (WelsCreateSVCEncoder(&openh264_encoder) != 0) {
- // Failed to create encoder.
- RTC_LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
- RTC_DCHECK(!openh264_encoder);
- Release();
- ReportError();
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- RTC_DCHECK(openh264_encoder);
- if (kOpenH264EncoderDetailedLogging) {
- int trace_level = WELS_LOG_DETAIL;
- openh264_encoder->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
- }
- // else WELS_LOG_DEFAULT is used by default.
-
- // Store h264 encoder.
- encoders_[i] = openh264_encoder;
-
- // Set internal settings from codec_settings
- configurations_[i].simulcast_idx = idx;
- configurations_[i].sending = false;
- configurations_[i].width = codec_.simulcastStream[idx].width;
- configurations_[i].height = codec_.simulcastStream[idx].height;
- configurations_[i].max_frame_rate = static_cast<float>(codec_.maxFramerate);
- configurations_[i].frame_dropping_on = codec_.H264()->frameDroppingOn;
- configurations_[i].key_frame_interval = codec_.H264()->keyFrameInterval;
-
- // Create downscaled image buffers.
- if (i > 0) {
- downscaled_buffers_[i - 1] = I420Buffer::Create(
- configurations_[i].width, configurations_[i].height,
- configurations_[i].width, configurations_[i].width / 2,
- configurations_[i].width / 2);
- }
-
- // Codec_settings uses kbits/second; encoder uses bits/second.
- configurations_[i].max_bps = codec_.maxBitrate * 1000;
- if (codec_.targetBitrate == 0) {
- configurations_[i].target_bps = codec_.startBitrate * 1000;
- } else {
- configurations_[i].target_bps = codec_.targetBitrate * 1000;
- }
-
- // Create encoder parameters based on the layer configuration.
- SEncParamExt encoder_params = CreateEncoderParams(i);
-
- // Initialize.
- if (openh264_encoder->InitializeExt(&encoder_params) != 0) {
- RTC_LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
- Release();
- ReportError();
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
- // TODO(pbos): Base init params on these values before submitting.
- int video_format = EVideoFormatType::videoFormatI420;
- openh264_encoder->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
-
- // Initialize encoded image. Default buffer size: size of unencoded data.
- encoded_images_[i]._size =
- CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
- codec_.simulcastStream[idx].height);
- encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
- encoded_image_buffers_[i].reset(encoded_images_[i]._buffer);
- encoded_images_[i]._completeFrame = true;
- encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
- encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
- encoded_images_[i]._length = 0;
- }
-
- SimulcastRateAllocator init_allocator(codec_);
- BitrateAllocation allocation = init_allocator.GetAllocation(
- codec_.targetBitrate ? codec_.targetBitrate * 1000
- : codec_.startBitrate * 1000,
- codec_.maxFramerate);
- return SetRateAllocation(allocation, codec_.maxFramerate);
+ // Initialize encoded image. Default buffer size: size of unencoded data.
+ encoded_image_._size = CalcBufferSize(VideoType::kI420, codec_settings->width,
+ codec_settings->height);
+ encoded_image_._buffer = new uint8_t[encoded_image_._size];
+ encoded_image_buffer_.reset(encoded_image_._buffer);
+ encoded_image_._completeFrame = true;
+ encoded_image_._encodedWidth = 0;
+ encoded_image_._encodedHeight = 0;
+ encoded_image_._length = 0;
+ return WEBRTC_VIDEO_CODEC_OK;
}
int32_t H264EncoderImpl::Release() {
- while (!encoders_.empty()) {
- ISVCEncoder* openh264_encoder = encoders_.back();
- if (openh264_encoder) {
- RTC_CHECK_EQ(0, openh264_encoder->Uninitialize());
- WelsDestroySVCEncoder(openh264_encoder);
- }
- encoders_.pop_back();
+ if (openh264_encoder_) {
+ RTC_CHECK_EQ(0, openh264_encoder_->Uninitialize());
+ WelsDestroySVCEncoder(openh264_encoder_);
+ openh264_encoder_ = nullptr;
}
- downscaled_buffers_.clear();
- configurations_.clear();
- encoded_images_.clear();
- encoded_image_buffers_.clear();
- pictures_.clear();
+ encoded_image_._buffer = nullptr;
+ encoded_image_buffer_.reset();
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -345,59 +284,27 @@
}
int32_t H264EncoderImpl::SetRateAllocation(
- const BitrateAllocation& bitrate,
- uint32_t new_framerate) {
- if (encoders_.empty())
- return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
-
- if (new_framerate < 1)
+ const VideoBitrateAllocation& bitrate_allocation,
+ uint32_t framerate) {
+ if (bitrate_allocation.get_sum_bps() <= 0 || framerate <= 0)
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
- if (bitrate.get_sum_bps() == 0) {
- // Encoder paused, turn off all encoding.
- for (size_t i = 0; i < configurations_.size(); ++i)
- configurations_[i].SetStreamState(false);
- return WEBRTC_VIDEO_CODEC_OK;
- }
+ target_bps_ = bitrate_allocation.get_sum_bps();
+ max_frame_rate_ = static_cast<float>(framerate);
- // At this point, bitrate allocation should already match codec settings.
- if (codec_.maxBitrate > 0)
- RTC_DCHECK_LE(bitrate.get_sum_kbps(), codec_.maxBitrate);
- RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.minBitrate);
- if (codec_.numberOfSimulcastStreams > 0)
- RTC_DCHECK_GE(bitrate.get_sum_kbps(), codec_.simulcastStream[0].minBitrate);
-
- codec_.maxFramerate = new_framerate;
-
- size_t stream_idx = encoders_.size() - 1;
- for (size_t i = 0; i < encoders_.size(); ++i, --stream_idx) {
- // Update layer config.
- configurations_[i].target_bps = bitrate.GetSpatialLayerSum(stream_idx);
- configurations_[i].max_frame_rate = static_cast<float>(new_framerate);
-
- if (configurations_[i].target_bps) {
- configurations_[i].SetStreamState(true);
-
- // Update h264 encoder.
- SBitrateInfo target_bitrate;
- memset(&target_bitrate, 0, sizeof(SBitrateInfo));
- target_bitrate.iLayer = SPATIAL_LAYER_ALL,
- target_bitrate.iBitrate = configurations_[i].target_bps;
- encoders_[i]->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
- encoders_[i]->SetOption(ENCODER_OPTION_FRAME_RATE,
- &configurations_[i].max_frame_rate);
- } else {
- configurations_[i].SetStreamState(false);
- }
- }
-
+ SBitrateInfo target_bitrate;
+ memset(&target_bitrate, 0, sizeof(SBitrateInfo));
+ target_bitrate.iLayer = SPATIAL_LAYER_ALL;
+ target_bitrate.iBitrate = target_bps_;
+ openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
+ openh264_encoder_->SetOption(ENCODER_OPTION_FRAME_RATE, &max_frame_rate_);
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
- if (encoders_.empty()) {
+ if (!IsInitialized()) {
ReportError();
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
@@ -409,134 +316,83 @@
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
+ bool force_key_frame = false;
+ if (frame_types != nullptr) {
+ // We only support a single stream.
+ RTC_DCHECK_EQ(frame_types->size(), 1);
+ // Skip frame?
+ if ((*frame_types)[0] == kEmptyFrame) {
+ return WEBRTC_VIDEO_CODEC_OK;
+ }
+ // Force key frame?
+ force_key_frame = (*frame_types)[0] == kVideoFrameKey;
+ }
+ if (force_key_frame) {
+ // API doc says ForceIntraFrame(false) does nothing, but calling this
+ // function forces a key frame regardless of the |bIDR| argument's value.
+ // (If every frame is a key frame we get lag/delays.)
+ openh264_encoder_->ForceIntraFrame(true);
+ }
rtc::scoped_refptr<const I420BufferInterface> frame_buffer =
input_frame.video_frame_buffer()->ToI420();
+ // EncodeFrame input.
+ SSourcePicture picture;
+ memset(&picture, 0, sizeof(SSourcePicture));
+ picture.iPicWidth = frame_buffer->width();
+ picture.iPicHeight = frame_buffer->height();
+ picture.iColorFormat = EVideoFormatType::videoFormatI420;
+ picture.uiTimeStamp = input_frame.ntp_time_ms();
+ picture.iStride[0] = frame_buffer->StrideY();
+ picture.iStride[1] = frame_buffer->StrideU();
+ picture.iStride[2] = frame_buffer->StrideV();
+ picture.pData[0] = const_cast<uint8_t*>(frame_buffer->DataY());
+ picture.pData[1] = const_cast<uint8_t*>(frame_buffer->DataU());
+ picture.pData[2] = const_cast<uint8_t*>(frame_buffer->DataV());
- bool send_key_frame = false;
- for (size_t i = 0; i < configurations_.size(); ++i) {
- if (configurations_[i].key_frame_request && configurations_[i].sending) {
- send_key_frame = true;
- break;
- }
- }
- if (!send_key_frame && frame_types) {
- for (size_t i = 0; i < frame_types->size() && i < configurations_.size();
- ++i) {
- if ((*frame_types)[i] == kVideoFrameKey && configurations_[i].sending) {
- send_key_frame = true;
- break;
- }
- }
+ // EncodeFrame output.
+ SFrameBSInfo info;
+ memset(&info, 0, sizeof(SFrameBSInfo));
+
+ // Encode!
+ int enc_ret = openh264_encoder_->EncodeFrame(&picture, &info);
+ if (enc_ret != 0) {
+ RTC_LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
+ << enc_ret << ".";
+ ReportError();
+ return WEBRTC_VIDEO_CODEC_ERROR;
}
- RTC_DCHECK_EQ(configurations_[0].width, frame_buffer->width());
- RTC_DCHECK_EQ(configurations_[0].height, frame_buffer->height());
+ encoded_image_._encodedWidth = frame_buffer->width();
+ encoded_image_._encodedHeight = frame_buffer->height();
+ encoded_image_._timeStamp = input_frame.timestamp();
+ encoded_image_.ntp_time_ms_ = input_frame.ntp_time_ms();
+ encoded_image_.capture_time_ms_ = input_frame.render_time_ms();
+ encoded_image_.rotation_ = input_frame.rotation();
+ encoded_image_.content_type_ = (mode_ == VideoCodecMode::kScreensharing)
+ ? VideoContentType::SCREENSHARE
+ : VideoContentType::UNSPECIFIED;
+ encoded_image_.timing_.flags = VideoSendTiming::kInvalid;
+ encoded_image_._frameType = ConvertToVideoFrameType(info.eFrameType);
- // Encode image for each layer.
- for (size_t i = 0; i < encoders_.size(); ++i) {
- // EncodeFrame input.
- pictures_[i] = {0};
- pictures_[i].iPicWidth = configurations_[i].width;
- pictures_[i].iPicHeight = configurations_[i].height;
- pictures_[i].iColorFormat = EVideoFormatType::videoFormatI420;
- pictures_[i].uiTimeStamp = input_frame.ntp_time_ms();
- // Downscale images on second and ongoing layers.
- if (i == 0) {
- pictures_[i].iStride[0] = frame_buffer->StrideY();
- pictures_[i].iStride[1] = frame_buffer->StrideU();
- pictures_[i].iStride[2] = frame_buffer->StrideV();
- pictures_[i].pData[0] = const_cast<uint8_t*>(frame_buffer->DataY());
- pictures_[i].pData[1] = const_cast<uint8_t*>(frame_buffer->DataU());
- pictures_[i].pData[2] = const_cast<uint8_t*>(frame_buffer->DataV());
- } else {
- pictures_[i].iStride[0] = downscaled_buffers_[i - 1]->StrideY();
- pictures_[i].iStride[1] = downscaled_buffers_[i - 1]->StrideU();
- pictures_[i].iStride[2] = downscaled_buffers_[i - 1]->StrideV();
- pictures_[i].pData[0] =
- const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataY());
- pictures_[i].pData[1] =
- const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataU());
- pictures_[i].pData[2] =
- const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataV());
- // Scale the image down a number of times by downsampling factor.
- libyuv::I420Scale(pictures_[i - 1].pData[0], pictures_[i - 1].iStride[0],
- pictures_[i - 1].pData[1], pictures_[i - 1].iStride[1],
- pictures_[i - 1].pData[2], pictures_[i - 1].iStride[2],
- configurations_[i - 1].width,
- configurations_[i - 1].height, pictures_[i].pData[0],
- pictures_[i].iStride[0], pictures_[i].pData[1],
- pictures_[i].iStride[1], pictures_[i].pData[2],
- pictures_[i].iStride[2], configurations_[i].width,
- configurations_[i].height, libyuv::kFilterBilinear);
- }
+ // Split encoded image up into fragments. This also updates |encoded_image_|.
+ RTPFragmentationHeader frag_header;
+ RtpFragmentize(&encoded_image_, &encoded_image_buffer_, *frame_buffer, &info,
+ &frag_header);
- if (!configurations_[i].sending) {
- continue;
- }
- if (frame_types != nullptr) {
- // Skip frame?
- if ((*frame_types)[i] == kEmptyFrame) {
- continue;
- }
- }
- if (send_key_frame) {
- // API doc says ForceIntraFrame(false) does nothing, but calling this
- // function forces a key frame regardless of the |bIDR| argument's value.
- // (If every frame is a key frame we get lag/delays.)
- encoders_[i]->ForceIntraFrame(true);
- configurations_[i].key_frame_request = false;
- }
- // EncodeFrame output.
- SFrameBSInfo info;
- memset(&info, 0, sizeof(SFrameBSInfo));
+ // Encoder can skip frames to save bandwidth in which case
+ // |encoded_image_._length| == 0.
+ if (encoded_image_._length > 0) {
+ // Parse QP.
+ h264_bitstream_parser_.ParseBitstream(encoded_image_._buffer,
+ encoded_image_._length);
+ h264_bitstream_parser_.GetLastSliceQp(&encoded_image_.qp_);
- // Encode!
- int enc_ret = encoders_[i]->EncodeFrame(&pictures_[i], &info);
- if (enc_ret != 0) {
- RTC_LOG(LS_ERROR)
- << "OpenH264 frame encoding failed, EncodeFrame returned " << enc_ret
- << ".";
- ReportError();
- return WEBRTC_VIDEO_CODEC_ERROR;
- }
-
- encoded_images_[i]._encodedWidth = configurations_[i].width;
- encoded_images_[i]._encodedHeight = configurations_[i].height;
- encoded_images_[i]._timeStamp = input_frame.timestamp();
- encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
- encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
- encoded_images_[i].rotation_ = input_frame.rotation();
- encoded_images_[i].content_type_ =
- (codec_.mode == VideoCodecMode::kScreensharing)
- ? VideoContentType::SCREENSHARE
- : VideoContentType::UNSPECIFIED;
- encoded_images_[i].timing_.flags = VideoSendTiming::kInvalid;
- encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
-
- // Split encoded image up into fragments. This also updates
- // |encoded_image_|.
- RTPFragmentationHeader frag_header;
- RtpFragmentize(&encoded_images_[i], &encoded_image_buffers_[i],
- *frame_buffer, &info, &frag_header);
-
- // Encoder can skip frames to save bandwidth in which case
- // |encoded_images_[i]._length| == 0.
- if (encoded_images_[i]._length > 0) {
- // Parse QP.
- h264_bitstream_parser_.ParseBitstream(encoded_images_[i]._buffer,
- encoded_images_[i]._length);
- h264_bitstream_parser_.GetLastSliceQp(&encoded_images_[i].qp_);
-
- // Deliver encoded image.
- CodecSpecificInfo codec_specific;
- codec_specific.codecType = kVideoCodecH264;
- codec_specific.codecSpecific.H264.packetization_mode =
- packetization_mode_;
- codec_specific.codecSpecific.H264.simulcast_idx =
- configurations_[i].simulcast_idx;
- encoded_image_callback_->OnEncodedImage(encoded_images_[i],
- &codec_specific, &frag_header);
- }
+ // Deliver encoded image.
+ CodecSpecificInfo codec_specific;
+ codec_specific.codecType = kVideoCodecH264;
+ codec_specific.codecSpecific.H264.packetization_mode = packetization_mode_;
+ encoded_image_callback_->OnEncodedImage(encoded_image_, &codec_specific,
+ &frag_header);
}
return WEBRTC_VIDEO_CODEC_OK;
}
@@ -545,35 +401,40 @@
return "OpenH264";
}
+bool H264EncoderImpl::IsInitialized() const {
+ return openh264_encoder_ != nullptr;
+}
+
// Initialization parameters.
// There are two ways to initialize. There is SEncParamBase (cleared with
// memset(&p, 0, sizeof(SEncParamBase))) used in Initialize, and SEncParamExt
// which is a superset of SEncParamBase (cleared with GetDefaultParams) used
// in InitializeExt.
-SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const {
+SEncParamExt H264EncoderImpl::CreateEncoderParams() const {
+ RTC_DCHECK(openh264_encoder_);
SEncParamExt encoder_params;
- encoders_[i]->GetDefaultParams(&encoder_params);
- if (codec_.mode == VideoCodecMode::kRealtimeVideo) {
+ openh264_encoder_->GetDefaultParams(&encoder_params);
+ if (mode_ == VideoCodecMode::kRealtimeVideo) {
encoder_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
- } else if (codec_.mode == VideoCodecMode::kScreensharing) {
+ } else if (mode_ == VideoCodecMode::kScreensharing) {
encoder_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
} else {
RTC_NOTREACHED();
}
- encoder_params.iPicWidth = configurations_[i].width;
- encoder_params.iPicHeight = configurations_[i].height;
- encoder_params.iTargetBitrate = configurations_[i].target_bps;
- encoder_params.iMaxBitrate = configurations_[i].max_bps;
+ encoder_params.iPicWidth = width_;
+ encoder_params.iPicHeight = height_;
+ encoder_params.iTargetBitrate = target_bps_;
+ encoder_params.iMaxBitrate = max_bps_;
// Rate Control mode
encoder_params.iRCMode = RC_BITRATE_MODE;
- encoder_params.fMaxFrameRate = configurations_[i].max_frame_rate;
+ encoder_params.fMaxFrameRate = max_frame_rate_;
// The following parameters are extension parameters (they're in SEncParamExt,
// not in SEncParamBase).
- encoder_params.bEnableFrameSkip = configurations_[i].frame_dropping_on;
+ encoder_params.bEnableFrameSkip = frame_dropping_on_;
// |uiIntraPeriod| - multiple of GOP size
// |keyFrameInterval| - number of frames
- encoder_params.uiIntraPeriod = configurations_[i].key_frame_interval;
+ encoder_params.uiIntraPeriod = key_frame_interval_;
encoder_params.uiMaxNalSize = 0;
// Threading model: use auto.
// 0: auto (dynamic imp. internal encoder)
@@ -641,12 +502,4 @@
kHighH264QpThreshold);
}
-void H264EncoderImpl::LayerConfig::SetStreamState(bool send_stream) {
- if (send_stream && !sending) {
- // Need a key frame if we have not sent this stream before.
- key_frame_request = true;
- }
- sending = send_stream;
-}
-
} // namespace webrtc
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.h b/modules/video_coding/codecs/h264/h264_encoder_impl.h
index 0d25966..c48439b 100644
--- a/modules/video_coding/codecs/h264/h264_encoder_impl.h
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -15,7 +15,6 @@
#include <memory>
#include <vector>
-#include "api/video/i420_buffer.h"
#include "common_video/h264/h264_bitstream_parser.h"
#include "modules/video_coding/codecs/h264/include/h264.h"
#include "modules/video_coding/utility/quality_scaler.h"
@@ -28,22 +27,6 @@
class H264EncoderImpl : public H264Encoder {
public:
- struct LayerConfig {
- int simulcast_idx = 0;
- int width = -1;
- int height = -1;
- bool sending = true;
- bool key_frame_request = false;
- float max_frame_rate = 0;
- uint32_t target_bps = 0;
- uint32_t max_bps = 0;
- bool frame_dropping_on = false;
- int key_frame_interval = 0;
-
- void SetStreamState(bool send_stream);
- };
-
- public:
explicit H264EncoderImpl(const cricket::VideoCodec& codec);
~H264EncoderImpl() override;
@@ -83,24 +66,32 @@
}
private:
- SEncParamExt CreateEncoderParams(size_t i) const;
+ bool IsInitialized() const;
+ SEncParamExt CreateEncoderParams() const;
webrtc::H264BitstreamParser h264_bitstream_parser_;
// Reports statistics with histograms.
void ReportInit();
void ReportError();
- std::vector<ISVCEncoder*> encoders_;
- std::vector<SSourcePicture> pictures_;
- std::vector<rtc::scoped_refptr<I420Buffer>> downscaled_buffers_;
- std::vector<LayerConfig> configurations_;
- std::vector<EncodedImage> encoded_images_;
- std::vector<std::unique_ptr<uint8_t[]>> encoded_image_buffers_;
-
- VideoCodec codec_;
+ ISVCEncoder* openh264_encoder_;
+ // Settings that are used by this encoder.
+ int width_;
+ int height_;
+ float max_frame_rate_;
+ uint32_t target_bps_;
+ uint32_t max_bps_;
+ VideoCodecMode mode_;
+ // H.264 specific parameters
+ bool frame_dropping_on_;
+ int key_frame_interval_;
H264PacketizationMode packetization_mode_;
+
size_t max_payload_size_;
int32_t number_of_cores_;
+
+ EncodedImage encoded_image_;
+ std::unique_ptr<uint8_t[]> encoded_image_buffer_;
EncodedImageCallback* encoded_image_callback_;
bool has_reported_init_;
diff --git a/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc b/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc
deleted file mode 100644
index 2377285..0000000
--- a/modules/video_coding/codecs/h264/h264_simulcast_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "api/test/create_simulcast_test_fixture.h"
-#include "api/test/simulcast_test_fixture.h"
-#include "modules/video_coding/codecs/h264/include/h264.h"
-#include "rtc_base/ptr_util.h"
-#include "test/function_video_decoder_factory.h"
-#include "test/function_video_encoder_factory.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-namespace test {
-
-namespace {
-std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() {
- std::unique_ptr<VideoEncoderFactory> encoder_factory =
- rtc::MakeUnique<FunctionVideoEncoderFactory>(
- []() { return H264Encoder::Create(cricket::VideoCodec("H264")); });
- std::unique_ptr<VideoDecoderFactory> decoder_factory =
- rtc::MakeUnique<FunctionVideoDecoderFactory>(
- []() { return H264Decoder::Create(); });
- return CreateSimulcastTestFixture(std::move(encoder_factory),
- std::move(decoder_factory),
- SdpVideoFormat("H264"));
-}
-} // namespace
-
-TEST(TestH264Simulcast, TestKeyFrameRequestsOnAllStreams) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestKeyFrameRequestsOnAllStreams();
-}
-
-TEST(TestH264Simulcast, TestPaddingAllStreams) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestPaddingAllStreams();
-}
-
-TEST(TestH264Simulcast, TestPaddingTwoStreams) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestPaddingTwoStreams();
-}
-
-TEST(TestH264Simulcast, TestPaddingTwoStreamsOneMaxedOut) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestPaddingTwoStreamsOneMaxedOut();
-}
-
-TEST(TestH264Simulcast, TestPaddingOneStream) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestPaddingOneStream();
-}
-
-TEST(TestH264Simulcast, TestPaddingOneStreamTwoMaxedOut) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestPaddingOneStreamTwoMaxedOut();
-}
-
-TEST(TestH264Simulcast, TestSendAllStreams) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestSendAllStreams();
-}
-
-TEST(TestH264Simulcast, TestDisablingStreams) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestDisablingStreams();
-}
-
-TEST(TestH264Simulcast, TestActiveStreams) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestActiveStreams();
-}
-
-TEST(TestH264Simulcast, TestSwitchingToOneStream) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestSwitchingToOneStream();
-}
-
-TEST(TestH264Simulcast, TestSwitchingToOneOddStream) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestSwitchingToOneOddStream();
-}
-
-TEST(TestH264Simulcast, TestStrideEncodeDecode) {
- auto fixture = CreateSpecificSimulcastTestFixture();
- fixture->TestStrideEncodeDecode();
-}
-
-} // namespace test
-} // namespace webrtc
diff --git a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
index abeef31..394ee14 100644
--- a/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
+++ b/modules/video_coding/codecs/test/videocodec_test_fixture_impl.cc
@@ -52,7 +52,7 @@
const std::vector<webrtc::VideoStream> streams = cricket::GetSimulcastConfig(
codec_settings->numberOfSimulcastStreams, codec_settings->width,
codec_settings->height, kMaxBitrateBps, kBitratePriority, kMaxQp,
- kMaxFramerateFps, /* is_screenshare = */ false, true);
+ kMaxFramerateFps, /* is_screenshare = */ false);
for (size_t i = 0; i < streams.size(); ++i) {
SimulcastStream* ss = &codec_settings->simulcastStream[i];
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index 98ba07f..a4b8edb 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -19,10 +19,10 @@
#include "common_video/h264/h264_common.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "modules/video_coding/include/video_codec_initializer.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "modules/video_coding/utility/default_video_bitrate_allocator.h"
-#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "rtc_base/checks.h"
#include "rtc_base/timeutils.h"
#include "test/gtest.h"
diff --git a/modules/video_coding/codecs/vp8/default_temporal_layers.cc b/modules/video_coding/codecs/vp8/default_temporal_layers.cc
index 986c5ad..eea6933 100644
--- a/modules/video_coding/codecs/vp8/default_temporal_layers.cc
+++ b/modules/video_coding/codecs/vp8/default_temporal_layers.cc
@@ -18,6 +18,7 @@
#include <vector>
#include "modules/include/module_common_types.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
diff --git a/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc b/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
index 2b69745..f61c302 100644
--- a/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
+++ b/modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc
@@ -10,8 +10,8 @@
#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
+#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "modules/video_coding/include/video_codec_interface.h"
-#include "modules/video_coding/utility/simulcast_rate_allocator.h"
#include "test/field_trial.h"
#include "test/gtest.h"
diff --git a/modules/video_coding/codecs/vp8/include/vp8_common_types.h b/modules/video_coding/codecs/vp8/include/vp8_common_types.h
new file mode 100644
index 0000000..dff70ac
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/include/vp8_common_types.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
+
+#include "common_types.h" // NOLINT(build/include)
+
+namespace webrtc {
+
+// Ratio allocation between temporal streams:
+// Values as required for the VP8 codec (accumulating).
+static const float
+ kVp8LayerRateAlloction[kMaxSimulcastStreams][kMaxTemporalStreams] = {
+ {1.0f, 1.0f, 1.0f, 1.0f}, // 1 layer
+ {0.6f, 1.0f, 1.0f, 1.0f}, // 2 layers {60%, 40%}
+ {0.4f, 0.6f, 1.0f, 1.0f}, // 3 layers {40%, 20%, 40%}
+ {0.25f, 0.4f, 0.6f, 1.0f} // 4 layers {25%, 15%, 20%, 40%}
+};
+
+} // namespace webrtc
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_INCLUDE_VP8_COMMON_TYPES_H_
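As a worked example of the accumulating table above (a hypothetical helper,
not part of this change): for three temporal layers at a 1000 kbps target,
the row {0.4f, 0.6f, 1.0f} yields individual rates of 400, 200 and 400 kbps.

  #include <cstdint>
  #include <vector>

  // Sketch: convert the accumulating entries for |num_layers| into
  // individual per-layer rates (kbps), mirroring what
  // SimulcastRateAllocator::DefaultTemporalLayerAllocation does further down.
  std::vector<uint32_t> ToIndividualRatesKbps(int num_layers,
                                              uint32_t target_kbps) {
    std::vector<uint32_t> rates;
    float prev = 0.0f;
    for (int i = 0; i < num_layers; ++i) {
      const float aggregate =
          webrtc::kVp8LayerRateAlloction[num_layers - 1][i];
      rates.push_back(
          static_cast<uint32_t>((aggregate - prev) * target_kbps + 0.5f));
      prev = aggregate;
    }
    return rates;  // num_layers = 3, 1000 kbps -> {400, 200, 400}.
  }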
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
index c345027..522c989 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -14,8 +14,7 @@
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
-#include "modules/video_coding/utility/simulcast_rate_allocator.h"
-#include "modules/video_coding/utility/simulcast_utility.h"
+#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "rtc_base/checks.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/timeutils.h"
@@ -48,7 +47,7 @@
};
// Greatest common divisor
-static int GCD(int a, int b) {
+int GCD(int a, int b) {
int c = a % b;
while (c != 0) {
a = b;
@@ -58,6 +57,53 @@
return b;
}
+uint32_t SumStreamMaxBitrate(int streams, const VideoCodec& codec) {
+ uint32_t bitrate_sum = 0;
+ for (int i = 0; i < streams; ++i) {
+ bitrate_sum += codec.simulcastStream[i].maxBitrate;
+ }
+ return bitrate_sum;
+}
+
+int NumberOfStreams(const VideoCodec& codec) {
+ int streams =
+ codec.numberOfSimulcastStreams < 1 ? 1 : codec.numberOfSimulcastStreams;
+ uint32_t simulcast_max_bitrate = SumStreamMaxBitrate(streams, codec);
+ if (simulcast_max_bitrate == 0) {
+ streams = 1;
+ }
+ return streams;
+}
+
+bool ValidSimulcastResolutions(const VideoCodec& codec, int num_streams) {
+ if (codec.width != codec.simulcastStream[num_streams - 1].width ||
+ codec.height != codec.simulcastStream[num_streams - 1].height) {
+ return false;
+ }
+ for (int i = 0; i < num_streams; ++i) {
+ if (codec.width * codec.simulcastStream[i].height !=
+ codec.height * codec.simulcastStream[i].width) {
+ return false;
+ }
+ }
+ for (int i = 1; i < num_streams; ++i) {
+ if (codec.simulcastStream[i].width !=
+ codec.simulcastStream[i - 1].width * 2) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool ValidSimulcastTemporalLayers(const VideoCodec& codec, int num_streams) {
+ for (int i = 0; i < num_streams - 1; ++i) {
+ if (codec.simulcastStream[i].numberOfTemporalLayers !=
+ codec.simulcastStream[i + 1].numberOfTemporalLayers)
+ return false;
+ }
+ return true;
+}
+
bool GetGfBoostPercentageFromFieldTrialGroup(int* boost_percentage) {
std::string group = webrtc::field_trial::FindFullName(kVp8GfBoostFieldTrial);
if (group.empty())
@@ -323,13 +369,12 @@
return retVal;
}
- int number_of_streams = SimulcastUtility::NumberOfSimulcastStreams(*inst);
+ int number_of_streams = NumberOfStreams(*inst);
bool doing_simulcast = (number_of_streams > 1);
- if (doing_simulcast && (!SimulcastUtility::ValidSimulcastResolutions(
- *inst, number_of_streams) ||
- !SimulcastUtility::ValidSimulcastTemporalLayers(
- *inst, number_of_streams))) {
+ if (doing_simulcast &&
+ (!ValidSimulcastResolutions(*inst, number_of_streams) ||
+ !ValidSimulcastTemporalLayers(*inst, number_of_streams))) {
return WEBRTC_VIDEO_CODEC_ERR_SIMULCAST_PARAMETERS_NOT_SUPPORTED;
}
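To make the restored checks above concrete: a hypothetical three-stream
config of 320x180, 640x360 and 1280x720 with codec resolution 1280x720
passes ValidSimulcastResolutions (the top stream matches the codec
resolution, every stream keeps the codec's aspect ratio, and each width is
double the previous one), whereas 320x180 / 480x270 / 1280x720 fails the
width-doubling check.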
diff --git a/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc b/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
index d8c0dbb..9ecb9cf 100644
--- a/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
+++ b/modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
@@ -30,8 +30,7 @@
rtc::MakeUnique<FunctionVideoDecoderFactory>(
[]() { return VP8Decoder::Create(); });
return CreateSimulcastTestFixture(std::move(encoder_factory),
- std::move(decoder_factory),
- SdpVideoFormat("VP8"));
+ std::move(decoder_factory));
}
} // namespace
diff --git a/modules/video_coding/codecs/vp8/screenshare_layers.cc b/modules/video_coding/codecs/vp8/screenshare_layers.cc
index cd24490..f7f1019 100644
--- a/modules/video_coding/codecs/vp8/screenshare_layers.cc
+++ b/modules/video_coding/codecs/vp8/screenshare_layers.cc
@@ -37,8 +37,7 @@
// been exceeded. This prevents needless keyframe requests.
const int ScreenshareLayers::kMaxFrameIntervalMs = 2750;
-ScreenshareLayers::ScreenshareLayers(int num_temporal_layers,
- Clock* clock)
+ScreenshareLayers::ScreenshareLayers(int num_temporal_layers, Clock* clock)
: clock_(clock),
number_of_temporal_layers_(
std::min(kMaxNumTemporalLayers, num_temporal_layers)),
diff --git a/modules/video_coding/codecs/vp8/screenshare_layers.h b/modules/video_coding/codecs/vp8/screenshare_layers.h
index c1b5fa7..5185b45 100644
--- a/modules/video_coding/codecs/vp8/screenshare_layers.h
+++ b/modules/video_coding/codecs/vp8/screenshare_layers.h
@@ -28,8 +28,7 @@
static const double kAcceptableTargetOvershoot;
static const int kMaxFrameIntervalMs;
- ScreenshareLayers(int num_temporal_layers,
- Clock* clock);
+ ScreenshareLayers(int num_temporal_layers, Clock* clock);
virtual ~ScreenshareLayers();
// Returns the recommended VP8 encode flags needed. May refresh the decoder
diff --git a/modules/video_coding/codecs/vp8/simulcast_rate_allocator.cc b/modules/video_coding/codecs/vp8/simulcast_rate_allocator.cc
new file mode 100644
index 0000000..f8cfe88
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/simulcast_rate_allocator.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
+#include "rtc_base/checks.h"
+
+namespace webrtc {
+
+SimulcastRateAllocator::SimulcastRateAllocator(const VideoCodec& codec)
+ : codec_(codec) {}
+
+VideoBitrateAllocation SimulcastRateAllocator::GetAllocation(
+ uint32_t total_bitrate_bps,
+ uint32_t framerate) {
+ VideoBitrateAllocation allocated_bitrates_bps;
+ DistributeAllocationToSimulcastLayers(total_bitrate_bps,
+ &allocated_bitrates_bps);
+ DistributeAllocationToTemporalLayers(framerate, &allocated_bitrates_bps);
+ return allocated_bitrates_bps;
+}
+
+void SimulcastRateAllocator::DistributeAllocationToSimulcastLayers(
+ uint32_t total_bitrate_bps,
+ VideoBitrateAllocation* allocated_bitrates_bps) const {
+ uint32_t left_to_allocate = total_bitrate_bps;
+ if (codec_.maxBitrate && codec_.maxBitrate * 1000 < left_to_allocate)
+ left_to_allocate = codec_.maxBitrate * 1000;
+
+ if (codec_.numberOfSimulcastStreams == 0) {
+ // No simulcast, just set the target as this has been capped already.
+ if (codec_.active) {
+ allocated_bitrates_bps->SetBitrate(
+ 0, 0, std::max(codec_.minBitrate * 1000, left_to_allocate));
+ }
+ return;
+ }
+ // Find the first active layer. We don't allocate to inactive layers.
+ size_t active_layer = 0;
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ if (codec_.simulcastStream[active_layer].active) {
+ // Found the first active layer.
+ break;
+ }
+ }
+ // All streams could be inactive, and nothing more to do.
+ if (active_layer == codec_.numberOfSimulcastStreams) {
+ return;
+ }
+
+ // Always allocate enough bitrate for the minimum bitrate of the first
+ // active layer. Suspending below min bitrate is controlled outside the
+ // codec implementation and is not overridden by this.
+ left_to_allocate = std::max(
+ codec_.simulcastStream[active_layer].minBitrate * 1000, left_to_allocate);
+
+ // Begin by allocating bitrate to simulcast streams, putting all bitrate in
+ // temporal layer 0. We'll then distribute this bitrate, across potential
+ // temporal layers, when stream allocation is done.
+
+ size_t top_active_layer = active_layer;
+ // Allocate up to the target bitrate for each active simulcast layer.
+ for (; active_layer < codec_.numberOfSimulcastStreams; ++active_layer) {
+ const SimulcastStream& stream = codec_.simulcastStream[active_layer];
+ if (!stream.active) {
+ continue;
+ }
+ // If we can't allocate to the current layer we can't allocate to higher
+ // layers because they require a higher minimum bitrate.
+ if (left_to_allocate < stream.minBitrate * 1000) {
+ break;
+ }
+ // We are allocating to this layer so it is the current active allocation.
+ top_active_layer = active_layer;
+ uint32_t allocation =
+ std::min(left_to_allocate, stream.targetBitrate * 1000);
+ allocated_bitrates_bps->SetBitrate(active_layer, 0, allocation);
+ RTC_DCHECK_LE(allocation, left_to_allocate);
+ left_to_allocate -= allocation;
+ }
+
+ // Next, try allocate remaining bitrate, up to max bitrate, in top active
+ // stream.
+ // TODO(sprang): Allocate up to max bitrate for all layers once we have a
+ // better idea of possible performance implications.
+ if (left_to_allocate > 0) {
+ const SimulcastStream& stream = codec_.simulcastStream[top_active_layer];
+ uint32_t bitrate_bps =
+ allocated_bitrates_bps->GetSpatialLayerSum(top_active_layer);
+ uint32_t allocation =
+ std::min(left_to_allocate, stream.maxBitrate * 1000 - bitrate_bps);
+ bitrate_bps += allocation;
+ RTC_DCHECK_LE(allocation, left_to_allocate);
+ left_to_allocate -= allocation;
+ allocated_bitrates_bps->SetBitrate(top_active_layer, 0, bitrate_bps);
+ }
+}
+
+void SimulcastRateAllocator::DistributeAllocationToTemporalLayers(
+ uint32_t framerate,
+ VideoBitrateAllocation* allocated_bitrates_bps) const {
+ const int num_spatial_streams =
+ std::max(1, static_cast<int>(codec_.numberOfSimulcastStreams));
+
+ // Finally, distribute the bitrate for the simulcast streams across the
+ // available temporal layers.
+ for (int simulcast_id = 0; simulcast_id < num_spatial_streams;
+ ++simulcast_id) {
+ uint32_t target_bitrate_kbps =
+ allocated_bitrates_bps->GetBitrate(simulcast_id, 0) / 1000;
+ if (target_bitrate_kbps == 0) {
+ continue;
+ }
+
+ const uint32_t expected_allocated_bitrate_kbps = target_bitrate_kbps;
+ RTC_DCHECK_EQ(
+ target_bitrate_kbps,
+ allocated_bitrates_bps->GetSpatialLayerSum(simulcast_id) / 1000);
+ const int num_temporal_streams = NumTemporalStreams(simulcast_id);
+ uint32_t max_bitrate_kbps;
+ // Legacy temporal-layered only screenshare, or simulcast screenshare
+ // with legacy mode for simulcast stream 0.
+ const bool conference_screenshare_mode =
+ codec_.mode == VideoCodecMode::kScreensharing &&
+ codec_.targetBitrate > 0 &&
+ ((num_spatial_streams == 1 && num_temporal_streams == 2) || // Legacy.
+ (num_spatial_streams > 1 && simulcast_id == 0)); // Simulcast.
+ if (conference_screenshare_mode) {
+ // TODO(holmer): This is a "temporary" hack for screensharing, where we
+ // interpret the startBitrate as the encoder target bitrate. This is
+ // to allow for a different max bitrate, so if the codec can't meet
+ // the target we still allow it to overshoot up to the max before dropping
+ // frames. This hack should be improved.
+ int tl0_bitrate = std::min(codec_.targetBitrate, target_bitrate_kbps);
+ max_bitrate_kbps = std::min(codec_.maxBitrate, target_bitrate_kbps);
+ target_bitrate_kbps = tl0_bitrate;
+ } else if (num_spatial_streams == 1) {
+ max_bitrate_kbps = codec_.maxBitrate;
+ } else {
+ max_bitrate_kbps = codec_.simulcastStream[simulcast_id].maxBitrate;
+ }
+
+ std::vector<uint32_t> tl_allocation;
+ if (num_temporal_streams == 1) {
+ tl_allocation.push_back(target_bitrate_kbps);
+ } else {
+ if (conference_screenshare_mode) {
+ tl_allocation = ScreenshareTemporalLayerAllocation(
+ target_bitrate_kbps, max_bitrate_kbps, framerate, simulcast_id);
+ } else {
+ tl_allocation = DefaultTemporalLayerAllocation(
+ target_bitrate_kbps, max_bitrate_kbps, framerate, simulcast_id);
+ }
+ }
+ RTC_DCHECK_GT(tl_allocation.size(), 0);
+ RTC_DCHECK_LE(tl_allocation.size(), num_temporal_streams);
+
+ uint64_t tl_allocation_sum_kbps = 0;
+ for (size_t tl_index = 0; tl_index < tl_allocation.size(); ++tl_index) {
+ uint32_t layer_rate_kbps = tl_allocation[tl_index];
+ if (layer_rate_kbps > 0) {
+ allocated_bitrates_bps->SetBitrate(simulcast_id, tl_index,
+ layer_rate_kbps * 1000);
+ }
+ tl_allocation_sum_kbps += layer_rate_kbps;
+ }
+ RTC_DCHECK_LE(tl_allocation_sum_kbps, expected_allocated_bitrate_kbps);
+ }
+}
+
+std::vector<uint32_t> SimulcastRateAllocator::DefaultTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int framerate,
+ int simulcast_id) const {
+ const size_t num_temporal_layers = NumTemporalStreams(simulcast_id);
+ std::vector<uint32_t> bitrates;
+ for (size_t i = 0; i < num_temporal_layers; ++i) {
+ float layer_bitrate =
+ bitrate_kbps * kVp8LayerRateAlloction[num_temporal_layers - 1][i];
+ bitrates.push_back(static_cast<uint32_t>(layer_bitrate + 0.5));
+ }
+
+ // Allocation table is of aggregates, transform to individual rates.
+ uint32_t sum = 0;
+ for (size_t i = 0; i < num_temporal_layers; ++i) {
+ uint32_t layer_bitrate = bitrates[i];
+ RTC_DCHECK_LE(sum, bitrates[i]);
+ bitrates[i] -= sum;
+ sum = layer_bitrate;
+
+ if (sum >= static_cast<uint32_t>(bitrate_kbps)) {
+ // Sum adds up; any subsequent layers will be 0.
+ bitrates.resize(i + 1);
+ break;
+ }
+ }
+
+ return bitrates;
+}
+
+std::vector<uint32_t>
+SimulcastRateAllocator::ScreenshareTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int framerate,
+ int simulcast_id) const {
+ if (simulcast_id > 0) {
+ return DefaultTemporalLayerAllocation(bitrate_kbps, max_bitrate_kbps,
+ framerate, simulcast_id);
+ }
+ std::vector<uint32_t> allocation;
+ allocation.push_back(bitrate_kbps);
+ if (max_bitrate_kbps > bitrate_kbps)
+ allocation.push_back(max_bitrate_kbps - bitrate_kbps);
+ return allocation;
+}
+
+const VideoCodec& SimulcastRateAllocator::GetCodec() const {
+ return codec_;
+}
+
+int SimulcastRateAllocator::NumTemporalStreams(size_t simulcast_id) const {
+ return std::max<uint8_t>(
+ 1, codec_.numberOfSimulcastStreams == 0
+ ? codec_.VP8().numberOfTemporalLayers
+ : codec_.simulcastStream[simulcast_id].numberOfTemporalLayers);
+}
+
+} // namespace webrtc
diff --git a/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h b/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h
new file mode 100644
index 0000000..b958781
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_RATE_ALLOCATOR_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_RATE_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/video_encoder.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "common_video/include/video_bitrate_allocator.h"
+#include "modules/video_coding/codecs/vp8/temporal_layers.h"
+#include "rtc_base/constructormagic.h"
+
+namespace webrtc {
+
+class SimulcastRateAllocator : public VideoBitrateAllocator {
+ public:
+ explicit SimulcastRateAllocator(const VideoCodec& codec);
+
+ VideoBitrateAllocation GetAllocation(uint32_t total_bitrate_bps,
+ uint32_t framerate) override;
+ const VideoCodec& GetCodec() const;
+
+ private:
+ void DistributeAllocationToSimulcastLayers(
+ uint32_t total_bitrate_bps,
+ VideoBitrateAllocation* allocated_bitrates_bps) const;
+ void DistributeAllocationToTemporalLayers(
+ uint32_t framerate,
+ VideoBitrateAllocation* allocated_bitrates_bps) const;
+ std::vector<uint32_t> DefaultTemporalLayerAllocation(int bitrate_kbps,
+ int max_bitrate_kbps,
+ int framerate,
+ int simulcast_id) const;
+ std::vector<uint32_t> ScreenshareTemporalLayerAllocation(
+ int bitrate_kbps,
+ int max_bitrate_kbps,
+ int framerate,
+ int simulcast_id) const;
+ int NumTemporalStreams(size_t simulcast_id) const;
+
+ const VideoCodec codec_;
+
+ RTC_DISALLOW_COPY_AND_ASSIGN(SimulcastRateAllocator);
+};
+
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_RATE_ALLOCATOR_H_
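A minimal usage sketch of the restored allocator API (values are
illustrative; |codec| is assumed to be a fully populated VideoCodec, e.g.
via SimulcastTestFixtureImpl::DefaultSettings below):

  // Distribute 1.5 Mbps across the configured simulcast and temporal
  // layers at 30 fps, then read back the base spatial layer's total.
  webrtc::SimulcastRateAllocator allocator(codec);
  webrtc::VideoBitrateAllocation allocation =
      allocator.GetAllocation(/*total_bitrate_bps=*/1500000,
                              /*framerate=*/30);
  uint32_t base_layer_bps = allocation.GetSpatialLayerSum(0);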
diff --git a/modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.cc b/modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.cc
new file mode 100644
index 0000000..5a4712f
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.cc
@@ -0,0 +1,806 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "api/video_codecs/sdp_video_format.h"
+#include "common_video/include/video_frame.h"
+#include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "modules/video_coding/codecs/vp8/include/vp8.h"
+#include "modules/video_coding/codecs/vp8/temporal_layers.h"
+#include "modules/video_coding/include/video_coding_defines.h"
+#include "rtc_base/checks.h"
+#include "test/gtest.h"
+
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Return;
+
+namespace webrtc {
+namespace test {
+
+namespace {
+
+const int kDefaultWidth = 1280;
+const int kDefaultHeight = 720;
+const int kNumberOfSimulcastStreams = 3;
+const int kColorY = 66;
+const int kColorU = 22;
+const int kColorV = 33;
+const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
+const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
+const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
+const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
+
+template <typename T>
+void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
+ expected_values[0] = value0;
+ expected_values[1] = value1;
+ expected_values[2] = value2;
+}
+
+enum PlaneType {
+ kYPlane = 0,
+ kUPlane = 1,
+ kVPlane = 2,
+ kNumOfPlanes = 3,
+};
+
+} // namespace
+
+class SimulcastTestFixtureImpl::Vp8TestEncodedImageCallback
+ : public EncodedImageCallback {
+ public:
+ Vp8TestEncodedImageCallback() : picture_id_(-1) {
+ memset(temporal_layer_, -1, sizeof(temporal_layer_));
+ memset(layer_sync_, false, sizeof(layer_sync_));
+ }
+
+ ~Vp8TestEncodedImageCallback() {
+ delete[] encoded_key_frame_._buffer;
+ delete[] encoded_frame_._buffer;
+ }
+
+ virtual Result OnEncodedImage(const EncodedImage& encoded_image,
+ const CodecSpecificInfo* codec_specific_info,
+ const RTPFragmentationHeader* fragmentation) {
+ // Only store the base layer.
+ if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
+ if (encoded_image._frameType == kVideoFrameKey) {
+ delete[] encoded_key_frame_._buffer;
+ encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
+ encoded_key_frame_._size = encoded_image._size;
+ encoded_key_frame_._length = encoded_image._length;
+ encoded_key_frame_._frameType = kVideoFrameKey;
+ encoded_key_frame_._completeFrame = encoded_image._completeFrame;
+ memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
+ encoded_image._length);
+ } else {
+ delete[] encoded_frame_._buffer;
+ encoded_frame_._buffer = new uint8_t[encoded_image._size];
+ encoded_frame_._size = encoded_image._size;
+ encoded_frame_._length = encoded_image._length;
+ memcpy(encoded_frame_._buffer, encoded_image._buffer,
+ encoded_image._length);
+ }
+ }
+ layer_sync_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
+ codec_specific_info->codecSpecific.VP8.layerSync;
+ temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
+ codec_specific_info->codecSpecific.VP8.temporalIdx;
+ return Result(Result::OK, encoded_image._timeStamp);
+ }
+ void GetLastEncodedFrameInfo(int* picture_id,
+ int* temporal_layer,
+ bool* layer_sync,
+ int stream) {
+ *picture_id = picture_id_;
+ *temporal_layer = temporal_layer_[stream];
+ *layer_sync = layer_sync_[stream];
+ }
+ void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
+ *encoded_key_frame = encoded_key_frame_;
+ }
+ void GetLastEncodedFrame(EncodedImage* encoded_frame) {
+ *encoded_frame = encoded_frame_;
+ }
+
+ private:
+ EncodedImage encoded_key_frame_;
+ EncodedImage encoded_frame_;
+ int picture_id_;
+ int temporal_layer_[kNumberOfSimulcastStreams];
+ bool layer_sync_[kNumberOfSimulcastStreams];
+};
+
+class SimulcastTestFixtureImpl::Vp8TestDecodedImageCallback
+ : public DecodedImageCallback {
+ public:
+ Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
+ int32_t Decoded(VideoFrame& decoded_image) override {
+ rtc::scoped_refptr<I420BufferInterface> i420_buffer =
+ decoded_image.video_frame_buffer()->ToI420();
+ for (int i = 0; i < decoded_image.width(); ++i) {
+ EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
+ }
+
+ // TODO(mikhal): Verify the difference between U,V and the original.
+ for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
+ EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
+ EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
+ }
+ decoded_frames_++;
+ return 0;
+ }
+ int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
+ RTC_NOTREACHED();
+ return -1;
+ }
+ void Decoded(VideoFrame& decoded_image,
+ absl::optional<int32_t> decode_time_ms,
+ absl::optional<uint8_t> qp) override {
+ Decoded(decoded_image);
+ }
+ int DecodedFrames() { return decoded_frames_; }
+
+ private:
+ int decoded_frames_;
+};
+
+namespace {
+
+void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
+ for (int i = 0; i < height; i++, data += stride) {
+ // Set the visible image area to |value| and zero the padding out to the
+ // stride; this makes it easier to distinguish between image size and
+ // frame size (accounting for stride).
+ memset(data, value, width);
+ memset(data + width, 0, stride - width);
+ }
+}
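+// Example: with width = 4 and stride = 8, each row written by SetPlane()
+// looks like (v = |value| byte, 0 = zeroed padding):
+//   v v v v 0 0 0 0
+// so a read that accidentally spans the full stride is easy to detect.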
+
+// Fills in an I420Buffer from |plane_colors|.
+void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
+ int plane_colors[kNumOfPlanes]) {
+ SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
+ buffer->height(), buffer->StrideY());
+
+ SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
+ buffer->ChromaHeight(), buffer->StrideU());
+
+ SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
+ buffer->ChromaHeight(), buffer->StrideV());
+}
+
+void ConfigureStream(int width,
+ int height,
+ int max_bitrate,
+ int min_bitrate,
+ int target_bitrate,
+ SimulcastStream* stream,
+ int num_temporal_layers) {
+ RTC_DCHECK(stream);
+ stream->width = width;
+ stream->height = height;
+ stream->maxBitrate = max_bitrate;
+ stream->minBitrate = min_bitrate;
+ stream->targetBitrate = target_bitrate;
+ stream->numberOfTemporalLayers = num_temporal_layers;
+ stream->qpMax = 45;
+ stream->active = true;
+}
+
+} // namespace
+
+void SimulcastTestFixtureImpl::DefaultSettings(
+ VideoCodec* settings,
+ const int* temporal_layer_profile) {
+ RTC_CHECK(settings);
+ memset(settings, 0, sizeof(VideoCodec));
+ settings->codecType = kVideoCodecVP8;
+ // Payload types 96-127 are dynamic; pick one of them for VP8.
+ settings->plType = 120;
+ settings->startBitrate = 300;
+ settings->minBitrate = 30;
+ settings->maxBitrate = 0;
+ settings->maxFramerate = 30;
+ settings->width = kDefaultWidth;
+ settings->height = kDefaultHeight;
+ settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
+ settings->active = true;
+ ASSERT_EQ(3, kNumberOfSimulcastStreams);
+ settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
+ kDefaultOutlierFrameSizePercent};
+ ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
+ kMinBitrates[0], kTargetBitrates[0],
+ &settings->simulcastStream[0], temporal_layer_profile[0]);
+ ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
+ kMinBitrates[1], kTargetBitrates[1],
+ &settings->simulcastStream[1], temporal_layer_profile[1]);
+ ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
+ kMinBitrates[2], kTargetBitrates[2],
+ &settings->simulcastStream[2], temporal_layer_profile[2]);
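+ // Together, the three ConfigureStream() calls above build the usual
+ // simulcast ladder: quarter, half, and full resolution with increasing
+ // bitrate caps.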
+ settings->VP8()->denoisingOn = true;
+ settings->VP8()->automaticResizeOn = false;
+ settings->VP8()->frameDroppingOn = true;
+ settings->VP8()->keyFrameInterval = 3000;
+}
+
+SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
+ std::unique_ptr<VideoEncoderFactory> encoder_factory,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory) {
+ encoder_ = encoder_factory->CreateVideoEncoder(SdpVideoFormat("VP8"));
+ decoder_ = decoder_factory->CreateVideoDecoder(SdpVideoFormat("VP8"));
+ SetUpCodec(kDefaultTemporalLayerProfile);
+}
+
+SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
+ encoder_->Release();
+ decoder_->Release();
+}
+
+void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
+ DefaultSettings(&settings_, temporal_layer_profile);
+ SetUpRateAllocator();
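+ // InitEncode(codec_settings, number_of_cores, max_payload_size): one core
+ // and a 1200-byte payload cap; 1200 is a common WebRTC choice, roughly an
+ // MTU minus IP/UDP/RTP overhead, not a hard requirement.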
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
+ EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
+ input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
+ input_buffer_->InitializeData();
+ input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
+ 0 /* timestamp_us */));
+}
+
+void SimulcastTestFixtureImpl::SetUpRateAllocator() {
+ rate_allocator_.reset(new SimulcastRateAllocator(settings_));
+}
+
+void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
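+ // GetAllocation() expects bps, hence |bitrate_kbps| * 1000; the returned
+ // allocation splits the budget across the simulcast streams (and their
+ // temporal layers) before it is handed to the encoder.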
+ encoder_->SetRateAllocation(
+ rate_allocator_->GetAllocation(bitrate_kbps * 1000, fps), fps);
+}
+
+void SimulcastTestFixtureImpl::RunActiveStreamsTest(
+ const std::vector<bool>& active_streams) {
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ UpdateActiveStreams(active_streams);
+ // Give all streams enough bitrate so that stream activity, not the rate
+ // allocation, decides which streams get encoded.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
+
+ ExpectStreams(kVideoFrameKey, active_streams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, active_streams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::UpdateActiveStreams(
+ const std::vector<bool>& active_streams) {
+ ASSERT_EQ(static_cast<int>(active_streams.size()), kNumberOfSimulcastStreams);
+ for (size_t i = 0; i < active_streams.size(); ++i) {
+ settings_.simulcastStream[i].active = active_streams[i];
+ }
+ // Reinitialize the allocator and encoder with the new settings.
+ // TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
+ // reconfiguration of the allocator and encoder. When the video bitrate
+ // allocator has support for updating active streams without a
+ // reinitialization, we can just call that here instead.
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
+}
+
+void SimulcastTestFixtureImpl::ExpectStreams(
+ FrameType frame_type,
+ const std::vector<bool>& expected_streams_active) {
+ ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
+ kNumberOfSimulcastStreams);
+ if (expected_streams_active[0]) {
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
+ _, _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ }
+ if (expected_streams_active[1]) {
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
+ _, _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ }
+ if (expected_streams_active[2]) {
+ EXPECT_CALL(encoder_callback_,
+ OnEncodedImage(
+ AllOf(Field(&EncodedImage::_frameType, frame_type),
+ Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+ Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+ _, _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ }
+}
+
+void SimulcastTestFixtureImpl::ExpectStreams(FrameType frame_type,
+ int expected_video_streams) {
+ ASSERT_GE(expected_video_streams, 0);
+ ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
+ std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
+ for (int i = 0; i < expected_video_streams; ++i) {
+ expected_streams_active[i] = true;
+ }
+ ExpectStreams(frame_type, expected_streams_active);
+}
+
+void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ Vp8TestEncodedImageCallback* encoder_callback,
+ const int* expected_temporal_idx,
+ const bool* expected_layer_sync,
+ int num_spatial_layers) {
+ int picture_id = -1;
+ int temporal_layer = -1;
+ bool layer_sync = false;
+ for (int i = 0; i < num_spatial_layers; i++) {
+ encoder_callback->GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
+ &layer_sync, i);
+ EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
+ EXPECT_EQ(expected_layer_sync[i], layer_sync);
+ }
+}
+
+// We currently expect all active streams to generate a key frame even though
+// a key frame was only requested for some of them.
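+// The sequence below requests a key frame on one stream at a time via the
+// per-stream |frame_types| vector and verifies that all active streams
+// respond with a key frame each time.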
+void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ frame_types[0] = kVideoFrameKey;
+ ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+ frame_types[1] = kVideoFrameKey;
+ ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+ frame_types[2] = kVideoFrameKey;
+ ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+ ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
+ // We should always encode the base layer.
+ SetRates(kMinBitrates[0] - 1, 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
+ // We have just enough to get only the first stream and padding for two.
+ SetRates(kMinBitrates[0], 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
+ // We are just below the limit for sending the second stream, so the first
+ // stream should be maxed out (at |maxBitrate|), with padding for two.
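+ // Worked example with hypothetical values (the real constants are defined
+ // earlier in this file): if kTargetBitrates[0] were 150 and kMinBitrates[1]
+ // were 200 kbps, SetRates(349, 30) would be 1 kbps short of enabling
+ // stream 1, so stream 0 absorbs the surplus up to |maxBitrate|.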
+ SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, 1);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingOneStream() {
+ // We have just enough to send two streams, so padding for one stream.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, 2);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
+ // We are just below the limit for sending the third stream, so the first
+ // stream's rate should max out at |targetBitrate| and the second at
+ // |maxBitrate|.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, 2);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestSendAllStreams() {
+ // We have just enough to send all streams.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, 3);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestDisablingStreams() {
+ // We should get three media streams.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ ExpectStreams(kVideoFrameKey, 3);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ ExpectStreams(kVideoFrameDelta, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ // We should only get two streams and padding for one.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
+ ExpectStreams(kVideoFrameDelta, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ // We should only get the first stream and padding for two.
+ SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
+ ExpectStreams(kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ // We don't have enough bitrate for the thumbnail stream, but we should
+ // still get it with the current configuration.
+ SetRates(kTargetBitrates[0] - 1, 30);
+ ExpectStreams(kVideoFrameDelta, 1);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ // We should only get two streams and padding for one.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
+ // We get a key frame because a new stream is being enabled.
+ ExpectStreams(kVideoFrameKey, 2);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ // We should get all three streams.
+ SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
+ // We get a key frame because a new stream is being enabled.
+ ExpectStreams(kVideoFrameKey, 3);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestActiveStreams() {
+ // All streams on.
+ RunActiveStreamsTest({true, true, true});
+ // All streams off.
+ RunActiveStreamsTest({false, false, false});
+ // Low stream off.
+ RunActiveStreamsTest({false, true, true});
+ // Middle stream off.
+ RunActiveStreamsTest({true, false, true});
+ // High stream off.
+ RunActiveStreamsTest({true, true, false});
+ // Only low stream turned on.
+ RunActiveStreamsTest({true, false, false});
+ // Only middle stream turned on.
+ RunActiveStreamsTest({false, true, false});
+ // Only high stream turned on.
+ RunActiveStreamsTest({false, false, true});
+}
+
+void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
+ // Disable all streams except the last and set the bitrate of the last to
+ // 100 kbps. This verifies the way GTP switches to screenshare mode.
+ settings_.VP8()->numberOfTemporalLayers = 1;
+ settings_.maxBitrate = 100;
+ settings_.startBitrate = 100;
+ settings_.width = width;
+ settings_.height = height;
+ for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
+ settings_.simulcastStream[i].maxBitrate = 0;
+ settings_.simulcastStream[i].width = settings_.width;
+ settings_.simulcastStream[i].height = settings_.height;
+ settings_.simulcastStream[i].numberOfTemporalLayers = 1;
+ }
+ // Recreate the input buffer at the new resolution.
+ input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
+ input_buffer_->InitializeData();
+
+ input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
+ 0 /* timestamp_us */));
+
+ // The for loop above did not set the bitrate of the highest layer.
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
+ 0;
+ // The highest layer has to correspond to the non-simulcast resolution.
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
+ settings_.width;
+ settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
+ settings_.height;
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
+
+ // Encode one frame and verify.
+ SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
+ std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+ kVideoFrameDelta);
+ EXPECT_CALL(
+ encoder_callback_,
+ OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+ Field(&EncodedImage::_encodedWidth, width),
+ Field(&EncodedImage::_encodedHeight, height)),
+ _, _))
+ .Times(1)
+ .WillRepeatedly(Return(
+ EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+
+ // Switch back.
+ DefaultSettings(&settings_, kDefaultTemporalLayerProfile);
+ // Start at the lowest bitrate that enables the base stream.
+ settings_.startBitrate = kMinBitrates[0];
+ SetUpRateAllocator();
+ EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
+ SetRates(settings_.startBitrate, 30);
+ ExpectStreams(kVideoFrameKey, 1);
+ // Resize |input_frame_| to the new resolution.
+ input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
+ input_buffer_->InitializeData();
+ input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
+ 0 /* timestamp_us */));
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
+ SwitchingToOneStream(1024, 768);
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
+ SwitchingToOneStream(1023, 769);
+}
+
+void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
+ SwitchingToOneStream(4, 4);
+}
+
+// Test the layer pattern and sync flag for various spatial-temporal patterns.
+// 3-3-3 pattern: 3 temporal layers for all spatial streams, so the same
+// temporal_layer id and layer_sync flag are expected for all streams.
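+// With 3 temporal layers the per-frame pattern repeats every four frames:
+// TL0, TL2, TL1, TL2. layer_sync is true only while a layer still depends
+// solely on base-layer frames (the first frames after a key frame).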
+void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
+ Vp8TestEncodedImageCallback encoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+
+ int expected_temporal_idx[3] = {-1, -1, -1};
+ bool expected_layer_sync[3] = {false, false, false};
+
+ // First frame: #0.
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #1.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #2.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #3.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #4.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #5.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+}
+
+// Test the layer pattern and sync flag for various spatial-temporal patterns.
+// 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
+// 1 temporal layer for highest resolution.
+// For this profile, we expect the temporal index pattern to be:
+// 1st stream: 0, 2, 1, 2, ....
+// 2nd stream: 0, 1, 0, 1, ...
+// 3rd stream: -1, -1, -1, -1, ....
+// Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
+// should always have temporal layer idx set to kNoTemporalIdx = -1.
+// Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
+// TODO(marpan): Although this seems safe for now, we should fix this.
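+// Concretely, static_cast<uint8_t>(kNoTemporalIdx) == 255, which is the
+// value the expectations below check for the third stream.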
+void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
+ int temporal_layer_profile[3] = {3, 2, 1};
+ SetUpCodec(temporal_layer_profile);
+ Vp8TestEncodedImageCallback encoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+
+ int expected_temporal_idx[3] = {-1, -1, -1};
+ bool expected_layer_sync[3] = {false, false, false};
+
+ // First frame: #0.
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #1.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #2.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #3.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #4.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+
+ // Next frame: #5.
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+ SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
+ SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
+ VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
+}
+
+void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
+ Vp8TestEncodedImageCallback encoder_callback;
+ Vp8TestDecodedImageCallback decoder_callback;
+ encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
+ decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
+
+ SetRates(kMaxBitrates[2], 30); // To get all three streams.
+ // Set up two (possibly) problematic stride cases:
+ // 1. stride > width, 2. stride_uv != stride_y / 2.
+ int stride_y = kDefaultWidth + 20;
+ int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
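+ // Assuming the defaults used in this fixture (kDefaultWidth = 1280), this
+ // gives stride_y = 1300 and stride_uv = 645, so stride_y > width and
+ // stride_uv != stride_y / 2 (which would be 650) - both cases above.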
+ input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
+ stride_uv, stride_uv);
+ input_frame_.reset(new VideoFrame(input_buffer_, webrtc::kVideoRotation_0,
+ 0 /* timestamp_us */));
+
+ // Set color.
+ int plane_offset[kNumOfPlanes];
+ plane_offset[kYPlane] = kColorY;
+ plane_offset[kUPlane] = kColorU;
+ plane_offset[kVPlane] = kColorV;
+ CreateImage(input_buffer_, plane_offset);
+
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+
+ // Change color.
+ plane_offset[kYPlane] += 1;
+ plane_offset[kUPlane] += 1;
+ plane_offset[kVPlane] += 1;
+ CreateImage(input_buffer_, plane_offset);
+ input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
+ EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+
+ EncodedImage encoded_frame;
+ // The first of the two encoded frames is the key frame; decode it first.
+ encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL, 0));
+ encoder_callback.GetLastEncodedFrame(&encoded_frame);
+ EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL, 0));
+ EXPECT_EQ(2, decoder_callback.DecodedFrames());
+}
+
+} // namespace test
+} // namespace webrtc
diff --git a/modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.h b/modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.h
new file mode 100644
index 0000000..1fcf48e
--- /dev/null
+++ b/modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_FIXTURE_IMPL_H_
+#define MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_FIXTURE_IMPL_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/test/simulcast_test_fixture.h"
+#include "api/video/i420_buffer.h"
+#include "api/video/video_frame.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "common_types.h" // NOLINT(build/include)
+#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
+#include "modules/video_coding/include/mock/mock_video_codec_interface.h"
+
+namespace webrtc {
+namespace test {
+
+class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
+ public:
+ SimulcastTestFixtureImpl(
+ std::unique_ptr<VideoEncoderFactory> encoder_factory,
+ std::unique_ptr<VideoDecoderFactory> decoder_factory);
+ ~SimulcastTestFixtureImpl() final;
+
+ // Implements SimulcastTestFixture.
+ void TestKeyFrameRequestsOnAllStreams() override;
+ void TestPaddingAllStreams() override;
+ void TestPaddingTwoStreams() override;
+ void TestPaddingTwoStreamsOneMaxedOut() override;
+ void TestPaddingOneStream() override;
+ void TestPaddingOneStreamTwoMaxedOut() override;
+ void TestSendAllStreams() override;
+ void TestDisablingStreams() override;
+ void TestActiveStreams() override;
+ void TestSwitchingToOneStream() override;
+ void TestSwitchingToOneOddStream() override;
+ void TestSwitchingToOneSmallStream() override;
+ void TestSpatioTemporalLayers333PatternEncoder() override;
+ void TestSpatioTemporalLayers321PatternEncoder() override;
+ void TestStrideEncodeDecode() override;
+
+ static void DefaultSettings(VideoCodec* settings,
+ const int* temporal_layer_profile);
+
+ private:
+ class Vp8TestEncodedImageCallback;
+ class Vp8TestDecodedImageCallback;
+
+ void SetUpCodec(const int* temporal_layer_profile);
+ void SetUpRateAllocator();
+ void SetRates(uint32_t bitrate_kbps, uint32_t fps);
+ void RunActiveStreamsTest(const std::vector<bool>& active_streams);
+ void UpdateActiveStreams(const std::vector<bool>& active_streams);
+ void ExpectStreams(FrameType frame_type,
+ const std::vector<bool>& expected_streams_active);
+ void ExpectStreams(FrameType frame_type, int expected_video_streams);
+ void VerifyTemporalIdxAndSyncForAllSpatialLayers(
+ Vp8TestEncodedImageCallback* encoder_callback,
+ const int* expected_temporal_idx,
+ const bool* expected_layer_sync,
+ int num_spatial_layers);
+ void SwitchingToOneStream(int width, int height);
+
+ std::unique_ptr<VideoEncoder> encoder_;
+ MockEncodedImageCallback encoder_callback_;
+ std::unique_ptr<VideoDecoder> decoder_;
+ MockDecodedImageCallback decoder_callback_;
+ VideoCodec settings_;
+ rtc::scoped_refptr<I420Buffer> input_buffer_;
+ std::unique_ptr<VideoFrame> input_frame_;
+ std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
+};
+
+} // namespace test
+} // namespace webrtc
+
+#endif // MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_FIXTURE_IMPL_H_
diff --git a/modules/video_coding/codecs/vp8/temporal_layers.cc b/modules/video_coding/codecs/vp8/temporal_layers.cc
index 9ee5ce3..67401cd 100644
--- a/modules/video_coding/codecs/vp8/temporal_layers.cc
+++ b/modules/video_coding/codecs/vp8/temporal_layers.cc
@@ -16,6 +16,7 @@
#include "modules/include/module_common_types.h"
#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
+#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"