Revert "Reland "Using units in SendSideBandwidthEstimation.""
This reverts commit e2cb26cb4fa2a3ce7c12636225ba9c720d7c7e56.
Reason for revert: <INSERT REASONING HERE>
Original change's description:
> Reland "Using units in SendSideBandwidthEstimation."
>
> This reverts commit 917e5967a597fa8d6e6cae9ffccb21e3d35d553b.
>
> Reason for revert: Handling downstream use case.
>
> Original change's description:
> > Revert "Using units in SendSideBandwidthEstimation."
> >
> > This reverts commit 35b5e5f3b0dc409bf571b3609860ad5bb8e00c29.
> >
> > Reason for revert: Breaks downstream project
> >
> > Original change's description:
> > > Using units in SendSideBandwidthEstimation.
> > >
> > > This CL moves SendSideBandwidthEstimation to use the unit types
> > > DataRate, TimeDelta and Timestamp. This prepares for upcoming changes.
> > >
> > > Bug: webrtc:9718
> > > Change-Id: If10e329920dda037b53055ff3352ae7f8d7e32b8
> > > Reviewed-on: https://webrtc-review.googlesource.com/c/104021
> > > Commit-Queue: Sebastian Jansson <srte@webrtc.org>
> > > Reviewed-by: Björn Terelius <terelius@webrtc.org>
> > > Cr-Commit-Position: refs/heads/master@{#25029}
> >
> > TBR=terelius@webrtc.org,srte@webrtc.org
> >
> > No-Try: True
> > Bug: webrtc:9718
> > Change-Id: Iaf470f1eec9911ee6fc7c1b4f5db9675d89d3780
> > Reviewed-on: https://webrtc-review.googlesource.com/c/104480
> > Commit-Queue: Oleh Prypin <oprypin@webrtc.org>
> > Reviewed-by: Oleh Prypin <oprypin@webrtc.org>
> > Cr-Commit-Position: refs/heads/master@{#25035}
>
> TBR=oprypin@webrtc.org,terelius@webrtc.org,srte@webrtc.org
>
> Change-Id: I0940791fcd1e196598b0f0a2ec779c49931ee5df
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: webrtc:9718
> Reviewed-on: https://webrtc-review.googlesource.com/c/104520
> Reviewed-by: Sebastian Jansson <srte@webrtc.org>
> Commit-Queue: Sebastian Jansson <srte@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#25036}

TBR=oprypin@webrtc.org,terelius@webrtc.org,srte@webrtc.org

Change-Id: I6628771c79fc78dfd856649ae92232e95df63495
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:9718
Reviewed-on: https://webrtc-review.googlesource.com/c/104540
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Commit-Queue: Sebastian Jansson <srte@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#25037}
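
The reverted change replaced raw bps/ms integers in SendSideBandwidthEstimation
with the unit types DataRate, TimeDelta and Timestamp. A minimal sketch of that
API, using only the factories and sentinels visible in the diff below (the
include paths and the Example() wrapper are assumptions, not part of the
change):

  // Sketch only: shows the unit-type calls the reverted CL used.
  #include "api/units/data_rate.h"
  #include "api/units/time_delta.h"
  #include "api/units/timestamp.h"

  void Example(int64_t now_ms) {
    using webrtc::DataRate;
    using webrtc::TimeDelta;
    using webrtc::Timestamp;

    // Explicit factories replace raw bps/ms integers at the call sites.
    DataRate send_rate = DataRate::bps(300000);  // 300 kbps.
    TimeDelta rtt = TimeDelta::ms(50);
    Timestamp now = Timestamp::ms(now_ms);

    // Typed sentinels replace magic values such as -1 or 1e9 bps.
    DataRate unlimited = DataRate::Infinity();
    Timestamp never = Timestamp::MinusInfinity();

    // Conversions back to raw values where int-based interfaces remain.
    int64_t send_bps = send_rate.bps();
    int64_t rtt_in_ms = rtt.ms();
  }
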
diff --git a/modules/bitrate_controller/BUILD.gn b/modules/bitrate_controller/BUILD.gn
index 72046f0..ae66765 100644
--- a/modules/bitrate_controller/BUILD.gn
+++ b/modules/bitrate_controller/BUILD.gn
@@ -44,7 +44,6 @@
"../rtp_rtcp",
"../rtp_rtcp:rtp_rtcp_format",
"//third_party/abseil-cpp/absl/memory",
- "//third_party/abseil-cpp/absl/types:optional",
]
}
diff --git a/modules/bitrate_controller/bitrate_controller_impl.cc b/modules/bitrate_controller/bitrate_controller_impl.cc
index 93045b7..9b816f6 100644
--- a/modules/bitrate_controller/bitrate_controller_impl.cc
+++ b/modules/bitrate_controller/bitrate_controller_impl.cc
@@ -20,18 +20,7 @@
#include "rtc_base/logging.h"
namespace webrtc {
-namespace {
-absl::optional<DataRate> ToOptionalDataRate(int start_bitrate_bps) {
- if (start_bitrate_bps == -1)
- return absl::nullopt;
- return DataRate::bps(start_bitrate_bps);
-}
-DataRate MaxRate(int max_bitrate_bps) {
- if (max_bitrate_bps == -1)
- return DataRate::Infinity();
- return DataRate::bps(max_bitrate_bps);
-}
-} // namespace
+
class BitrateControllerImpl::RtcpBandwidthObserverImpl
: public RtcpBandwidthObserver {
public:
@@ -91,9 +80,7 @@
void BitrateControllerImpl::SetStartBitrate(int start_bitrate_bps) {
{
rtc::CritScope cs(&critsect_);
- bandwidth_estimation_.SetSendBitrate(
- DataRate::bps(start_bitrate_bps),
- Timestamp::ms(clock_->TimeInMilliseconds()));
+ bandwidth_estimation_.SetSendBitrate(start_bitrate_bps);
}
MaybeTriggerOnNetworkChanged();
}
@@ -102,8 +89,7 @@
int max_bitrate_bps) {
{
rtc::CritScope cs(&critsect_);
- bandwidth_estimation_.SetMinMaxBitrate(DataRate::bps(min_bitrate_bps),
- DataRate::bps(max_bitrate_bps));
+ bandwidth_estimation_.SetMinMaxBitrate(min_bitrate_bps, max_bitrate_bps);
}
MaybeTriggerOnNetworkChanged();
}
@@ -113,9 +99,8 @@
int max_bitrate_bps) {
{
rtc::CritScope cs(&critsect_);
- bandwidth_estimation_.SetBitrates(
- ToOptionalDataRate(start_bitrate_bps), DataRate::bps(min_bitrate_bps),
- MaxRate(max_bitrate_bps), Timestamp::ms(clock_->TimeInMilliseconds()));
+ bandwidth_estimation_.SetBitrates(start_bitrate_bps, min_bitrate_bps,
+ max_bitrate_bps);
}
MaybeTriggerOnNetworkChanged();
}
@@ -126,9 +111,8 @@
{
rtc::CritScope cs(&critsect_);
bandwidth_estimation_ = SendSideBandwidthEstimation(event_log_);
- bandwidth_estimation_.SetBitrates(
- ToOptionalDataRate(bitrate_bps), DataRate::bps(min_bitrate_bps),
- MaxRate(max_bitrate_bps), Timestamp::ms(clock_->TimeInMilliseconds()));
+ bandwidth_estimation_.SetBitrates(bitrate_bps, min_bitrate_bps,
+ max_bitrate_bps);
}
MaybeTriggerOnNetworkChanged();
}
@@ -137,8 +121,8 @@
void BitrateControllerImpl::OnReceivedEstimatedBitrate(uint32_t bitrate) {
{
rtc::CritScope cs(&critsect_);
- bandwidth_estimation_.UpdateReceiverEstimate(
- Timestamp::ms(clock_->TimeInMilliseconds()), DataRate::bps(bitrate));
+ bandwidth_estimation_.UpdateReceiverEstimate(clock_->TimeInMilliseconds(),
+ bitrate);
BWE_TEST_LOGGING_PLOT(1, "REMB_kbps", clock_->TimeInMilliseconds(),
bitrate / 1000);
}
@@ -152,15 +136,12 @@
{
rtc::CritScope cs(&critsect_);
if (result.probe) {
- bandwidth_estimation_.SetSendBitrate(
- DataRate::bps(result.target_bitrate_bps),
- Timestamp::ms(clock_->TimeInMilliseconds()));
+ bandwidth_estimation_.SetSendBitrate(result.target_bitrate_bps);
}
// Since SetSendBitrate now resets the delay-based estimate, we have to call
// UpdateDelayBasedEstimate after SetSendBitrate.
- bandwidth_estimation_.UpdateDelayBasedEstimate(
- Timestamp::ms(clock_->TimeInMilliseconds()),
- DataRate::bps(result.target_bitrate_bps));
+ bandwidth_estimation_.UpdateDelayBasedEstimate(clock_->TimeInMilliseconds(),
+ result.target_bitrate_bps);
}
MaybeTriggerOnNetworkChanged();
}
@@ -177,8 +158,7 @@
void BitrateControllerImpl::Process() {
{
rtc::CritScope cs(&critsect_);
- bandwidth_estimation_.UpdateEstimate(
- Timestamp::ms(clock_->TimeInMilliseconds()));
+ bandwidth_estimation_.UpdateEstimate(clock_->TimeInMilliseconds());
}
MaybeTriggerOnNetworkChanged();
last_bitrate_update_ms_ = clock_->TimeInMilliseconds();
@@ -233,9 +213,8 @@
RTC_DCHECK_GE(total_number_of_packets, 0);
- bandwidth_estimation_.UpdateReceiverBlock(
- fraction_lost_aggregate, TimeDelta::ms(rtt), total_number_of_packets,
- Timestamp::ms(now_ms));
+ bandwidth_estimation_.UpdateReceiverBlock(fraction_lost_aggregate, rtt,
+ total_number_of_packets, now_ms);
}
MaybeTriggerOnNetworkChanged();
}
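
The hunk above keeps the comment that SetSendBitrate() resets the delay-based
estimate, so UpdateDelayBasedEstimate() must be called after it. A minimal
sketch of that ordering, assuming the post-revert int/ms signatures; Result and
OnDelayBasedResult() are illustrative stand-ins, not WebRTC API:

  #include <cstdint>
  #include "modules/bitrate_controller/send_side_bandwidth_estimation.h"

  struct Result {
    bool probe = false;
    uint32_t target_bitrate_bps = 0;
  };

  void OnDelayBasedResult(webrtc::SendSideBandwidthEstimation* bwe,
                          const Result& result,
                          int64_t now_ms) {
    if (result.probe) {
      // A probe result forces the send bitrate; internally this also clears
      // the stored delay-based estimate.
      bwe->SetSendBitrate(result.target_bitrate_bps);
    }
    // Applied after SetSendBitrate() so the reset above cannot wipe it out.
    bwe->UpdateDelayBasedEstimate(now_ms, result.target_bitrate_bps);
  }
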
diff --git a/modules/bitrate_controller/send_side_bandwidth_estimation.cc b/modules/bitrate_controller/send_side_bandwidth_estimation.cc
index cc108a9..48eea3c 100644
--- a/modules/bitrate_controller/send_side_bandwidth_estimation.cc
+++ b/modules/bitrate_controller/send_side_bandwidth_estimation.cc
@@ -27,22 +27,22 @@
namespace webrtc {
namespace {
-constexpr TimeDelta kBweIncreaseInterval = TimeDelta::Millis<1000>();
-constexpr TimeDelta kBweDecreaseInterval = TimeDelta::Millis<300>();
-constexpr TimeDelta kStartPhase = TimeDelta::Millis<2000>();
-constexpr TimeDelta kBweConverganceTime = TimeDelta::Millis<20000>();
-constexpr int kLimitNumPackets = 20;
-constexpr DataRate kDefaultMaxBitrate = DataRate::BitsPerSec<1000000000>();
-constexpr TimeDelta kLowBitrateLogPeriod = TimeDelta::Millis<10000>();
-constexpr TimeDelta kRtcEventLogPeriod = TimeDelta::Millis<5000>();
+const int64_t kBweIncreaseIntervalMs = 1000;
+const int64_t kBweDecreaseIntervalMs = 300;
+const int64_t kStartPhaseMs = 2000;
+const int64_t kBweConverganceTimeMs = 20000;
+const int kLimitNumPackets = 20;
+const int kDefaultMaxBitrateBps = 1000000000;
+const int64_t kLowBitrateLogPeriodMs = 10000;
+const int64_t kRtcEventLogPeriodMs = 5000;
// Expecting that RTCP feedback is sent uniformly within [0.5, 1.5]s intervals.
-constexpr TimeDelta kMaxRtcpFeedbackInterval = TimeDelta::Millis<5000>();
-constexpr int kFeedbackTimeoutIntervals = 3;
-constexpr TimeDelta kTimeoutInterval = TimeDelta::Millis<1000>();
+const int64_t kFeedbackIntervalMs = 5000;
+const int64_t kFeedbackTimeoutIntervals = 3;
+const int64_t kTimeoutIntervalMs = 1000;
-constexpr float kDefaultLowLossThreshold = 0.02f;
-constexpr float kDefaultHighLossThreshold = 0.1f;
-constexpr DataRate kDefaultBitrateThreshold = DataRate::Zero();
+const float kDefaultLowLossThreshold = 0.02f;
+const float kDefaultHighLossThreshold = 0.1f;
+const int kDefaultBitrateThresholdKbps = 0;
struct UmaRampUpMetric {
const char* metric_name;
@@ -99,7 +99,7 @@
"experiment from field trial string. Using default.";
*low_loss_threshold = kDefaultLowLossThreshold;
*high_loss_threshold = kDefaultHighLossThreshold;
- *bitrate_threshold_kbps = kDefaultBitrateThreshold.kbps();
+ *bitrate_threshold_kbps = kDefaultBitrateThresholdKbps;
return false;
}
} // namespace
@@ -107,34 +107,33 @@
SendSideBandwidthEstimation::SendSideBandwidthEstimation(RtcEventLog* event_log)
: lost_packets_since_last_loss_update_(0),
expected_packets_since_last_loss_update_(0),
- current_bitrate_(DataRate::Zero()),
- min_bitrate_configured_(
- DataRate::bps(congestion_controller::GetMinBitrateBps())),
- max_bitrate_configured_(kDefaultMaxBitrate),
- last_low_bitrate_log_(Timestamp::MinusInfinity()),
+ current_bitrate_bps_(0),
+ min_bitrate_configured_(congestion_controller::GetMinBitrateBps()),
+ max_bitrate_configured_(kDefaultMaxBitrateBps),
+ last_low_bitrate_log_ms_(-1),
has_decreased_since_last_fraction_loss_(false),
- last_loss_feedback_(Timestamp::MinusInfinity()),
- last_loss_packet_report_(Timestamp::MinusInfinity()),
- last_timeout_(Timestamp::MinusInfinity()),
+ last_feedback_ms_(-1),
+ last_packet_report_ms_(-1),
+ last_timeout_ms_(-1),
last_fraction_loss_(0),
last_logged_fraction_loss_(0),
- last_round_trip_time_(TimeDelta::Zero()),
- bwe_incoming_(DataRate::Zero()),
- delay_based_bitrate_(DataRate::Zero()),
- time_last_decrease_(Timestamp::MinusInfinity()),
- first_report_time_(Timestamp::MinusInfinity()),
+ last_round_trip_time_ms_(0),
+ bwe_incoming_(0),
+ delay_based_bitrate_bps_(0),
+ time_last_decrease_ms_(0),
+ first_report_time_ms_(-1),
initially_lost_packets_(0),
- bitrate_at_2_seconds_(DataRate::Zero()),
+ bitrate_at_2_seconds_kbps_(0),
uma_update_state_(kNoUpdate),
uma_rtt_state_(kNoUpdate),
rampup_uma_stats_updated_(kNumUmaRampupMetrics, false),
event_log_(event_log),
- last_rtc_event_log_(Timestamp::MinusInfinity()),
+ last_rtc_event_log_ms_(-1),
in_timeout_experiment_(
webrtc::field_trial::IsEnabled("WebRTC-FeedbackTimeout")),
low_loss_threshold_(kDefaultLowLossThreshold),
high_loss_threshold_(kDefaultHighLossThreshold),
- bitrate_threshold_(kDefaultBitrateThreshold) {
+ bitrate_threshold_bps_(1000 * kDefaultBitrateThresholdKbps) {
RTC_DCHECK(event_log);
if (BweLossExperimentIsEnabled()) {
uint32_t bitrate_threshold_kbps;
@@ -144,87 +143,87 @@
RTC_LOG(LS_INFO) << "Enabled BweLossExperiment with parameters "
<< low_loss_threshold_ << ", " << high_loss_threshold_
<< ", " << bitrate_threshold_kbps;
- bitrate_threshold_ = DataRate::kbps(bitrate_threshold_kbps);
+ bitrate_threshold_bps_ = bitrate_threshold_kbps * 1000;
}
}
}
SendSideBandwidthEstimation::~SendSideBandwidthEstimation() {}
-void SendSideBandwidthEstimation::SetBitrates(
- absl::optional<DataRate> send_bitrate,
- DataRate min_bitrate,
- DataRate max_bitrate,
- Timestamp at_time) {
+void SendSideBandwidthEstimation::SetBitrates(int send_bitrate,
+ int min_bitrate,
+ int max_bitrate) {
SetMinMaxBitrate(min_bitrate, max_bitrate);
- if (send_bitrate)
- SetSendBitrate(*send_bitrate, at_time);
+ if (send_bitrate > 0)
+ SetSendBitrate(send_bitrate);
}
-void SendSideBandwidthEstimation::SetSendBitrate(DataRate bitrate,
- Timestamp at_time) {
- RTC_DCHECK(bitrate > DataRate::Zero());
- // Reset to avoid being capped by the estimate.
- delay_based_bitrate_ = DataRate::Zero();
- CapBitrateToThresholds(at_time, bitrate);
+void SendSideBandwidthEstimation::SetSendBitrate(int bitrate) {
+ RTC_DCHECK_GT(bitrate, 0);
+ delay_based_bitrate_bps_ = 0; // Reset to avoid being capped by the estimate.
+ CapBitrateToThresholds(Clock::GetRealTimeClock()->TimeInMilliseconds(),
+ bitrate);
// Clear last sent bitrate history so the new value can be used directly
// and not capped.
min_bitrate_history_.clear();
}
-void SendSideBandwidthEstimation::SetMinMaxBitrate(DataRate min_bitrate,
- DataRate max_bitrate) {
+void SendSideBandwidthEstimation::SetMinMaxBitrate(int min_bitrate,
+ int max_bitrate) {
+ RTC_DCHECK_GE(min_bitrate, 0);
min_bitrate_configured_ =
- std::max(min_bitrate, congestion_controller::GetMinBitrate());
- if (max_bitrate > DataRate::Zero() && max_bitrate.IsFinite()) {
- max_bitrate_configured_ = std::max(min_bitrate_configured_, max_bitrate);
+ std::max(min_bitrate, congestion_controller::GetMinBitrateBps());
+ if (max_bitrate > 0) {
+ max_bitrate_configured_ =
+ std::max<uint32_t>(min_bitrate_configured_, max_bitrate);
} else {
- max_bitrate_configured_ = kDefaultMaxBitrate;
+ max_bitrate_configured_ = kDefaultMaxBitrateBps;
}
}
int SendSideBandwidthEstimation::GetMinBitrate() const {
- return min_bitrate_configured_.bps<int>();
+ return min_bitrate_configured_;
}
void SendSideBandwidthEstimation::CurrentEstimate(int* bitrate,
uint8_t* loss,
int64_t* rtt) const {
- *bitrate = current_bitrate_.bps<int>();
+ *bitrate = current_bitrate_bps_;
*loss = last_fraction_loss_;
- *rtt = last_round_trip_time_.ms<int64_t>();
+ *rtt = last_round_trip_time_ms_;
}
-void SendSideBandwidthEstimation::UpdateReceiverEstimate(Timestamp at_time,
- DataRate bandwidth) {
+void SendSideBandwidthEstimation::UpdateReceiverEstimate(int64_t now_ms,
+ uint32_t bandwidth) {
bwe_incoming_ = bandwidth;
- CapBitrateToThresholds(at_time, current_bitrate_);
+ CapBitrateToThresholds(now_ms, current_bitrate_bps_);
}
-void SendSideBandwidthEstimation::UpdateDelayBasedEstimate(Timestamp at_time,
- DataRate bitrate) {
- delay_based_bitrate_ = bitrate;
- CapBitrateToThresholds(at_time, current_bitrate_);
+void SendSideBandwidthEstimation::UpdateDelayBasedEstimate(
+ int64_t now_ms,
+ uint32_t bitrate_bps) {
+ delay_based_bitrate_bps_ = bitrate_bps;
+ CapBitrateToThresholds(now_ms, current_bitrate_bps_);
}
void SendSideBandwidthEstimation::UpdateReceiverBlock(uint8_t fraction_loss,
- TimeDelta rtt,
+ int64_t rtt_ms,
int number_of_packets,
- Timestamp at_time) {
+ int64_t now_ms) {
const int kRoundingConstant = 128;
int packets_lost = (static_cast<int>(fraction_loss) * number_of_packets +
kRoundingConstant) >>
8;
- UpdatePacketsLost(packets_lost, number_of_packets, at_time);
- UpdateRtt(rtt, at_time);
+ UpdatePacketsLost(packets_lost, number_of_packets, now_ms);
+ UpdateRtt(rtt_ms, now_ms);
}
void SendSideBandwidthEstimation::UpdatePacketsLost(int packets_lost,
int number_of_packets,
- Timestamp at_time) {
- last_loss_feedback_ = at_time;
- if (first_report_time_.IsInfinite())
- first_report_time_ = at_time;
+ int64_t now_ms) {
+ last_feedback_ms_ = now_ms;
+ if (first_report_time_ms_ == -1)
+ first_report_time_ms_ = now_ms;
// Check sequence number diff and weight loss report
if (number_of_packets > 0) {
@@ -245,201 +244,202 @@
lost_packets_since_last_loss_update_ = 0;
expected_packets_since_last_loss_update_ = 0;
- last_loss_packet_report_ = at_time;
- UpdateEstimate(at_time);
+ last_packet_report_ms_ = now_ms;
+ UpdateEstimate(now_ms);
}
- UpdateUmaStatsPacketsLost(at_time, packets_lost);
+ UpdateUmaStatsPacketsLost(now_ms, packets_lost);
}
-void SendSideBandwidthEstimation::UpdateUmaStatsPacketsLost(Timestamp at_time,
+void SendSideBandwidthEstimation::UpdateUmaStatsPacketsLost(int64_t now_ms,
int packets_lost) {
- DataRate bitrate_kbps = DataRate::kbps((current_bitrate_.bps() + 500) / 1000);
+ int bitrate_kbps = static_cast<int>((current_bitrate_bps_ + 500) / 1000);
for (size_t i = 0; i < kNumUmaRampupMetrics; ++i) {
if (!rampup_uma_stats_updated_[i] &&
- bitrate_kbps.kbps() >= kUmaRampupMetrics[i].bitrate_kbps) {
+ bitrate_kbps >= kUmaRampupMetrics[i].bitrate_kbps) {
RTC_HISTOGRAMS_COUNTS_100000(i, kUmaRampupMetrics[i].metric_name,
- (at_time - first_report_time_).ms());
+ now_ms - first_report_time_ms_);
rampup_uma_stats_updated_[i] = true;
}
}
- if (IsInStartPhase(at_time)) {
+ if (IsInStartPhase(now_ms)) {
initially_lost_packets_ += packets_lost;
} else if (uma_update_state_ == kNoUpdate) {
uma_update_state_ = kFirstDone;
- bitrate_at_2_seconds_ = bitrate_kbps;
+ bitrate_at_2_seconds_kbps_ = bitrate_kbps;
RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitiallyLostPackets",
initially_lost_packets_, 0, 100, 50);
RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialBandwidthEstimate",
- bitrate_at_2_seconds_.kbps(), 0, 2000, 50);
+ bitrate_at_2_seconds_kbps_, 0, 2000, 50);
} else if (uma_update_state_ == kFirstDone &&
- at_time - first_report_time_ >= kBweConverganceTime) {
+ now_ms - first_report_time_ms_ >= kBweConverganceTimeMs) {
uma_update_state_ = kDone;
- int bitrate_diff_kbps = std::max(
- bitrate_at_2_seconds_.kbps<int>() - bitrate_kbps.kbps<int>(), 0);
+ int bitrate_diff_kbps =
+ std::max(bitrate_at_2_seconds_kbps_ - bitrate_kbps, 0);
RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialVsConvergedDiff", bitrate_diff_kbps,
0, 2000, 50);
}
}
-void SendSideBandwidthEstimation::UpdateRtt(TimeDelta rtt, Timestamp at_time) {
+void SendSideBandwidthEstimation::UpdateRtt(int64_t rtt_ms, int64_t now_ms) {
// Update RTT if we were able to compute an RTT based on this RTCP.
// FlexFEC doesn't send RTCP SR, which means we won't be able to compute RTT.
- if (rtt > TimeDelta::Zero())
- last_round_trip_time_ = rtt;
+ if (rtt_ms > 0)
+ last_round_trip_time_ms_ = rtt_ms;
- if (!IsInStartPhase(at_time) && uma_rtt_state_ == kNoUpdate) {
+ if (!IsInStartPhase(now_ms) && uma_rtt_state_ == kNoUpdate) {
uma_rtt_state_ = kDone;
- RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialRtt", rtt.ms<int>(), 0, 2000, 50);
+ RTC_HISTOGRAM_COUNTS("WebRTC.BWE.InitialRtt", static_cast<int>(rtt_ms), 0,
+ 2000, 50);
}
}
-void SendSideBandwidthEstimation::UpdateEstimate(Timestamp at_time) {
- DataRate new_bitrate = current_bitrate_;
+void SendSideBandwidthEstimation::UpdateEstimate(int64_t now_ms) {
+ uint32_t new_bitrate = current_bitrate_bps_;
// We trust the REMB and/or delay-based estimate during the first 2 seconds if
// we haven't had any packet loss reported, to allow startup bitrate probing.
- if (last_fraction_loss_ == 0 && IsInStartPhase(at_time)) {
+ if (last_fraction_loss_ == 0 && IsInStartPhase(now_ms)) {
new_bitrate = std::max(bwe_incoming_, new_bitrate);
- new_bitrate = std::max(delay_based_bitrate_, new_bitrate);
+ new_bitrate = std::max(delay_based_bitrate_bps_, new_bitrate);
- if (new_bitrate != current_bitrate_) {
+ if (new_bitrate != current_bitrate_bps_) {
min_bitrate_history_.clear();
- min_bitrate_history_.push_back(std::make_pair(at_time, current_bitrate_));
- CapBitrateToThresholds(at_time, new_bitrate);
+ min_bitrate_history_.push_back(
+ std::make_pair(now_ms, current_bitrate_bps_));
+ CapBitrateToThresholds(now_ms, new_bitrate);
return;
}
}
- UpdateMinHistory(at_time);
- if (last_loss_packet_report_.IsInfinite()) {
+ UpdateMinHistory(now_ms);
+ if (last_packet_report_ms_ == -1) {
// No feedback received.
- CapBitrateToThresholds(at_time, current_bitrate_);
+ CapBitrateToThresholds(now_ms, current_bitrate_bps_);
return;
}
- TimeDelta time_since_loss_packet_report = at_time - last_loss_packet_report_;
- TimeDelta time_since_loss_feedback = at_time - last_loss_feedback_;
- if (time_since_loss_packet_report < 1.2 * kMaxRtcpFeedbackInterval) {
+ int64_t time_since_packet_report_ms = now_ms - last_packet_report_ms_;
+ int64_t time_since_feedback_ms = now_ms - last_feedback_ms_;
+ if (time_since_packet_report_ms < 1.2 * kFeedbackIntervalMs) {
// We only care about loss above a given bitrate threshold.
float loss = last_fraction_loss_ / 256.0f;
// We only make decisions based on loss when the bitrate is above a
// threshold. This is a crude way of handling loss which is uncorrelated
// to congestion.
- if (current_bitrate_ < bitrate_threshold_ || loss <= low_loss_threshold_) {
+ if (current_bitrate_bps_ < bitrate_threshold_bps_ ||
+ loss <= low_loss_threshold_) {
// Loss < 2%: Increase rate by 8% of the min bitrate in the last
- // kBweIncreaseInterval.
+ // kBweIncreaseIntervalMs.
// Note that by remembering the bitrate over the last second one can
// rampup up one second faster than if only allowed to start ramping
// at 8% per second rate now. E.g.:
// If sending a constant 100kbps it can rampup immediatly to 108kbps
// whenever a receiver report is received with lower packet loss.
- // If instead one would do: current_bitrate_ *= 1.08^(delta time),
+ // If instead one would do: current_bitrate_bps_ *= 1.08^(delta time),
// it would take over one second since the lower packet loss to achieve
// 108kbps.
- new_bitrate =
- DataRate::bps(min_bitrate_history_.front().second.bps() * 1.08 + 0.5);
+ new_bitrate = static_cast<uint32_t>(
+ min_bitrate_history_.front().second * 1.08 + 0.5);
// Add 1 kbps extra, just to make sure that we do not get stuck
// (gives a little extra increase at low rates, negligible at higher
// rates).
- new_bitrate += DataRate::bps(1000);
- } else if (current_bitrate_ > bitrate_threshold_) {
+ new_bitrate += 1000;
+ } else if (current_bitrate_bps_ > bitrate_threshold_bps_) {
if (loss <= high_loss_threshold_) {
// Loss between 2% - 10%: Do nothing.
} else {
- // Loss > 10%: Limit the rate decreases to once a kBweDecreaseInterval
+ // Loss > 10%: Limit the rate decreases to once a kBweDecreaseIntervalMs
// + rtt.
if (!has_decreased_since_last_fraction_loss_ &&
- (at_time - time_last_decrease_) >=
- (kBweDecreaseInterval + last_round_trip_time_)) {
- time_last_decrease_ = at_time;
+ (now_ms - time_last_decrease_ms_) >=
+ (kBweDecreaseIntervalMs + last_round_trip_time_ms_)) {
+ time_last_decrease_ms_ = now_ms;
// Reduce rate:
// newRate = rate * (1 - 0.5*lossRate);
// where packetLoss = 256*lossRate;
- new_bitrate =
- DataRate::bps((current_bitrate_.bps() *
- static_cast<double>(512 - last_fraction_loss_)) /
- 512.0);
+ new_bitrate = static_cast<uint32_t>(
+ (current_bitrate_bps_ *
+ static_cast<double>(512 - last_fraction_loss_)) /
+ 512.0);
has_decreased_since_last_fraction_loss_ = true;
}
}
}
- } else if (time_since_loss_feedback >
- kFeedbackTimeoutIntervals * kMaxRtcpFeedbackInterval &&
- (last_timeout_.IsInfinite() ||
- at_time - last_timeout_ > kTimeoutInterval)) {
+ } else if (time_since_feedback_ms >
+ kFeedbackTimeoutIntervals * kFeedbackIntervalMs &&
+ (last_timeout_ms_ == -1 ||
+ now_ms - last_timeout_ms_ > kTimeoutIntervalMs)) {
if (in_timeout_experiment_) {
- RTC_LOG(LS_WARNING) << "Feedback timed out ("
- << ToString(time_since_loss_feedback)
- << "), reducing bitrate.";
- new_bitrate = new_bitrate * 0.8;
+ RTC_LOG(LS_WARNING) << "Feedback timed out (" << time_since_feedback_ms
+ << " ms), reducing bitrate.";
+ new_bitrate *= 0.8;
// Reset accumulators since we've already acted on missing feedback and
// shouldn't to act again on these old lost packets.
lost_packets_since_last_loss_update_ = 0;
expected_packets_since_last_loss_update_ = 0;
- last_timeout_ = at_time;
+ last_timeout_ms_ = now_ms;
}
}
- CapBitrateToThresholds(at_time, new_bitrate);
+ CapBitrateToThresholds(now_ms, new_bitrate);
}
-bool SendSideBandwidthEstimation::IsInStartPhase(Timestamp at_time) const {
- return first_report_time_.IsInfinite() ||
- at_time - first_report_time_ < kStartPhase;
+bool SendSideBandwidthEstimation::IsInStartPhase(int64_t now_ms) const {
+ return first_report_time_ms_ == -1 ||
+ now_ms - first_report_time_ms_ < kStartPhaseMs;
}
-void SendSideBandwidthEstimation::UpdateMinHistory(Timestamp at_time) {
+void SendSideBandwidthEstimation::UpdateMinHistory(int64_t now_ms) {
// Remove old data points from history.
// Since history precision is in ms, add one so it is able to increase
// bitrate if it is off by as little as 0.5ms.
while (!min_bitrate_history_.empty() &&
- at_time - min_bitrate_history_.front().first + TimeDelta::ms(1) >
- kBweIncreaseInterval) {
+ now_ms - min_bitrate_history_.front().first + 1 >
+ kBweIncreaseIntervalMs) {
min_bitrate_history_.pop_front();
}
// Typical minimum sliding-window algorithm: Pop values higher than current
// bitrate before pushing it.
while (!min_bitrate_history_.empty() &&
- current_bitrate_ <= min_bitrate_history_.back().second) {
+ current_bitrate_bps_ <= min_bitrate_history_.back().second) {
min_bitrate_history_.pop_back();
}
- min_bitrate_history_.push_back(std::make_pair(at_time, current_bitrate_));
+ min_bitrate_history_.push_back(std::make_pair(now_ms, current_bitrate_bps_));
}
-void SendSideBandwidthEstimation::CapBitrateToThresholds(Timestamp at_time,
- DataRate bitrate) {
- if (bwe_incoming_ > DataRate::Zero() && bitrate > bwe_incoming_) {
- bitrate = bwe_incoming_;
+void SendSideBandwidthEstimation::CapBitrateToThresholds(int64_t now_ms,
+ uint32_t bitrate_bps) {
+ if (bwe_incoming_ > 0 && bitrate_bps > bwe_incoming_) {
+ bitrate_bps = bwe_incoming_;
}
- if (delay_based_bitrate_ > DataRate::Zero() &&
- bitrate > delay_based_bitrate_) {
- bitrate = delay_based_bitrate_;
+ if (delay_based_bitrate_bps_ > 0 && bitrate_bps > delay_based_bitrate_bps_) {
+ bitrate_bps = delay_based_bitrate_bps_;
}
- if (bitrate > max_bitrate_configured_) {
- bitrate = max_bitrate_configured_;
+ if (bitrate_bps > max_bitrate_configured_) {
+ bitrate_bps = max_bitrate_configured_;
}
- if (bitrate < min_bitrate_configured_) {
- if (last_low_bitrate_log_.IsInfinite() ||
- at_time - last_low_bitrate_log_ > kLowBitrateLogPeriod) {
+ if (bitrate_bps < min_bitrate_configured_) {
+ if (last_low_bitrate_log_ms_ == -1 ||
+ now_ms - last_low_bitrate_log_ms_ > kLowBitrateLogPeriodMs) {
RTC_LOG(LS_WARNING) << "Estimated available bandwidth "
- << ToString(bitrate)
- << " is below configured min bitrate "
- << ToString(min_bitrate_configured_) << ".";
- last_low_bitrate_log_ = at_time;
+ << bitrate_bps / 1000
+ << " kbps is below configured min bitrate "
+ << min_bitrate_configured_ / 1000 << " kbps.";
+ last_low_bitrate_log_ms_ = now_ms;
}
- bitrate = min_bitrate_configured_;
+ bitrate_bps = min_bitrate_configured_;
}
- if (bitrate != current_bitrate_ ||
+ if (bitrate_bps != current_bitrate_bps_ ||
last_fraction_loss_ != last_logged_fraction_loss_ ||
- at_time - last_rtc_event_log_ > kRtcEventLogPeriod) {
+ now_ms - last_rtc_event_log_ms_ > kRtcEventLogPeriodMs) {
event_log_->Log(absl::make_unique<RtcEventBweUpdateLossBased>(
- bitrate.bps(), last_fraction_loss_,
+ bitrate_bps, last_fraction_loss_,
expected_packets_since_last_loss_update_));
last_logged_fraction_loss_ = last_fraction_loss_;
- last_rtc_event_log_ = at_time;
+ last_rtc_event_log_ms_ = now_ms;
}
- current_bitrate_ = bitrate;
+ current_bitrate_bps_ = bitrate_bps;
}
} // namespace webrtc
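
The comments in this file describe the loss-based controller: below roughly 2%
loss the rate ramps up by 8% of the minimum bitrate seen over the last increase
interval (plus 1 kbps), between 2% and 10% it holds, and above 10% it decreases
by rate * (1 - 0.5 * lossRate) with lossRate = fraction_loss / 256. A standalone
sketch of just that adjustment, using the default thresholds and the 512-based
formula from this file; the rate-limiting of decreases, the feedback-timeout
path and the final capping are omitted, and the function itself is illustrative:

  #include <cstdint>

  uint32_t LossBasedTarget(uint32_t min_bitrate_in_window_bps,
                           uint32_t current_bitrate_bps,
                           uint8_t fraction_loss /* 0..255 */) {
    const float loss = fraction_loss / 256.0f;
    if (loss <= 0.02f) {
      // Loss < 2%: increase by 8% of the minimum bitrate over the last
      // second, plus 1 kbps so low rates do not get stuck.
      return static_cast<uint32_t>(min_bitrate_in_window_bps * 1.08 + 0.5) +
             1000;
    }
    if (loss <= 0.1f) {
      // Loss between 2% and 10%: hold the current rate.
      return current_bitrate_bps;
    }
    // Loss > 10%: newRate = rate * (1 - 0.5 * lossRate), expressed with the
    // 8-bit loss fraction as rate * (512 - fraction_loss) / 512.
    return static_cast<uint32_t>(
        current_bitrate_bps * static_cast<double>(512 - fraction_loss) / 512.0);
  }

For example, fraction_loss = 128 (50% loss) scales a 1 Mbps estimate by
(512 - 128) / 512 = 0.75, giving 750 kbps.
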
diff --git a/modules/bitrate_controller/send_side_bandwidth_estimation.h b/modules/bitrate_controller/send_side_bandwidth_estimation.h
index 2c8b4ee..54b571e 100644
--- a/modules/bitrate_controller/send_side_bandwidth_estimation.h
+++ b/modules/bitrate_controller/send_side_bandwidth_estimation.h
@@ -17,7 +17,6 @@
#include <utility>
#include <vector>
-#include "absl/types/optional.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
@@ -33,86 +32,83 @@
void CurrentEstimate(int* bitrate, uint8_t* loss, int64_t* rtt) const;
// Call periodically to update estimate.
- void UpdateEstimate(Timestamp at_time);
+ void UpdateEstimate(int64_t now_ms);
// Call when we receive a RTCP message with TMMBR or REMB.
- void UpdateReceiverEstimate(Timestamp at_time, DataRate bandwidth);
+ void UpdateReceiverEstimate(int64_t now_ms, uint32_t bandwidth);
// Call when a new delay-based estimate is available.
- void UpdateDelayBasedEstimate(Timestamp at_time, DataRate bitrate);
+ void UpdateDelayBasedEstimate(int64_t now_ms, uint32_t bitrate_bps);
// Call when we receive a RTCP message with a ReceiveBlock.
void UpdateReceiverBlock(uint8_t fraction_loss,
- TimeDelta rtt_ms,
+ int64_t rtt_ms,
int number_of_packets,
- Timestamp at_time);
+ int64_t now_ms);
// Call when we receive a RTCP message with a ReceiveBlock.
void UpdatePacketsLost(int packets_lost,
int number_of_packets,
- Timestamp at_time);
+ int64_t now_ms);
// Call when we receive a RTCP message with a ReceiveBlock.
- void UpdateRtt(TimeDelta rtt, Timestamp at_time);
+ void UpdateRtt(int64_t rtt, int64_t now_ms);
- void SetBitrates(absl::optional<DataRate> send_bitrate,
- DataRate min_bitrate,
- DataRate max_bitrate,
- Timestamp at_time);
- void SetSendBitrate(DataRate bitrate, Timestamp at_time);
- void SetMinMaxBitrate(DataRate min_bitrate, DataRate max_bitrate);
+ void SetBitrates(int send_bitrate, int min_bitrate, int max_bitrate);
+ void SetSendBitrate(int bitrate);
+ void SetMinMaxBitrate(int min_bitrate, int max_bitrate);
int GetMinBitrate() const;
private:
enum UmaState { kNoUpdate, kFirstDone, kDone };
- bool IsInStartPhase(Timestamp at_time) const;
+ bool IsInStartPhase(int64_t now_ms) const;
- void UpdateUmaStatsPacketsLost(Timestamp at_time, int packets_lost);
+ void UpdateUmaStatsPacketsLost(int64_t now_ms, int packets_lost);
// Updates history of min bitrates.
// After this method returns min_bitrate_history_.front().second contains the
// min bitrate used during last kBweIncreaseIntervalMs.
- void UpdateMinHistory(Timestamp at_time);
+ void UpdateMinHistory(int64_t now_ms);
- // Cap |bitrate| to [min_bitrate_configured_, max_bitrate_configured_] and
- // set |current_bitrate_| to the capped value and updates the event log.
- void CapBitrateToThresholds(Timestamp at_time, DataRate bitrate);
+ // Cap |bitrate_bps| to [min_bitrate_configured_, max_bitrate_configured_] and
+ // set |current_bitrate_bps_| to the capped value and updates the event log.
+ void CapBitrateToThresholds(int64_t now_ms, uint32_t bitrate_bps);
- std::deque<std::pair<Timestamp, DataRate> > min_bitrate_history_;
+ std::deque<std::pair<int64_t, uint32_t> > min_bitrate_history_;
// incoming filters
int lost_packets_since_last_loss_update_;
int expected_packets_since_last_loss_update_;
- DataRate current_bitrate_;
- DataRate min_bitrate_configured_;
- DataRate max_bitrate_configured_;
- Timestamp last_low_bitrate_log_;
+ uint32_t current_bitrate_bps_;
+ uint32_t min_bitrate_configured_;
+ uint32_t max_bitrate_configured_;
+ int64_t last_low_bitrate_log_ms_;
bool has_decreased_since_last_fraction_loss_;
- Timestamp last_loss_feedback_;
- Timestamp last_loss_packet_report_;
- Timestamp last_timeout_;
+ int64_t last_feedback_ms_;
+ int64_t last_packet_report_ms_;
+ int64_t last_timeout_ms_;
uint8_t last_fraction_loss_;
uint8_t last_logged_fraction_loss_;
- TimeDelta last_round_trip_time_;
+ int64_t last_round_trip_time_ms_;
- DataRate bwe_incoming_;
- DataRate delay_based_bitrate_;
- Timestamp time_last_decrease_;
- Timestamp first_report_time_;
+ uint32_t bwe_incoming_;
+ uint32_t delay_based_bitrate_bps_;
+ int64_t time_last_decrease_ms_;
+ int64_t first_report_time_ms_;
int initially_lost_packets_;
- DataRate bitrate_at_2_seconds_;
+ int bitrate_at_2_seconds_kbps_;
UmaState uma_update_state_;
UmaState uma_rtt_state_;
std::vector<bool> rampup_uma_stats_updated_;
RtcEventLog* event_log_;
- Timestamp last_rtc_event_log_;
+ int64_t last_rtc_event_log_ms_;
bool in_timeout_experiment_;
float low_loss_threshold_;
float high_loss_threshold_;
- DataRate bitrate_threshold_;
+ uint32_t bitrate_threshold_bps_;
};
} // namespace webrtc
#endif // MODULES_BITRATE_CONTROLLER_SEND_SIDE_BANDWIDTH_ESTIMATION_H_
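
min_bitrate_history_, declared above as a deque of (time, bitrate) pairs,
implements the "typical minimum sliding-window algorithm" mentioned in the .cc
comments: expired entries are dropped from the front and larger entries are
popped from the back before pushing, so front() always holds the minimum
bitrate of the last increase interval. A minimal sketch with the post-revert
ms/bps types; the free-function form is illustrative:

  #include <cstdint>
  #include <deque>
  #include <utility>

  void UpdateMinHistory(std::deque<std::pair<int64_t, uint32_t>>* history,
                        int64_t now_ms,
                        uint32_t current_bitrate_bps,
                        int64_t window_ms /* kBweIncreaseIntervalMs */) {
    // Drop data points that fell out of the window (+1 ms slack, as in the
    // original code, so a 0.5 ms rounding error cannot block an increase).
    while (!history->empty() &&
           now_ms - history->front().first + 1 > window_ms) {
      history->pop_front();
    }
    // Keep the deque monotonically increasing in bitrate: pop values higher
    // than the current bitrate before pushing it.
    while (!history->empty() &&
           current_bitrate_bps <= history->back().second) {
      history->pop_back();
    }
    history->push_back(std::make_pair(now_ms, current_bitrate_bps));
  }
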
diff --git a/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc b/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc
index becc616..8d5b08b 100644
--- a/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc
+++ b/modules/bitrate_controller/send_side_bandwidth_estimation_unittest.cc
@@ -35,25 +35,24 @@
}
void TestProbing(bool use_delay_based) {
- testing::NiceMock<MockRtcEventLog> event_log;
+ MockRtcEventLog event_log;
SendSideBandwidthEstimation bwe(&event_log);
- int64_t now_ms = 0;
- bwe.SetMinMaxBitrate(DataRate::bps(100000), DataRate::bps(1500000));
- bwe.SetSendBitrate(DataRate::bps(200000), Timestamp::ms(now_ms));
+ bwe.SetMinMaxBitrate(100000, 1500000);
+ bwe.SetSendBitrate(200000);
const int kRembBps = 1000000;
const int kSecondRembBps = kRembBps + 500000;
+ int64_t now_ms = 0;
- bwe.UpdateReceiverBlock(0, TimeDelta::ms(50), 1, Timestamp::ms(now_ms));
+ bwe.UpdateReceiverBlock(0, 50, 1, now_ms);
// Initial REMB applies immediately.
if (use_delay_based) {
- bwe.UpdateDelayBasedEstimate(Timestamp::ms(now_ms),
- DataRate::bps(kRembBps));
+ bwe.UpdateDelayBasedEstimate(now_ms, kRembBps);
} else {
- bwe.UpdateReceiverEstimate(Timestamp::ms(now_ms), DataRate::bps(kRembBps));
+ bwe.UpdateReceiverEstimate(now_ms, kRembBps);
}
- bwe.UpdateEstimate(Timestamp::ms(now_ms));
+ bwe.UpdateEstimate(now_ms);
int bitrate;
uint8_t fraction_loss;
int64_t rtt;
@@ -63,13 +62,11 @@
// Second REMB doesn't apply immediately.
now_ms += 2001;
if (use_delay_based) {
- bwe.UpdateDelayBasedEstimate(Timestamp::ms(now_ms),
- DataRate::bps(kSecondRembBps));
+ bwe.UpdateDelayBasedEstimate(now_ms, kSecondRembBps);
} else {
- bwe.UpdateReceiverEstimate(Timestamp::ms(now_ms),
- DataRate::bps(kSecondRembBps));
+ bwe.UpdateReceiverEstimate(now_ms, kSecondRembBps);
}
- bwe.UpdateEstimate(Timestamp::ms(now_ms));
+ bwe.UpdateEstimate(now_ms);
bitrate = 0;
bwe.CurrentEstimate(&bitrate, &fraction_loss, &rtt);
EXPECT_EQ(kRembBps, bitrate);
@@ -89,18 +86,17 @@
.Times(1);
EXPECT_CALL(event_log,
LogProxy(LossBasedBweUpdateWithBitrateAndLossFraction()))
- .Times(1);
+ .Times(2);
SendSideBandwidthEstimation bwe(&event_log);
static const int kMinBitrateBps = 100000;
static const int kInitialBitrateBps = 1000000;
- int64_t now_ms = 1000;
- bwe.SetMinMaxBitrate(DataRate::bps(kMinBitrateBps), DataRate::bps(1500000));
- bwe.SetSendBitrate(DataRate::bps(kInitialBitrateBps), Timestamp::ms(now_ms));
+ bwe.SetMinMaxBitrate(kMinBitrateBps, 1500000);
+ bwe.SetSendBitrate(kInitialBitrateBps);
static const uint8_t kFractionLoss = 128;
static const int64_t kRttMs = 50;
- now_ms += 10000;
+ int64_t now_ms = 0;
int bitrate_bps;
uint8_t fraction_loss;
int64_t rtt_ms;
@@ -110,11 +106,10 @@
EXPECT_EQ(0, rtt_ms);
// Signal heavy loss to go down in bitrate.
- bwe.UpdateReceiverBlock(kFractionLoss, TimeDelta::ms(kRttMs), 100,
- Timestamp::ms(now_ms));
+ bwe.UpdateReceiverBlock(kFractionLoss, kRttMs, 100, now_ms);
// Trigger an update 2 seconds later to not be rate limited.
now_ms += 1000;
- bwe.UpdateEstimate(Timestamp::ms(now_ms));
+ bwe.UpdateEstimate(now_ms);
bwe.CurrentEstimate(&bitrate_bps, &fraction_loss, &rtt_ms);
EXPECT_LT(bitrate_bps, kInitialBitrateBps);
@@ -132,7 +127,7 @@
// Trigger an update 2 seconds later to not be rate limited (but it still
// shouldn't update).
now_ms += 1000;
- bwe.UpdateEstimate(Timestamp::ms(now_ms));
+ bwe.UpdateEstimate(now_ms);
bwe.CurrentEstimate(&bitrate_bps, &fraction_loss, &rtt_ms);
EXPECT_EQ(last_bitrate_bps, bitrate_bps);
@@ -155,18 +150,16 @@
uint8_t fraction_loss;
int64_t rtt_ms;
- bwe.SetMinMaxBitrate(DataRate::bps(kMinBitrateBps),
- DataRate::bps(kMaxBitrateBps));
- bwe.SetSendBitrate(DataRate::bps(kInitialBitrateBps), Timestamp::ms(now_ms));
+ bwe.SetMinMaxBitrate(kMinBitrateBps, kMaxBitrateBps);
+ bwe.SetSendBitrate(kInitialBitrateBps);
- bwe.UpdateDelayBasedEstimate(Timestamp::ms(now_ms),
- DataRate::bps(kDelayBasedBitrateBps));
- bwe.UpdateEstimate(Timestamp::ms(now_ms));
+ bwe.UpdateDelayBasedEstimate(now_ms, kDelayBasedBitrateBps);
+ bwe.UpdateEstimate(now_ms);
bwe.CurrentEstimate(&bitrate_bps, &fraction_loss, &rtt_ms);
EXPECT_GE(bitrate_bps, kInitialBitrateBps);
EXPECT_LE(bitrate_bps, kDelayBasedBitrateBps);
- bwe.SetSendBitrate(DataRate::bps(kForcedHighBitrate), Timestamp::ms(now_ms));
+ bwe.SetSendBitrate(kForcedHighBitrate);
bwe.CurrentEstimate(&bitrate_bps, &fraction_loss, &rtt_ms);
EXPECT_EQ(bitrate_bps, kForcedHighBitrate);
}
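
The probing test above relies on the start-phase rule from
send_side_bandwidth_estimation.cc: REMB and delay-based estimates are adopted
directly only during the first two seconds after the first receiver report,
provided no loss has been reported. A minimal sketch of that gate with the
post-revert constants; the free function is illustrative:

  #include <cstdint>

  constexpr int64_t kStartPhaseMs = 2000;

  bool IsInStartPhase(int64_t first_report_time_ms, int64_t now_ms) {
    // -1 means no receiver block has been seen yet.
    return first_report_time_ms == -1 ||
           now_ms - first_report_time_ms < kStartPhaseMs;
  }

In TestProbing the first estimate arrives at now_ms = 0, inside the start
phase, and is adopted directly; the second arrives at now_ms = 2001, just
outside it, so UpdateEstimate() keeps the current value and the newer, higher
estimate only acts as an upper cap, which is why the test still expects
kRembBps.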