Introduce dchecked_cast, and start using it
It's the faster, less strict cousin of checked_cast: instead of always
checking that the conversion preserves the value, it only DCHECKs it, so
the check compiles away in release builds.
BUG=none
Review-Url: https://codereview.webrtc.org/2714063002
Cr-Commit-Position: refs/heads/master@{#16958}
diff --git a/webrtc/modules/audio_coding/acm2/acm_receiver.cc b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
index fd4e91a..1577d2d 100644
--- a/webrtc/modules/audio_coding/acm2/acm_receiver.cc
+++ b/webrtc/modules/audio_coding/acm2/acm_receiver.cc
@@ -162,7 +162,7 @@
audio_frame->sample_rate_hz_ = desired_freq_hz;
RTC_DCHECK_EQ(
audio_frame->sample_rate_hz_,
- rtc::checked_cast<int>(audio_frame->samples_per_channel_ * 100));
+ rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
resampled_last_output_frame_ = true;
} else {
resampled_last_output_frame_ = false;
diff --git a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
index 32e6940..daeea35 100644
--- a/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/webrtc/modules/audio_coding/acm2/audio_coding_module.cc
@@ -370,7 +370,7 @@
frag->fragmentationOffset[i] = offset;
offset += info.redundant[i].encoded_bytes;
frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
- frag->fragmentationTimeDiff[i] = rtc::checked_cast<uint16_t>(
+ frag->fragmentationTimeDiff[i] = rtc::dchecked_cast<uint16_t>(
info.encoded_timestamp - info.redundant[i].encoded_timestamp);
frag->fragmentationPlType[i] = info.redundant[i].payload_type;
}
diff --git a/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc b/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc
index 5d42409..5a69ae4 100644
--- a/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc
+++ b/webrtc/modules/audio_coding/codecs/audio_format_conversion.cc
@@ -34,7 +34,7 @@
strncpy(ci.plname, name, sizeof(ci.plname));
ci.plname[sizeof(ci.plname) - 1] = '\0';
ci.plfreq = sample_rate;
- ci.channels = rtc::checked_cast<size_t>(num_channels);
+ ci.channels = rtc::dchecked_cast<size_t>(num_channels);
return ci;
}
diff --git a/webrtc/modules/audio_coding/neteq/expand.cc b/webrtc/modules/audio_coding/neteq/expand.cc
index 2154bfd..0c527fe 100644
--- a/webrtc/modules/audio_coding/neteq/expand.cc
+++ b/webrtc/modules/audio_coding/neteq/expand.cc
@@ -222,7 +222,7 @@
// >= 64 * fs_mult => go from 1 to 0 in about 32 ms.
// temp_shift = getbits(max_lag_) - 5.
int temp_shift =
- (31 - WebRtcSpl_NormW32(rtc::checked_cast<int32_t>(max_lag_))) - 5;
+ (31 - WebRtcSpl_NormW32(rtc::dchecked_cast<int32_t>(max_lag_))) - 5;
int16_t mix_factor_increment = 256 >> temp_shift;
if (stop_muting_) {
mix_factor_increment = 0;
@@ -315,8 +315,8 @@
kMaxConsecutiveExpands : consecutive_expands_ + 1;
expand_duration_samples_ += output->Size();
// Clamp the duration counter at 2 seconds.
- expand_duration_samples_ =
- std::min(expand_duration_samples_, rtc::checked_cast<size_t>(fs_hz_ * 2));
+ expand_duration_samples_ = std::min(expand_duration_samples_,
+ rtc::dchecked_cast<size_t>(fs_hz_ * 2));
return 0;
}
@@ -325,7 +325,7 @@
lag_index_direction_ = 0;
stop_muting_ = true; // Do not mute signal any more.
statistics_->LogDelayedPacketOutageEvent(
- rtc::checked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
+ rtc::dchecked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
}
void Expand::SetParametersForMergeAfterExpand() {
diff --git a/webrtc/modules/audio_coding/neteq/neteq_impl.cc b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
index 09a3296..786cb84 100644
--- a/webrtc/modules/audio_coding/neteq/neteq_impl.cc
+++ b/webrtc/modules/audio_coding/neteq/neteq_impl.cc
@@ -199,7 +199,7 @@
}
RTC_DCHECK_EQ(
audio_frame->sample_rate_hz_,
- rtc::checked_cast<int>(audio_frame->samples_per_channel_ * 100));
+ rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
SetAudioFrameActivityAndType(vad_->enabled(), LastOutputType(),
last_vad_activity_, audio_frame);
last_vad_activity_ = audio_frame->vad_activity_;
@@ -826,7 +826,7 @@
if (packet_length_samples != decision_logic_->packet_length_samples()) {
decision_logic_->set_packet_length_samples(packet_length_samples);
delay_manager_->SetPacketAudioLength(
- rtc::checked_cast<int>((1000 * packet_length_samples) / fs_hz_));
+ rtc::dchecked_cast<int>((1000 * packet_length_samples) / fs_hz_));
}
}
@@ -1131,7 +1131,7 @@
last_mode_ == kModePreemptiveExpandLowEnergy) {
// Subtract (samples_left + output_size_samples_) from sampleMemory.
decision_logic_->AddSampleMemory(
- -(samples_left + rtc::checked_cast<int>(output_size_samples_)));
+ -(samples_left + rtc::dchecked_cast<int>(output_size_samples_)));
}
// Check if it is time to play a DTMF event.
@@ -1157,11 +1157,9 @@
// Check if we already have enough samples in the |sync_buffer_|. If so,
// change decision to normal, unless the decision was merge, accelerate, or
// preemptive expand.
- if (samples_left >= rtc::checked_cast<int>(output_size_samples_) &&
- *operation != kMerge &&
- *operation != kAccelerate &&
- *operation != kFastAccelerate &&
- *operation != kPreemptiveExpand) {
+ if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
+ *operation != kMerge && *operation != kAccelerate &&
+ *operation != kFastAccelerate && *operation != kPreemptiveExpand) {
*operation = kNormal;
return 0;
}
@@ -1454,7 +1452,7 @@
return 0;
}
- while (*decoded_length < rtc::checked_cast<int>(output_size_samples_)) {
+ while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
const int length = decoder->Decode(
nullptr, 0, fs_hz_,
(decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
@@ -1500,7 +1498,7 @@
const auto& result = *opt_result;
*speech_type = result.speech_type;
if (result.num_decoded_samples > 0) {
- *decoded_length += rtc::checked_cast<int>(result.num_decoded_samples);
+ *decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
// Update |decoder_frame_length_| with number of samples per channel.
decoder_frame_length_ =
result.num_decoded_samples / decoder->Channels();
@@ -1513,7 +1511,7 @@
packet_list->clear();
break;
}
- if (*decoded_length > rtc::checked_cast<int>(decoded_buffer_length_)) {
+ if (*decoded_length > rtc::dchecked_cast<int>(decoded_buffer_length_)) {
// Guard against overflow.
LOG(LS_WARNING) << "Decoded too much.";
packet_list->clear();
@@ -1986,7 +1984,8 @@
packet_duration = packet->frame->Duration();
// TODO(ossu): Is this the correct way to track Opus FEC packets?
if (packet->priority.codec_level > 0) {
- stats_.SecondaryDecodedSamples(rtc::checked_cast<int>(packet_duration));
+ stats_.SecondaryDecodedSamples(
+ rtc::dchecked_cast<int>(packet_duration));
}
} else if (!has_cng_packet) {
LOG(LS_WARNING) << "Unknown payload type "
@@ -2029,7 +2028,7 @@
packet_buffer_->DiscardAllOldPackets(timestamp_);
}
- return rtc::checked_cast<int>(extracted_samples);
+ return rtc::dchecked_cast<int>(extracted_samples);
}
void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) {
diff --git a/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc b/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc
index 774832c..c14986d 100644
--- a/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc
+++ b/webrtc/modules/audio_coding/neteq/red_payload_splitter.cc
@@ -110,7 +110,7 @@
new_packet.payload_type = new_header.payload_type;
new_packet.sequence_number = red_packet.sequence_number;
new_packet.priority.red_level =
- rtc::checked_cast<int>((new_headers.size() - 1) - i);
+ rtc::dchecked_cast<int>((new_headers.size() - 1) - i);
new_packet.payload.SetData(payload_ptr, payload_length);
new_packets.push_front(std::move(new_packet));
payload_ptr += payload_length;
diff --git a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
index e9bceb7..b47ca12 100644
--- a/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/webrtc/modules/audio_coding/neteq/statistics_calculator.cc
@@ -218,7 +218,7 @@
stats->added_zero_samples = added_zero_samples_;
stats->current_buffer_size_ms =
static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
- const int ms_per_packet = rtc::checked_cast<int>(
+ const int ms_per_packet = rtc::dchecked_cast<int>(
decision_logic.packet_length_samples() / (fs_hz / 1000));
stats->preferred_buffer_size_ms = (delay_manager.TargetLevel() >> 8) *
ms_per_packet;
diff --git a/webrtc/modules/audio_coding/neteq/time_stretch.cc b/webrtc/modules/audio_coding/neteq/time_stretch.cc
index c96d165..0b3bad9 100644
--- a/webrtc/modules/audio_coding/neteq/time_stretch.cc
+++ b/webrtc/modules/audio_coding/neteq/time_stretch.cc
@@ -195,7 +195,7 @@
right_scale = std::max(0, right_scale);
left_side = left_side >> right_scale;
right_side =
- rtc::checked_cast<int32_t>(peak_index) * (right_side >> right_scale);
+ rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
// Scale |left_side| properly before comparing with |right_side|.
// (|scaling| is the scale factor before energy calculation, thus the scale
diff --git a/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc b/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
index 263f7b4..f837ad6 100644
--- a/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
+++ b/webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.cc
@@ -72,7 +72,7 @@
info = encoder_->Encode(rtp_timestamp_, generator_->Generate(num_samples),
&packet_data_->payload);
- rtp_timestamp_ += rtc::checked_cast<uint32_t>(
+ rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
num_samples * encoder_->RtpTimestampRateHz() /
encoder_->SampleRateHz());
++num_blocks;