Introduce dchecked_cast, and start using it

dchecked_cast is the faster, less strict cousin of checked_cast: it
DCHECKs (debug-only) that the value fits in the destination type
instead of CHECKing unconditionally.

BUG=none
Review-Url: https://codereview.webrtc.org/2714063002
Cr-Commit-Position: refs/heads/master@{#16958}
This commit is contained in:
@ -162,7 +162,7 @@ int AcmReceiver::GetAudio(int desired_freq_hz,
|
||||
audio_frame->sample_rate_hz_ = desired_freq_hz;
|
||||
RTC_DCHECK_EQ(
|
||||
audio_frame->sample_rate_hz_,
|
||||
rtc::checked_cast<int>(audio_frame->samples_per_channel_ * 100));
|
||||
rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
|
||||
resampled_last_output_frame_ = true;
|
||||
} else {
|
||||
resampled_last_output_frame_ = false;
|
||||
|
||||
@ -370,7 +370,7 @@ void ConvertEncodedInfoToFragmentationHeader(
|
||||
frag->fragmentationOffset[i] = offset;
|
||||
offset += info.redundant[i].encoded_bytes;
|
||||
frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
|
||||
frag->fragmentationTimeDiff[i] = rtc::checked_cast<uint16_t>(
|
||||
frag->fragmentationTimeDiff[i] = rtc::dchecked_cast<uint16_t>(
|
||||
info.encoded_timestamp - info.redundant[i].encoded_timestamp);
|
||||
frag->fragmentationPlType[i] = info.redundant[i].payload_type;
|
||||
}
|
||||
|
||||
@ -34,7 +34,7 @@ CodecInst MakeCodecInst(int payload_type,
|
||||
strncpy(ci.plname, name, sizeof(ci.plname));
|
||||
ci.plname[sizeof(ci.plname) - 1] = '\0';
|
||||
ci.plfreq = sample_rate;
|
||||
ci.channels = rtc::checked_cast<size_t>(num_channels);
|
||||
ci.channels = rtc::dchecked_cast<size_t>(num_channels);
|
||||
return ci;
|
||||
}
|
||||
|
||||
|
||||
@ -222,7 +222,7 @@ int Expand::Process(AudioMultiVector* output) {
|
||||
// >= 64 * fs_mult => go from 1 to 0 in about 32 ms.
|
||||
// temp_shift = getbits(max_lag_) - 5.
|
||||
int temp_shift =
|
||||
(31 - WebRtcSpl_NormW32(rtc::checked_cast<int32_t>(max_lag_))) - 5;
|
||||
(31 - WebRtcSpl_NormW32(rtc::dchecked_cast<int32_t>(max_lag_))) - 5;
|
||||
int16_t mix_factor_increment = 256 >> temp_shift;
|
||||
if (stop_muting_) {
|
||||
mix_factor_increment = 0;
|
||||
@ -315,8 +315,8 @@ int Expand::Process(AudioMultiVector* output) {
|
||||
kMaxConsecutiveExpands : consecutive_expands_ + 1;
|
||||
expand_duration_samples_ += output->Size();
|
||||
// Clamp the duration counter at 2 seconds.
|
||||
expand_duration_samples_ =
|
||||
std::min(expand_duration_samples_, rtc::checked_cast<size_t>(fs_hz_ * 2));
|
||||
expand_duration_samples_ = std::min(expand_duration_samples_,
|
||||
rtc::dchecked_cast<size_t>(fs_hz_ * 2));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -325,7 +325,7 @@ void Expand::SetParametersForNormalAfterExpand() {
|
||||
lag_index_direction_ = 0;
|
||||
stop_muting_ = true; // Do not mute signal any more.
|
||||
statistics_->LogDelayedPacketOutageEvent(
|
||||
rtc::checked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
|
||||
rtc::dchecked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
|
||||
}
|
||||
|
||||
void Expand::SetParametersForMergeAfterExpand() {
|
||||
|
||||
@ -199,7 +199,7 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame, bool* muted) {
|
||||
}
|
||||
RTC_DCHECK_EQ(
|
||||
audio_frame->sample_rate_hz_,
|
||||
rtc::checked_cast<int>(audio_frame->samples_per_channel_ * 100));
|
||||
rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
|
||||
SetAudioFrameActivityAndType(vad_->enabled(), LastOutputType(),
|
||||
last_vad_activity_, audio_frame);
|
||||
last_vad_activity_ = audio_frame->vad_activity_;
|
||||
@ -826,7 +826,7 @@ int NetEqImpl::InsertPacketInternal(const WebRtcRTPHeader& rtp_header,
|
||||
if (packet_length_samples != decision_logic_->packet_length_samples()) {
|
||||
decision_logic_->set_packet_length_samples(packet_length_samples);
|
||||
delay_manager_->SetPacketAudioLength(
|
||||
rtc::checked_cast<int>((1000 * packet_length_samples) / fs_hz_));
|
||||
rtc::dchecked_cast<int>((1000 * packet_length_samples) / fs_hz_));
|
||||
}
|
||||
}
|
||||
|
||||
@ -1131,7 +1131,7 @@ int NetEqImpl::GetDecision(Operations* operation,
|
||||
last_mode_ == kModePreemptiveExpandLowEnergy) {
|
||||
// Subtract (samples_left + output_size_samples_) from sampleMemory.
|
||||
decision_logic_->AddSampleMemory(
|
||||
-(samples_left + rtc::checked_cast<int>(output_size_samples_)));
|
||||
-(samples_left + rtc::dchecked_cast<int>(output_size_samples_)));
|
||||
}
|
||||
|
||||
// Check if it is time to play a DTMF event.
|
||||
@ -1157,11 +1157,9 @@ int NetEqImpl::GetDecision(Operations* operation,
|
||||
// Check if we already have enough samples in the |sync_buffer_|. If so,
|
||||
// change decision to normal, unless the decision was merge, accelerate, or
|
||||
// preemptive expand.
|
||||
if (samples_left >= rtc::checked_cast<int>(output_size_samples_) &&
|
||||
*operation != kMerge &&
|
||||
*operation != kAccelerate &&
|
||||
*operation != kFastAccelerate &&
|
||||
*operation != kPreemptiveExpand) {
|
||||
if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
|
||||
*operation != kMerge && *operation != kAccelerate &&
|
||||
*operation != kFastAccelerate && *operation != kPreemptiveExpand) {
|
||||
*operation = kNormal;
|
||||
return 0;
|
||||
}
|
||||
@ -1454,7 +1452,7 @@ int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (*decoded_length < rtc::checked_cast<int>(output_size_samples_)) {
|
||||
while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
|
||||
const int length = decoder->Decode(
|
||||
nullptr, 0, fs_hz_,
|
||||
(decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
|
||||
@ -1500,7 +1498,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
|
||||
const auto& result = *opt_result;
|
||||
*speech_type = result.speech_type;
|
||||
if (result.num_decoded_samples > 0) {
|
||||
*decoded_length += rtc::checked_cast<int>(result.num_decoded_samples);
|
||||
*decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
|
||||
// Update |decoder_frame_length_| with number of samples per channel.
|
||||
decoder_frame_length_ =
|
||||
result.num_decoded_samples / decoder->Channels();
|
||||
@ -1513,7 +1511,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
|
||||
packet_list->clear();
|
||||
break;
|
||||
}
|
||||
if (*decoded_length > rtc::checked_cast<int>(decoded_buffer_length_)) {
|
||||
if (*decoded_length > rtc::dchecked_cast<int>(decoded_buffer_length_)) {
|
||||
// Guard against overflow.
|
||||
LOG(LS_WARNING) << "Decoded too much.";
|
||||
packet_list->clear();
|
||||
@ -1986,7 +1984,8 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
|
||||
packet_duration = packet->frame->Duration();
|
||||
// TODO(ossu): Is this the correct way to track Opus FEC packets?
|
||||
if (packet->priority.codec_level > 0) {
|
||||
stats_.SecondaryDecodedSamples(rtc::checked_cast<int>(packet_duration));
|
||||
stats_.SecondaryDecodedSamples(
|
||||
rtc::dchecked_cast<int>(packet_duration));
|
||||
}
|
||||
} else if (!has_cng_packet) {
|
||||
LOG(LS_WARNING) << "Unknown payload type "
|
||||
@ -2029,7 +2028,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
|
||||
packet_buffer_->DiscardAllOldPackets(timestamp_);
|
||||
}
|
||||
|
||||
return rtc::checked_cast<int>(extracted_samples);
|
||||
return rtc::dchecked_cast<int>(extracted_samples);
|
||||
}
|
||||
|
||||
void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) {
|
||||
|
||||
@ -110,7 +110,7 @@ bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
|
||||
new_packet.payload_type = new_header.payload_type;
|
||||
new_packet.sequence_number = red_packet.sequence_number;
|
||||
new_packet.priority.red_level =
|
||||
rtc::checked_cast<int>((new_headers.size() - 1) - i);
|
||||
rtc::dchecked_cast<int>((new_headers.size() - 1) - i);
|
||||
new_packet.payload.SetData(payload_ptr, payload_length);
|
||||
new_packets.push_front(std::move(new_packet));
|
||||
payload_ptr += payload_length;
|
||||
|
||||
@ -218,7 +218,7 @@ void StatisticsCalculator::GetNetworkStatistics(
|
||||
stats->added_zero_samples = added_zero_samples_;
|
||||
stats->current_buffer_size_ms =
|
||||
static_cast<uint16_t>(num_samples_in_buffers * 1000 / fs_hz);
|
||||
const int ms_per_packet = rtc::checked_cast<int>(
|
||||
const int ms_per_packet = rtc::dchecked_cast<int>(
|
||||
decision_logic.packet_length_samples() / (fs_hz / 1000));
|
||||
stats->preferred_buffer_size_ms = (delay_manager.TargetLevel() >> 8) *
|
||||
ms_per_packet;
|
||||
|
||||
@ -195,7 +195,7 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy, int32_t vec2_energy,
|
||||
right_scale = std::max(0, right_scale);
|
||||
left_side = left_side >> right_scale;
|
||||
right_side =
|
||||
rtc::checked_cast<int32_t>(peak_index) * (right_side >> right_scale);
|
||||
rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
|
||||
|
||||
// Scale |left_side| properly before comparing with |right_side|.
|
||||
// (|scaling| is the scale factor before energy calculation, thus the scale
|
||||
|
||||
@ -72,7 +72,7 @@ void EncodeNetEqInput::CreatePacket() {
|
||||
info = encoder_->Encode(rtp_timestamp_, generator_->Generate(num_samples),
|
||||
&packet_data_->payload);
|
||||
|
||||
rtp_timestamp_ += rtc::checked_cast<uint32_t>(
|
||||
rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
|
||||
num_samples * encoder_->RtpTimestampRateHz() /
|
||||
encoder_->SampleRateHz());
|
||||
++num_blocks;
|
||||
|
||||
Reference in New Issue
Block a user