Reformat the WebRTC code base

Running clang-format with Chromium's style guide.
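
For reference, a minimal sketch of the kind of invocation this implies (the
exact flags and file path are illustrative, not recorded in this commit):

  # Reformat a file in place with the predefined Chromium style.
  clang-format -i -style=Chromium modules/audio_coding/neteq/neteq_impl.cc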

The goal is n-fold:
 * providing consistency and readability (that's what code guidelines are for)
 * preventing noise with presubmit checks and git cl format (see the example after this list)
 * building on the previous point: making it easier to automatically fix format issues
 * you name it

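As referenced in the list above, a sketch of the depot_tools command the
presubmit flow builds on (behaviour paraphrased from its documentation):

  # Reformat only the lines touched by the current CL, keeping unrelated
  # formatting churn out of reviews and presubmit output.
  git cl format
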
Please consider using git-hyper-blame to ignore this commit.
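
One possible way to do that with depot_tools' git-hyper-blame (the -i flag and
the file path below are an illustration, not part of this change):

  # Annotate a file while skipping this reformatting commit.
  git hyper-blame -i 665174fdbb modules/audio_coding/neteq/neteq_impl.cc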

Bug: webrtc:9340
Change-Id: I694567c4cdf8cee2860958cfe82bfaf25848bb87
Reviewed-on: https://webrtc-review.googlesource.com/81185
Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23660}
Author: Yves Gerey
Date: 2018-06-19 15:03:05 +02:00
Commit: 665174fdbb (parent: b602123a5a)
1569 changed files with 30495 additions and 30309 deletions


@@ -681,8 +681,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
decoder->IncomingPacket(packet_list.front().payload.data(),
packet_list.front().payload.size(),
packet_list.front().sequence_number,
packet_list.front().timestamp,
receive_timestamp);
packet_list.front().timestamp, receive_timestamp);
}
PacketList parsed_packet_list;
@@ -703,7 +702,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
const auto sequence_number = packet.sequence_number;
const auto payload_type = packet.payload_type;
const Packet::Priority original_priority = packet.priority;
auto packet_from_result = [&] (AudioDecoder::ParseResult& result) {
auto packet_from_result = [&](AudioDecoder::ParseResult& result) {
Packet new_packet;
new_packet.sequence_number = sequence_number;
new_packet.payload_type = payload_type;
@@ -788,8 +787,7 @@ int NetEqImpl::InsertPacketInternal(const RTPHeader& rtp_header,
assert(decoder_info);
if (decoder_info->SampleRateHz() != fs_hz_ ||
channels != algorithm_buffer_->Channels()) {
SetSampleRateAndChannels(decoder_info->SampleRateHz(),
channels);
SetSampleRateAndChannels(decoder_info->SampleRateHz(), channels);
}
if (nack_enabled_) {
RTC_DCHECK(nack_);
@@ -866,8 +864,8 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, bool* muted) {
return 0;
}
int return_value = GetDecision(&operation, &packet_list, &dtmf_event,
&play_dtmf);
int return_value =
GetDecision(&operation, &packet_list, &dtmf_event, &play_dtmf);
if (return_value != 0) {
last_mode_ = kModeError;
return return_value;
@@ -876,12 +874,11 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, bool* muted) {
AudioDecoder::SpeechType speech_type;
int length = 0;
const size_t start_num_packets = packet_list.size();
int decode_return_value = Decode(&packet_list, &operation,
&length, &speech_type);
int decode_return_value =
Decode(&packet_list, &operation, &length, &speech_type);
assert(vad_.get());
bool sid_frame_available =
(operation == kRfc3389Cng && !packet_list.empty());
bool sid_frame_available = (operation == kRfc3389Cng && !packet_list.empty());
vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type,
sid_frame_available, fs_hz_);
@@ -1033,8 +1030,7 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, bool* muted) {
// Update the background noise parameters if last operation wrote data
// straight from the decoder to the |sync_buffer_|. That is, none of the
// operations that modify the signal can be followed by a parameter update.
if ((last_mode_ == kModeNormal) ||
(last_mode_ == kModeAccelerateFail) ||
if ((last_mode_ == kModeNormal) || (last_mode_ == kModeAccelerateFail) ||
(last_mode_ == kModePreemptiveExpandFail) ||
(last_mode_ == kModeRfc3389Cng) ||
(last_mode_ == kModeCodecInternalCng)) {
@@ -1051,7 +1047,8 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, bool* muted) {
// If last operation was not expand, calculate the |playout_timestamp_| from
// the |sync_buffer_|. However, do not update the |playout_timestamp_| if it
// would be moved "backwards".
uint32_t temp_timestamp = sync_buffer_->end_timestamp() -
uint32_t temp_timestamp =
sync_buffer_->end_timestamp() -
static_cast<uint32_t>(sync_buffer_->FutureLength());
if (static_cast<int32_t>(temp_timestamp - playout_timestamp_) > 0) {
playout_timestamp_ = temp_timestamp;
@@ -1070,13 +1067,13 @@ int NetEqImpl::GetAudioInternal(AudioFrame* audio_frame, bool* muted) {
: timestamp_scaler_->ToExternal(playout_timestamp_) -
static_cast<uint32_t>(audio_frame->samples_per_channel_);
if (!(last_mode_ == kModeRfc3389Cng ||
last_mode_ == kModeCodecInternalCng ||
last_mode_ == kModeExpand)) {
if (!(last_mode_ == kModeRfc3389Cng || last_mode_ == kModeCodecInternalCng ||
last_mode_ == kModeExpand)) {
generated_noise_stopwatch_.reset();
}
if (decode_return_value) return decode_return_value;
if (decode_return_value)
return decode_return_value;
return return_value;
}
@@ -1100,11 +1097,10 @@ int NetEqImpl::GetDecision(Operations* operation,
RTC_DCHECK(!generated_noise_stopwatch_ ||
generated_noise_stopwatch_->ElapsedTicks() >= 1);
uint64_t generated_noise_samples =
generated_noise_stopwatch_
? (generated_noise_stopwatch_->ElapsedTicks() - 1) *
output_size_samples_ +
decision_logic_->noise_fast_forward()
: 0;
generated_noise_stopwatch_ ? (generated_noise_stopwatch_->ElapsedTicks() -
1) * output_size_samples_ +
decision_logic_->noise_fast_forward()
: 0;
if (decision_logic_->CngRfc3389On() || last_mode_ == kModeRfc3389Cng) {
// Because of timestamp peculiarities, we have to "manually" disallow using
@@ -1127,7 +1123,7 @@ int NetEqImpl::GetDecision(Operations* operation,
assert(expand_.get());
const int samples_left = static_cast<int>(sync_buffer_->FutureLength() -
expand_->overlap_length());
expand_->overlap_length());
if (last_mode_ == kModeAccelerateSuccess ||
last_mode_ == kModeAccelerateLowEnergy ||
last_mode_ == kModePreemptiveExpandSuccess ||
@@ -1139,9 +1135,8 @@ int NetEqImpl::GetDecision(Operations* operation,
// Check if it is time to play a DTMF event.
if (dtmf_buffer_->GetEvent(
static_cast<uint32_t>(
end_timestamp + generated_noise_samples),
dtmf_event)) {
static_cast<uint32_t>(end_timestamp + generated_noise_samples),
dtmf_event)) {
*play_dtmf = true;
}
@@ -1243,12 +1238,12 @@ int NetEqImpl::GetDecision(Operations* operation,
decision_logic_->set_prev_time_scale(true);
return 0;
} else if (samples_left >= static_cast<int>(samples_10_ms) &&
decoder_frame_length_ >= samples_30_ms) {
decoder_frame_length_ >= samples_30_ms) {
// Avoid decoding more data as it might overflow the playout buffer.
*operation = kNormal;
return 0;
} else if (samples_left < static_cast<int>(samples_20_ms) &&
decoder_frame_length_ < samples_30_ms) {
decoder_frame_length_ < samples_30_ms) {
// Build up decoded data by decoding at least 20 ms of audio data. Do
// not perform accelerate yet, but wait until we only need to do one
// decoding.
@@ -1267,7 +1262,7 @@ int NetEqImpl::GetDecision(Operations* operation,
// audio data.
if ((samples_left >= static_cast<int>(samples_30_ms)) ||
(samples_left >= static_cast<int>(samples_10_ms) &&
decoder_frame_length_ >= samples_30_ms)) {
decoder_frame_length_ >= samples_30_ms)) {
// Already have enough data, so we do not need to extract any more.
// Or, avoid decoding more data as it might overflow the playout buffer.
// Still try preemptive expand, though.
@@ -1339,7 +1334,8 @@ int NetEqImpl::GetDecision(Operations* operation,
return 0;
}
int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
int NetEqImpl::Decode(PacketList* packet_list,
Operations* operation,
int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
*speech_type = AudioDecoder::kSpeech;
@@ -1364,8 +1360,8 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
decoder_database_->SetActiveDecoder(payload_type, &decoder_changed);
if (decoder_changed) {
// We have a new decoder. Re-init some values.
const DecoderDatabase::DecoderInfo* decoder_info = decoder_database_
->GetDecoderInfo(payload_type);
const DecoderDatabase::DecoderInfo* decoder_info =
decoder_database_->GetDecoderInfo(payload_type);
assert(decoder_info);
if (!decoder_info) {
RTC_LOG(LS_WARNING)
@@ -1411,8 +1407,8 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
RTC_DCHECK(packet_list->empty());
return_value = DecodeCng(decoder, decoded_length, speech_type);
} else {
return_value = DecodeLoop(packet_list, *operation, decoder,
decoded_length, speech_type);
return_value = DecodeLoop(packet_list, *operation, decoder, decoded_length,
speech_type);
}
if (*decoded_length < 0) {
@@ -1446,7 +1442,8 @@ int NetEqImpl::Decode(PacketList* packet_list, Operations* operation,
return return_value;
}
int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
int NetEqImpl::DecodeCng(AudioDecoder* decoder,
int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
if (!decoder) {
// This happens when active decoder is not defined.
@@ -1456,9 +1453,9 @@ int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
const int length = decoder->Decode(
nullptr, 0, fs_hz_,
(decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
&decoded_buffer_[*decoded_length], speech_type);
nullptr, 0, fs_hz_,
(decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
&decoded_buffer_[*decoded_length], speech_type);
if (length > 0) {
*decoded_length += length;
} else {
@@ -1476,15 +1473,16 @@ int NetEqImpl::DecodeCng(AudioDecoder* decoder, int* decoded_length,
return 0;
}
int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
AudioDecoder* decoder, int* decoded_length,
int NetEqImpl::DecodeLoop(PacketList* packet_list,
const Operations& operation,
AudioDecoder* decoder,
int* decoded_length,
AudioDecoder::SpeechType* speech_type) {
RTC_DCHECK(last_decoded_timestamps_.empty());
// Do decoding.
while (
!packet_list->empty() &&
!decoder_database_->IsComfortNoise(packet_list->front().payload_type)) {
while (!packet_list->empty() && !decoder_database_->IsComfortNoise(
packet_list->front().payload_type)) {
assert(decoder); // At this point, we must have a decoder object.
// The number of channels in the |sync_buffer_| should be the same as the
// number decoder channels.
@@ -1526,15 +1524,16 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list, const Operations& operation,
// If the list is not empty at this point, either a decoding error terminated
// the while-loop, or list must hold exactly one CNG packet.
assert(
packet_list->empty() || *decoded_length < 0 ||
(packet_list->size() == 1 &&
decoder_database_->IsComfortNoise(packet_list->front().payload_type)));
assert(packet_list->empty() || *decoded_length < 0 ||
(packet_list->size() == 1 && decoder_database_->IsComfortNoise(
packet_list->front().payload_type)));
return 0;
}
void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
AudioDecoder::SpeechType speech_type, bool play_dtmf) {
void NetEqImpl::DoNormal(const int16_t* decoded_buffer,
size_t decoded_length,
AudioDecoder::SpeechType speech_type,
bool play_dtmf) {
assert(normal_.get());
normal_->Process(decoded_buffer, decoded_length, last_mode_,
algorithm_buffer_.get());
@@ -1543,9 +1542,8 @@ void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
}
// If last packet was decoded as an inband CNG, set mode to CNG instead.
if ((speech_type == AudioDecoder::kComfortNoise)
|| ((last_mode_ == kModeCodecInternalCng)
&& (decoded_length == 0))) {
if ((speech_type == AudioDecoder::kComfortNoise) ||
((last_mode_ == kModeCodecInternalCng) && (decoded_length == 0))) {
// TODO(hlundin): Remove second part of || statement above.
last_mode_ = kModeCodecInternalCng;
}
@@ -1555,11 +1553,13 @@ void NetEqImpl::DoNormal(const int16_t* decoded_buffer, size_t decoded_length,
}
}
void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
AudioDecoder::SpeechType speech_type, bool play_dtmf) {
void NetEqImpl::DoMerge(int16_t* decoded_buffer,
size_t decoded_length,
AudioDecoder::SpeechType speech_type,
bool play_dtmf) {
assert(merge_.get());
size_t new_length = merge_->Process(decoded_buffer, decoded_length,
algorithm_buffer_.get());
size_t new_length =
merge_->Process(decoded_buffer, decoded_length, algorithm_buffer_.get());
// Correction can be negative.
int expand_length_correction =
rtc::dchecked_cast<int>(new_length) -
@@ -1587,7 +1587,7 @@ void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length,
int NetEqImpl::DoExpand(bool play_dtmf) {
while ((sync_buffer_->FutureLength() - expand_->overlap_length()) <
output_size_samples_) {
output_size_samples_) {
algorithm_buffer_->Clear();
int return_value = expand_->Process(algorithm_buffer_.get());
size_t length = algorithm_buffer_->Size();
@@ -1635,11 +1635,10 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
size_t decoded_length_per_channel = decoded_length / num_channels;
if (decoded_length_per_channel < required_samples) {
// Must move data from the |sync_buffer_| in order to get 30 ms.
borrowed_samples_per_channel = static_cast<int>(required_samples -
decoded_length_per_channel);
borrowed_samples_per_channel =
static_cast<int>(required_samples - decoded_length_per_channel);
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
decoded_buffer,
sizeof(int16_t) * decoded_length);
decoded_buffer, sizeof(int16_t) * decoded_length);
sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
decoded_buffer);
decoded_length = required_samples * num_channels;
@@ -1672,17 +1671,16 @@ int NetEqImpl::DoAccelerate(int16_t* decoded_buffer,
if (length < borrowed_samples_per_channel) {
// This destroys the beginning of the buffer, but will not cause any
// problems.
sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
sync_buffer_->Size() -
borrowed_samples_per_channel);
sync_buffer_->ReplaceAtIndex(
*algorithm_buffer_,
sync_buffer_->Size() - borrowed_samples_per_channel);
sync_buffer_->PushFrontZeros(borrowed_samples_per_channel - length);
algorithm_buffer_->PopFront(length);
assert(algorithm_buffer_->Empty());
} else {
sync_buffer_->ReplaceAtIndex(*algorithm_buffer_,
borrowed_samples_per_channel,
sync_buffer_->Size() -
borrowed_samples_per_channel);
sync_buffer_->ReplaceAtIndex(
*algorithm_buffer_, borrowed_samples_per_channel,
sync_buffer_->Size() - borrowed_samples_per_channel);
algorithm_buffer_->PopFront(borrowed_samples_per_channel);
}
}
@@ -1714,11 +1712,11 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
required_samples - decoded_length_per_channel;
// Calculate how many of these were already played out.
old_borrowed_samples_per_channel =
(borrowed_samples_per_channel > sync_buffer_->FutureLength()) ?
(borrowed_samples_per_channel - sync_buffer_->FutureLength()) : 0;
(borrowed_samples_per_channel > sync_buffer_->FutureLength())
? (borrowed_samples_per_channel - sync_buffer_->FutureLength())
: 0;
memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels],
decoded_buffer,
sizeof(int16_t) * decoded_length);
decoded_buffer, sizeof(int16_t) * decoded_length);
sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel,
decoded_buffer);
decoded_length = required_samples * num_channels;
@@ -1726,8 +1724,7 @@ int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer,
size_t samples_added;
PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process(
decoded_buffer, decoded_length,
old_borrowed_samples_per_channel,
decoded_buffer, decoded_length, old_borrowed_samples_per_channel,
algorithm_buffer_.get(), &samples_added);
stats_.PreemptiveExpandedSamples(samples_added);
switch (return_code) {
@@ -1780,8 +1777,8 @@ int NetEqImpl::DoRfc3389Cng(PacketList* packet_list, bool play_dtmf) {
return -comfort_noise_->internal_error_code();
}
}
int cn_return = comfort_noise_->Generate(output_size_samples_,
algorithm_buffer_.get());
int cn_return =
comfort_noise_->Generate(output_size_samples_, algorithm_buffer_.get());
expand_->Reset();
last_mode_ = kModeRfc3389Cng;
if (!play_dtmf) {
@@ -1909,16 +1906,17 @@ void NetEqImpl::DoAlternativePlc(bool increase_timestamp) {
expand_->Reset();
}
int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event,
size_t num_channels,
int16_t* output) const {
size_t out_index = 0;
size_t overdub_length = output_size_samples_; // Default value.
if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) {
// Special operation for transition from "DTMF only" to "DTMF overdub".
out_index = std::min(
sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
output_size_samples_);
out_index =
std::min(sync_buffer_->dtmf_index() - sync_buffer_->next_index(),
output_size_samples_);
overdub_length = output_size_samples_ - out_index;
}
@@ -1929,8 +1927,8 @@ int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels,
dtmf_event.volume);
}
if (dtmf_return_value == 0) {
dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length,
&dtmf_output);
dtmf_return_value =
dtmf_tone_generator_->Generate(overdub_length, &dtmf_output);
assert(overdub_length == dtmf_output.Size());
}
dtmf_output.ReadInterleaved(overdub_length, &output[out_index]);
@@ -2051,7 +2049,7 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
RTC_LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " "
<< channels;
// TODO(hlundin): Change to an enumerator and skip assert.
assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000);
assert(channels > 0);
fs_hz_ = fs_hz;
@@ -2085,7 +2083,7 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
// Move index so that we create a small set of future samples (all 0).
sync_buffer_->set_next_index(sync_buffer_->next_index() -
expand_->overlap_length());
expand_->overlap_length());
normal_.reset(new Normal(fs_hz, decoder_database_.get(), *background_noise_,
expand_.get()));
@@ -2095,8 +2093,8 @@ void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) {
fs_hz, channels, *background_noise_, expand_->overlap_length()));
// Delete ComfortNoise object and create a new one.
comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(),
sync_buffer_.get()));
comfort_noise_.reset(
new ComfortNoise(fs_hz, decoder_database_.get(), sync_buffer_.get()));
// Verify that |decoded_buffer_| is long enough.
if (decoded_buffer_length_ < kMaxFrameSize * channels) {