Use int64_t more consistently for times, in particular for RTT values.

Existing code was inconsistent about whether to use uint16_t, int, unsigned int,
or uint32_t, and sometimes silently truncated one to another, or truncated
int64_t.  Because most core time-handling functions use int64_t, being
consistent about using int64_t unless otherwise necessary minimizes the number
of explicit or implicit casts.
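
As an illustration of the problem being fixed (not code from this change; the
function name and values below are made up), a 64-bit time silently loses data
when passed to a narrower parameter:

    #include <cstdint>
    #include <iostream>

    // Hypothetical callback with a narrow parameter type.
    void OnReportBlock(uint32_t now_ms) {
      std::cout << "now_ms = " << now_ms << "\n";
    }

    int main() {
      // Milliseconds since the Unix epoch no longer fit in 32 bits.
      int64_t now_ms = 1421099481000;
      OnReportBlock(now_ms);  // Implicitly truncated; may not even trigger a warning.
      // Keeping the parameter as int64_t would remove the need for any cast here.
      return 0;
    }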

BUG=chromium:81439
TEST=none
R=henrik.lundin@webrtc.org, holmer@google.com, tommi@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/31349004

git-svn-id: http://webrtc.googlecode.com/svn/trunk@8045 4adac7df-926f-26a2-2b94-8c16560cd09d
pkasting@chromium.org
2015-01-12 21:51:21 +00:00
parent a7add19cf4
commit 16825b1a82
124 changed files with 422 additions and 417 deletions


@@ -345,7 +345,7 @@ void StreamStatisticianImpl::LastReceiveTimeNtp(uint32_t* secs,
 }
 bool StreamStatisticianImpl::IsRetransmitOfOldPacket(
-    const RTPHeader& header, int min_rtt) const {
+    const RTPHeader& header, int64_t min_rtt) const {
   CriticalSectionScoped cs(stream_lock_.get());
   if (InOrderPacketInternal(header.sequenceNumber)) {
     return false;
@@ -358,17 +358,16 @@ bool StreamStatisticianImpl::IsRetransmitOfOldPacket(
   // Diff in time stamp since last received in order.
   uint32_t timestamp_diff = header.timestamp - last_received_timestamp_;
-  int32_t rtp_time_stamp_diff_ms = static_cast<int32_t>(timestamp_diff) /
-      frequency_khz;
+  uint32_t rtp_time_stamp_diff_ms = timestamp_diff / frequency_khz;
-  int32_t max_delay_ms = 0;
+  int64_t max_delay_ms = 0;
   if (min_rtt == 0) {
     // Jitter standard deviation in samples.
     float jitter_std = sqrt(static_cast<float>(jitter_q4_ >> 4));
     // 2 times the standard deviation => 95% confidence.
     // And transform to milliseconds by dividing by the frequency in kHz.
-    max_delay_ms = static_cast<int32_t>((2 * jitter_std) / frequency_khz);
+    max_delay_ms = static_cast<int64_t>((2 * jitter_std) / frequency_khz);
     // Min max_delay_ms is 1.
     if (max_delay_ms == 0) {
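
The delay threshold computed in the hunk above can be sketched in isolation as
follows; this is not part of the patch, and jitter_q4 and frequency_khz are
stand-ins for the corresponding StreamStatisticianImpl members, with made-up
example values:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main() {
      uint32_t jitter_q4 = 32400 << 4;  // Interarrival jitter, Q4 fixed point.
      int frequency_khz = 90;           // 90 kHz video RTP clock.

      // Jitter standard deviation in samples (undo the Q4 shift).
      float jitter_std = std::sqrt(static_cast<float>(jitter_q4 >> 4));

      // 2 standard deviations => ~95% confidence, converted to milliseconds
      // by dividing by the clock frequency in kHz.
      int64_t max_delay_ms = static_cast<int64_t>((2 * jitter_std) / frequency_khz);
      if (max_delay_ms == 0) {
        max_delay_ms = 1;  // The original code enforces a minimum of 1 ms.
      }
      std::cout << "max_delay_ms = " << max_delay_ms << "\n";  // Prints 4.
      return 0;
    }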