Implement timing frames.
Timing information is gathered in EncodedImage, starting at the encoders. It is then sent using an RTP header extension and, finally, collected at the GenericDecoder. Actual reporting and tests will be in the next CLs. BUG=webrtc:7594 Review-Url: https://codereview.webrtc.org/2911193002 Cr-Commit-Position: refs/heads/master@{#18659}
This commit is contained in:
@ -111,6 +111,34 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
|
||||
rotation_ = last_packet->video_header.rotation;
|
||||
_rotation_set = true;
|
||||
content_type_ = last_packet->video_header.content_type;
|
||||
if (last_packet->video_header.video_timing.is_timing_frame) {
|
||||
// ntp_time_ms_ may be -1 if not estimated yet. This is not a problem,
|
||||
// as this will be dealt with at the time of reporting.
|
||||
timing_.is_timing_frame = true;
|
||||
timing_.encode_start_ms =
|
||||
ntp_time_ms_ +
|
||||
last_packet->video_header.video_timing.encode_start_delta_ms;
|
||||
timing_.encode_finish_ms =
|
||||
ntp_time_ms_ +
|
||||
last_packet->video_header.video_timing.encode_finish_delta_ms;
|
||||
timing_.packetization_finish_ms =
|
||||
ntp_time_ms_ +
|
||||
last_packet->video_header.video_timing.packetization_finish_delta_ms;
|
||||
timing_.pacer_exit_ms =
|
||||
ntp_time_ms_ +
|
||||
last_packet->video_header.video_timing.pacer_exit_delta_ms;
|
||||
timing_.network_timestamp_ms =
|
||||
ntp_time_ms_ +
|
||||
last_packet->video_header.video_timing.network_timstamp_delta_ms;
|
||||
timing_.network2_timestamp_ms =
|
||||
ntp_time_ms_ +
|
||||
last_packet->video_header.video_timing.network2_timstamp_delta_ms;
|
||||
|
||||
timing_.receive_start_ms = first_packet->receive_time_ms;
|
||||
timing_.receive_finish_ms = last_packet->receive_time_ms;
|
||||
} else {
|
||||
timing_.is_timing_frame = false;
|
||||
}
|
||||
}
|
||||
|
||||
RtpFrameObject::~RtpFrameObject() {
|
||||
|
||||
Reference in New Issue
Block a user