Get pure encode time.
Measure time spent in frame encode callback, accumulate it for layers and subtract it from measured encode time of next layer frame. Bug: none Change-Id: Ifc3baae2f9a49913a55a7de2de9507102edd0295 Reviewed-on: https://webrtc-review.googlesource.com/65981 Reviewed-by: Rasmus Brandt <brandtr@webrtc.org> Commit-Queue: Sergey Silkin <ssilkin@webrtc.org> Cr-Commit-Position: refs/heads/master@{#22720}
This commit is contained in:

committed by
Commit Bot

parent
ae3f02de10
commit
c89eed92ad
@ -181,7 +181,8 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
|
||||
first_encoded_frame_(num_simulcast_or_spatial_layers_, true),
|
||||
last_encoded_frame_num_(num_simulcast_or_spatial_layers_),
|
||||
first_decoded_frame_(num_simulcast_or_spatial_layers_, true),
|
||||
last_decoded_frame_num_(num_simulcast_or_spatial_layers_) {
|
||||
last_decoded_frame_num_(num_simulcast_or_spatial_layers_),
|
||||
post_encode_time_ns_(0) {
|
||||
// Sanity checks.
|
||||
RTC_CHECK(rtc::TaskQueue::Current())
|
||||
<< "VideoProcessor must be run on a task queue.";
|
||||
@ -258,6 +259,8 @@ void VideoProcessor::ProcessFrame() {
|
||||
}
|
||||
last_inputed_timestamp_ = timestamp;
|
||||
|
||||
post_encode_time_ns_ = 0;
|
||||
|
||||
// Create frame statistics object for all simulcast/spatial layers.
|
||||
for (size_t simulcast_svc_idx = 0;
|
||||
simulcast_svc_idx < num_simulcast_or_spatial_layers_;
|
||||
@ -348,8 +351,8 @@ void VideoProcessor::FrameEncoded(
|
||||
|
||||
// Update frame statistics.
|
||||
frame_stat->encoding_successful = true;
|
||||
frame_stat->encode_time_us =
|
||||
GetElapsedTimeMicroseconds(frame_stat->encode_start_ns, encode_stop_ns);
|
||||
frame_stat->encode_time_us = GetElapsedTimeMicroseconds(
|
||||
frame_stat->encode_start_ns, encode_stop_ns - post_encode_time_ns_);
|
||||
frame_stat->target_bitrate_kbps = (bitrate_allocation_.GetTemporalLayerSum(
|
||||
simulcast_svc_idx, temporal_idx) +
|
||||
500) /
|
||||
@ -384,6 +387,12 @@ void VideoProcessor::FrameEncoded(
|
||||
->WriteFrame(*encoded_image_for_decode,
|
||||
config_.codec_settings.codecType));
|
||||
}
|
||||
|
||||
if (!config_.IsAsyncCodec()) {
|
||||
// To get pure encode time for next layers, measure time spent in encode
|
||||
// callback and subtract it from encode time of next layers.
|
||||
post_encode_time_ns_ += rtc::TimeNanos() - encode_stop_ns;
|
||||
}
|
||||
}
|
||||
|
||||
void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame) {
|
||||
|
@ -230,6 +230,11 @@ class VideoProcessor {
|
||||
std::map<size_t, size_t> frame_wxh_to_simulcast_svc_idx_
|
||||
RTC_GUARDED_BY(sequence_checker_);
|
||||
|
||||
// Time spent in frame encode callback. It is accumulated for layers and
|
||||
// reset when frame encode starts. When next layer is encoded post-encode time
|
||||
// is subtracted from measured encode time. Thus we get pure encode time.
|
||||
int64_t post_encode_time_ns_ RTC_GUARDED_BY(sequence_checker_);
|
||||
|
||||
// This class must be operated on a TaskQueue.
|
||||
rtc::SequencedTaskChecker sequence_checker_;
|
||||
|
||||
|
Reference in New Issue
Block a user