Remove VCMEncodedFrameCallback and VCMGenericEncoder

This CL removes VCMEncodedFrameCallback and VCMGenericEncoder,
folding most of their functionality directly into
VideoStreamEncoder. The parts related to timing frames are
extracted into a new class, FrameEncodeTimer, that handles them
explicitly.

Bug: webrtc:10164
Change-Id: I9b26f734473b659e4093c84c09fb0ed441290e40
Reviewed-on: https://webrtc-review.googlesource.com/c/124122
Commit-Queue: Erik Språng <sprang@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26862}
Author: Erik Språng
Date: 2019-02-26 18:31:00 +01:00
Committed by: Commit Bot
Parent: c9d0b08982
Commit: 6a7baa7d0f

10 changed files with 609 additions and 726 deletions
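
For orientation before the per-file diffs: the new FrameEncodeTimer (full
header and implementation below) takes over the timing-frame bookkeeping that
VCMEncodedFrameCallback used to do. A minimal sketch of the call sequence
VideoStreamEncoder now drives, using only names introduced in this CL (sink,
send_codec and the rate values are placeholders):

// sink is any EncodedImageCallback; here it only receives OnDroppedFrame().
FrameEncodeTimer encode_timer(&sink);
encode_timer.OnEncoderInit(send_codec, /*internal_source=*/false);
encode_timer.OnSetRates(bitrate_allocation, /*framerate_fps=*/30);
// Per frame, on the encoder queue, just before encoder_->Encode():
encode_timer.OnEncodeStarted(frame.timestamp(), frame.render_time_ms());
// In OnEncodedImage(), possibly on the encoder's callback thread:
encode_timer.FillTimingInfo(spatial_idx, &image_copy, rtc::TimeMillis());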

modules/video_coding/BUILD.gn

@ -113,8 +113,6 @@ rtc_static_library("video_coding") {
"frame_object.h",
"generic_decoder.cc",
"generic_decoder.h",
"generic_encoder.cc",
"generic_encoder.h",
"h264_sprop_parameter_sets.cc",
"h264_sprop_parameter_sets.h",
"h264_sps_pps_tracker.cc",
@ -844,7 +842,6 @@ if (rtc_include_tests) {
"decoding_state_unittest.cc",
"fec_controller_unittest.cc",
"frame_buffer2_unittest.cc",
"generic_encoder_unittest.cc",
"h264_sprop_parameter_sets_unittest.cc",
"h264_sps_pps_tracker_unittest.cc",
"histogram_unittest.cc",

modules/video_coding/generic_encoder.cc (deleted)

@ -1,407 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/generic_encoder.h"
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/types/optional.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_content_type.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video/video_timing.h"
#include "modules/include/module_common_types_public.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/alr_experiment.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
namespace {
const int kMessagesThrottlingThreshold = 2;
const int kThrottleRatio = 100000;
} // namespace
VCMEncodedFrameCallback::TimingFramesLayerInfo::TimingFramesLayerInfo() {}
VCMEncodedFrameCallback::TimingFramesLayerInfo::~TimingFramesLayerInfo() {}
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
VCMEncodedFrameCallback* encoded_frame_callback,
bool internal_source)
: encoder_(encoder),
vcm_encoded_frame_callback_(encoded_frame_callback),
internal_source_(internal_source),
input_frame_rate_(0),
streams_or_svc_num_(0),
codec_type_(VideoCodecType::kVideoCodecGeneric) {}
VCMGenericEncoder::~VCMGenericEncoder() {}
int32_t VCMGenericEncoder::Release() {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
TRACE_EVENT0("webrtc", "VCMGenericEncoder::Release");
return encoder_->Release();
}
int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t number_of_cores,
size_t max_payload_size) {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
TRACE_EVENT0("webrtc", "VCMGenericEncoder::InitEncode");
streams_or_svc_num_ = settings->numberOfSimulcastStreams;
codec_type_ = settings->codecType;
if (settings->codecType == kVideoCodecVP9) {
streams_or_svc_num_ = settings->VP9().numberOfSpatialLayers;
}
if (streams_or_svc_num_ == 0)
streams_or_svc_num_ = 1;
vcm_encoded_frame_callback_->SetTimingFramesThresholds(
settings->timing_frame_thresholds);
vcm_encoded_frame_callback_->OnFrameRateChanged(settings->maxFramerate);
if (encoder_->InitEncode(settings, number_of_cores, max_payload_size) != 0) {
RTC_LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
"codec type: "
<< CodecTypeToPayloadString(settings->codecType) << " ("
<< settings->codecType << ")";
return -1;
}
vcm_encoded_frame_callback_->Reset();
encoder_->RegisterEncodeCompleteCallback(vcm_encoded_frame_callback_);
return 0;
}
int32_t VCMGenericEncoder::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific,
const std::vector<FrameType>& frame_types) {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
frame.timestamp());
for (FrameType frame_type : frame_types)
RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);
for (size_t i = 0; i < streams_or_svc_num_; ++i)
vcm_encoded_frame_callback_->OnEncodeStarted(frame.timestamp(),
frame.render_time_ms(), i);
return encoder_->Encode(frame, codec_specific, &frame_types);
}
void VCMGenericEncoder::SetEncoderParameters(
const VideoBitrateAllocation& target_bitrate,
uint32_t input_frame_rate) {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
bool rates_have_changed;
{
rtc::CritScope lock(&params_lock_);
rates_have_changed = target_bitrate != bitrate_allocation_ ||
input_frame_rate != input_frame_rate_;
bitrate_allocation_ = target_bitrate;
input_frame_rate_ = input_frame_rate;
}
if (rates_have_changed) {
int res = encoder_->SetRateAllocation(target_bitrate, input_frame_rate);
if (res != 0) {
RTC_LOG(LS_WARNING) << "Error set encoder rate (total bitrate bps = "
<< target_bitrate.get_sum_bps()
<< ", framerate = " << input_frame_rate
<< "): " << res;
}
vcm_encoded_frame_callback_->OnFrameRateChanged(input_frame_rate);
for (size_t i = 0; i < streams_or_svc_num_; ++i) {
vcm_encoded_frame_callback_->OnTargetBitrateChanged(
target_bitrate.GetSpatialLayerSum(i) / 8, i);
}
}
}
int32_t VCMGenericEncoder::RequestFrame(
const std::vector<FrameType>& frame_types) {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
// TODO(nisse): Used only with internal source. Delete as soon as
// that feature is removed. The only implementation I've been able
// to find ignores what's in the frame. With one exception: It seems
// a few test cases, e.g.,
// VideoSendStreamTest.VideoSendStreamStopSetEncoderRateToZero, set
// internal_source to true and use FakeEncoder. And the latter will
// happily encode this 1x1 frame and pass it on down the pipeline.
return encoder_->Encode(VideoFrame::Builder()
.set_video_frame_buffer(I420Buffer::Create(1, 1))
.set_rotation(kVideoRotation_0)
.set_timestamp_us(0)
.build(),
NULL, &frame_types);
}
bool VCMGenericEncoder::InternalSource() const {
return internal_source_;
}
VideoEncoder::EncoderInfo VCMGenericEncoder::GetEncoderInfo() const {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
return encoder_->GetEncoderInfo();
}
VCMEncodedFrameCallback::VCMEncodedFrameCallback(
EncodedImageCallback* post_encode_callback)
: internal_source_(false),
post_encode_callback_(post_encode_callback),
framerate_(1),
last_timing_frame_time_ms_(-1),
timing_frames_thresholds_({-1, 0}),
incorrect_capture_time_logged_messages_(0),
reordered_frames_logged_messages_(0),
stalled_encoder_logged_messages_(0) {
absl::optional<AlrExperimentSettings> experiment_settings =
AlrExperimentSettings::CreateFromFieldTrial(
AlrExperimentSettings::kStrictPacingAndProbingExperimentName);
if (experiment_settings) {
experiment_groups_[0] = experiment_settings->group_id + 1;
} else {
experiment_groups_[0] = 0;
}
experiment_settings = AlrExperimentSettings::CreateFromFieldTrial(
AlrExperimentSettings::kScreenshareProbingBweExperimentName);
if (experiment_settings) {
experiment_groups_[1] = experiment_settings->group_id + 1;
} else {
experiment_groups_[1] = 0;
}
}
VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {}
void VCMEncodedFrameCallback::OnTargetBitrateChanged(
size_t bitrate_bytes_per_second,
size_t simulcast_svc_idx) {
rtc::CritScope crit(&timing_params_lock_);
if (timing_frames_info_.size() < simulcast_svc_idx + 1)
timing_frames_info_.resize(simulcast_svc_idx + 1);
timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec =
bitrate_bytes_per_second;
}
void VCMEncodedFrameCallback::OnFrameRateChanged(size_t framerate) {
rtc::CritScope crit(&timing_params_lock_);
framerate_ = framerate;
}
void VCMEncodedFrameCallback::OnEncodeStarted(uint32_t rtp_timestamp,
int64_t capture_time_ms,
size_t simulcast_svc_idx) {
if (internal_source_) {
return;
}
rtc::CritScope crit(&timing_params_lock_);
if (timing_frames_info_.size() < simulcast_svc_idx + 1)
timing_frames_info_.resize(simulcast_svc_idx + 1);
RTC_DCHECK(
timing_frames_info_[simulcast_svc_idx].encode_start_list.empty() ||
rtc::TimeDiff(capture_time_ms, timing_frames_info_[simulcast_svc_idx]
.encode_start_list.back()
.capture_time_ms) >= 0);
// If the stream is disabled due to low bandwidth, OnEncodeStarted will
// still be called and has to be ignored.
if (timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec == 0)
return;
if (timing_frames_info_[simulcast_svc_idx].encode_start_list.size() ==
kMaxEncodeStartTimeListSize) {
++stalled_encoder_logged_messages_;
if (stalled_encoder_logged_messages_ <= kMessagesThrottlingThreshold ||
stalled_encoder_logged_messages_ % kThrottleRatio == 0) {
RTC_LOG(LS_WARNING) << "Too many frames in the encode_start_list."
" Did encoder stall?";
if (stalled_encoder_logged_messages_ == kMessagesThrottlingThreshold) {
RTC_LOG(LS_WARNING) << "Too many log messages. Further stalled encoder"
"warnings will be throttled.";
}
}
post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
timing_frames_info_[simulcast_svc_idx].encode_start_list.pop_front();
}
timing_frames_info_[simulcast_svc_idx].encode_start_list.emplace_back(
rtp_timestamp, capture_time_ms, rtc::TimeMillis());
}
absl::optional<int64_t> VCMEncodedFrameCallback::ExtractEncodeStartTime(
size_t simulcast_svc_idx,
EncodedImage* encoded_image) {
absl::optional<int64_t> result;
size_t num_simulcast_svc_streams = timing_frames_info_.size();
if (simulcast_svc_idx < num_simulcast_svc_streams) {
auto encode_start_list =
&timing_frames_info_[simulcast_svc_idx].encode_start_list;
// Skip frames for which there was OnEncodeStarted but no OnEncodedImage
// call. These are dropped by encoder internally.
// Because some hardware encoders don't preserve capture timestamp we
// use RTP timestamps here.
while (!encode_start_list->empty() &&
IsNewerTimestamp(encoded_image->Timestamp(),
encode_start_list->front().rtp_timestamp)) {
post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
encode_start_list->pop_front();
}
if (encode_start_list->size() > 0 &&
encode_start_list->front().rtp_timestamp ==
encoded_image->Timestamp()) {
result.emplace(encode_start_list->front().encode_start_time_ms);
if (encoded_image->capture_time_ms_ !=
encode_start_list->front().capture_time_ms) {
// Force correct capture timestamp.
encoded_image->capture_time_ms_ =
encode_start_list->front().capture_time_ms;
++incorrect_capture_time_logged_messages_;
if (incorrect_capture_time_logged_messages_ <=
kMessagesThrottlingThreshold ||
incorrect_capture_time_logged_messages_ % kThrottleRatio == 0) {
RTC_LOG(LS_WARNING)
<< "Encoder is not preserving capture timestamps.";
if (incorrect_capture_time_logged_messages_ ==
kMessagesThrottlingThreshold) {
RTC_LOG(LS_WARNING) << "Too many log messages. Further incorrect "
"timestamps warnings will be throttled.";
}
}
}
encode_start_list->pop_front();
} else {
++reordered_frames_logged_messages_;
if (reordered_frames_logged_messages_ <= kMessagesThrottlingThreshold ||
reordered_frames_logged_messages_ % kThrottleRatio == 0) {
RTC_LOG(LS_WARNING) << "Frame with no encode started time recordings. "
"Encoder may be reordering frames "
"or not preserving RTP timestamps.";
if (reordered_frames_logged_messages_ == kMessagesThrottlingThreshold) {
RTC_LOG(LS_WARNING) << "Too many log messages. Further frames "
"reordering warnings will be throttled.";
}
}
}
}
return result;
}
void VCMEncodedFrameCallback::FillTimingInfo(size_t simulcast_svc_idx,
EncodedImage* encoded_image) {
absl::optional<size_t> outlier_frame_size;
absl::optional<int64_t> encode_start_ms;
uint8_t timing_flags = VideoSendTiming::kNotTriggered;
{
rtc::CritScope crit(&timing_params_lock_);
// Encoders with internal sources do not call OnEncodeStarted, so
// |timing_frames_info_| may not be filled here.
if (!internal_source_) {
encode_start_ms =
ExtractEncodeStartTime(simulcast_svc_idx, encoded_image);
}
if (timing_frames_info_.size() > simulcast_svc_idx) {
size_t target_bitrate =
timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec;
if (framerate_ > 0 && target_bitrate > 0) {
// framerate and target bitrate were reported by encoder.
size_t average_frame_size = target_bitrate / framerate_;
outlier_frame_size.emplace(
average_frame_size *
timing_frames_thresholds_.outlier_ratio_percent / 100);
}
}
// Outliers trigger timing frames, but do not affect scheduled timing
// frames.
if (outlier_frame_size && encoded_image->size() >= *outlier_frame_size) {
timing_flags |= VideoSendTiming::kTriggeredBySize;
}
// Check if it's time to send a timing frame.
int64_t timing_frame_delay_ms =
encoded_image->capture_time_ms_ - last_timing_frame_time_ms_;
// Trigger the threshold if this is the first frame, if too long has passed
// since the last timing frame, or if we already sent a timing frame on a
// different simulcast stream with the same capture time.
if (last_timing_frame_time_ms_ == -1 ||
timing_frame_delay_ms >= timing_frames_thresholds_.delay_ms ||
timing_frame_delay_ms == 0) {
timing_flags |= VideoSendTiming::kTriggeredByTimer;
last_timing_frame_time_ms_ = encoded_image->capture_time_ms_;
}
} // rtc::CritScope crit(&timing_params_lock_);
int64_t now_ms = rtc::TimeMillis();
// Workaround for chromoting encoder: it passes encode start and finished
// timestamps in |timing_| field, but they (together with capture timestamp)
// are not in the WebRTC clock.
if (internal_source_ && encoded_image->timing_.encode_finish_ms > 0 &&
encoded_image->timing_.encode_start_ms > 0) {
int64_t clock_offset_ms = now_ms - encoded_image->timing_.encode_finish_ms;
// Translate capture timestamp to local WebRTC clock.
encoded_image->capture_time_ms_ += clock_offset_ms;
encoded_image->SetTimestamp(
static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90));
encode_start_ms.emplace(encoded_image->timing_.encode_start_ms +
clock_offset_ms);
}
// If encode start is not available, that means the encoder uses an internal
// source. In that case the capture timestamp may come from a different clock
// with a drift relative to rtc::TimeMillis(). We can't use it for timing
// frames, because, to be sent over the network, the capture time is required
// to be less than all the other timestamps.
if (encode_start_ms) {
encoded_image->SetEncodeTime(*encode_start_ms, now_ms);
encoded_image->timing_.flags = timing_flags;
} else {
encoded_image->timing_.flags = VideoSendTiming::kInvalid;
}
}
EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image.Timestamp());
const size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
EncodedImage image_copy(encoded_image);
FillTimingInfo(spatial_idx, &image_copy);
// Piggyback ALR experiment group id and simulcast id into the content type.
uint8_t experiment_id =
experiment_groups_[videocontenttypehelpers::IsScreenshare(
image_copy.content_type_)];
// TODO(ilnik): This will force content type extension to be present even
// for realtime video. At the expense of minuscule overhead we will get
// sliced receive statistics.
RTC_CHECK(videocontenttypehelpers::SetExperimentId(&image_copy.content_type_,
experiment_id));
// We count simulcast streams from 1 on the wire. That's why we set the
// simulcast id in the content type to the actual simulcast index plus 1,
// since the value 0 on the wire is reserved for 'no simulcast stream
// specified'.
RTC_CHECK(videocontenttypehelpers::SetSimulcastId(
&image_copy.content_type_, static_cast<uint8_t>(spatial_idx + 1)));
return post_encode_callback_->OnEncodedImage(image_copy, codec_specific,
fragmentation_header);
}
} // namespace webrtc

modules/video_coding/generic_encoder.h (deleted)

@ -1,153 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
#define MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
#include <stdio.h>
#include <list>
#include <vector>
#include "api/units/data_rate.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/critical_section.h"
#include "rtc_base/race_checker.h"
namespace webrtc {
class VCMEncodedFrameCallback : public EncodedImageCallback {
public:
explicit VCMEncodedFrameCallback(EncodedImageCallback* post_encode_callback);
~VCMEncodedFrameCallback() override;
// Implements EncodedImageCallback.
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
void SetInternalSource(bool internal_source) {
internal_source_ = internal_source;
}
// Timing frames configuration methods. These 4 should be called before
// |OnEncodedImage| at least once.
void OnTargetBitrateChanged(size_t bitrate_bytes_per_sec,
size_t simulcast_svc_idx);
void OnFrameRateChanged(size_t framerate);
void OnEncodeStarted(uint32_t rtp_timestamps,
int64_t capture_time_ms,
size_t simulcast_svc_idx);
void SetTimingFramesThresholds(
const VideoCodec::TimingFrameTriggerThresholds& thresholds) {
rtc::CritScope crit(&timing_params_lock_);
timing_frames_thresholds_ = thresholds;
}
// Clears all data stored by OnEncodeStarted().
void Reset() {
rtc::CritScope crit(&timing_params_lock_);
timing_frames_info_.clear();
last_timing_frame_time_ms_ = -1;
reordered_frames_logged_messages_ = 0;
stalled_encoder_logged_messages_ = 0;
}
private:
// For non-internal-source encoders, returns encode started time and fixes
// capture timestamp for the frame, if corrupted by the encoder.
absl::optional<int64_t> ExtractEncodeStartTime(size_t simulcast_svc_idx,
EncodedImage* encoded_image)
RTC_EXCLUSIVE_LOCKS_REQUIRED(timing_params_lock_);
void FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image);
rtc::CriticalSection timing_params_lock_;
bool internal_source_;
EncodedImageCallback* const post_encode_callback_;
struct EncodeStartTimeRecord {
EncodeStartTimeRecord(uint32_t timestamp,
int64_t capture_time,
int64_t encode_start_time)
: rtp_timestamp(timestamp),
capture_time_ms(capture_time),
encode_start_time_ms(encode_start_time) {}
uint32_t rtp_timestamp;
int64_t capture_time_ms;
int64_t encode_start_time_ms;
};
struct TimingFramesLayerInfo {
TimingFramesLayerInfo();
~TimingFramesLayerInfo();
size_t target_bitrate_bytes_per_sec = 0;
std::list<EncodeStartTimeRecord> encode_start_list;
};
// Separate instance for each simulcast stream or spatial layer.
std::vector<TimingFramesLayerInfo> timing_frames_info_
RTC_GUARDED_BY(timing_params_lock_);
size_t framerate_ RTC_GUARDED_BY(timing_params_lock_);
int64_t last_timing_frame_time_ms_ RTC_GUARDED_BY(timing_params_lock_);
VideoCodec::TimingFrameTriggerThresholds timing_frames_thresholds_
RTC_GUARDED_BY(timing_params_lock_);
size_t incorrect_capture_time_logged_messages_
RTC_GUARDED_BY(timing_params_lock_);
size_t reordered_frames_logged_messages_ RTC_GUARDED_BY(timing_params_lock_);
size_t stalled_encoder_logged_messages_ RTC_GUARDED_BY(timing_params_lock_);
// Experiment groups parsed from field trials for realtime video ([0]) and
// screenshare ([1]). 0 means no group specified. Positive values are
// experiment group numbers incremented by 1.
uint8_t experiment_groups_[2];
};
class VCMGenericEncoder {
friend class VCMCodecDataBase;
public:
VCMGenericEncoder(VideoEncoder* encoder,
VCMEncodedFrameCallback* encoded_frame_callback,
bool internal_source);
~VCMGenericEncoder();
int32_t Release();
int32_t InitEncode(const VideoCodec* settings,
int32_t number_of_cores,
size_t max_payload_size);
int32_t Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific,
const std::vector<FrameType>& frame_types);
void SetEncoderParameters(const VideoBitrateAllocation& target_bitrate,
uint32_t input_frame_rate);
int32_t RequestFrame(const std::vector<FrameType>& frame_types);
bool InternalSource() const;
VideoEncoder::EncoderInfo GetEncoderInfo() const;
private:
rtc::RaceChecker race_checker_;
VideoEncoder* const encoder_ RTC_GUARDED_BY(race_checker_);
VCMEncodedFrameCallback* const vcm_encoded_frame_callback_;
const bool internal_source_;
rtc::CriticalSection params_lock_;
VideoBitrateAllocation bitrate_allocation_ RTC_GUARDED_BY(params_lock_);
uint32_t input_frame_rate_ RTC_GUARDED_BY(params_lock_);
size_t streams_or_svc_num_ RTC_GUARDED_BY(race_checker_);
VideoCodecType codec_type_ RTC_GUARDED_BY(race_checker_);
};
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_GENERIC_ENCODER_H_

modules/video_coding/video_coding_impl.h

@ -21,7 +21,6 @@
#include "modules/video_coding/decoder_database.h"
#include "modules/video_coding/frame_buffer.h"
#include "modules/video_coding/generic_decoder.h"
#include "modules/video_coding/generic_encoder.h"
#include "modules/video_coding/jitter_buffer.h"
#include "modules/video_coding/receiver.h"
#include "modules/video_coding/timing.h"

video/BUILD.gn

@ -173,6 +173,8 @@ rtc_source_set("video_stream_encoder_impl") {
"encoder_bitrate_adjuster.h",
"encoder_overshoot_detector.cc",
"encoder_overshoot_detector.h",
"frame_encode_timer.cc",
"frame_encode_timer.h",
"overuse_frame_detector.cc",
"overuse_frame_detector.h",
"video_stream_encoder.cc",
@ -195,7 +197,9 @@ rtc_source_set("video_stream_encoder_impl") {
"../api/video:video_stream_encoder",
"../api/video_codecs:video_codecs_api",
"../common_video:common_video",
"../modules:module_api_public",
"../modules/video_coding",
"../modules/video_coding:video_codec_interface",
"../modules/video_coding:video_coding_utility",
"../modules/video_coding:webrtc_vp9_helpers",
"../rtc_base:checks",
@ -208,6 +212,7 @@ rtc_source_set("video_stream_encoder_impl") {
"../rtc_base:rtc_task_queue",
"../rtc_base:sequenced_task_checker",
"../rtc_base:timeutils",
"../rtc_base/experiments:alr_experiment",
"../rtc_base/experiments:quality_scaling_experiment",
"../rtc_base/experiments:rate_control_settings",
"../rtc_base/system:fallthrough",
@ -480,6 +485,7 @@ if (rtc_include_tests) {
"end_to_end_tests/ssrc_tests.cc",
"end_to_end_tests/stats_tests.cc",
"end_to_end_tests/transport_feedback_tests.cc",
"frame_encode_timer_unittest.cc",
"overuse_frame_detector_unittest.cc",
"picture_id_tests.cc",
"quality_scaling_tests.cc",

video/frame_encode_timer.cc (new file, 256 lines)

@ -0,0 +1,256 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "video/frame_encode_timer.h"
#include <algorithm>
#include "modules/include/module_common_types_public.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
namespace webrtc {
namespace {
const int kMessagesThrottlingThreshold = 2;
const int kThrottleRatio = 100000;
} // namespace
FrameEncodeTimer::TimingFramesLayerInfo::TimingFramesLayerInfo() = default;
FrameEncodeTimer::TimingFramesLayerInfo::~TimingFramesLayerInfo() = default;
FrameEncodeTimer::FrameEncodeTimer(EncodedImageCallback* frame_drop_callback)
: frame_drop_callback_(frame_drop_callback),
internal_source_(false),
framerate_fps_(0),
last_timing_frame_time_ms_(-1),
incorrect_capture_time_logged_messages_(0),
reordered_frames_logged_messages_(0),
stalled_encoder_logged_messages_(0) {
codec_settings_.timing_frame_thresholds = {-1, 0};
}
FrameEncodeTimer::~FrameEncodeTimer() {}
void FrameEncodeTimer::OnEncoderInit(const VideoCodec& codec,
bool internal_source) {
rtc::CritScope cs(&lock_);
codec_settings_ = codec;
internal_source_ = internal_source;
}
void FrameEncodeTimer::OnSetRates(
const VideoBitrateAllocation& bitrate_allocation,
uint32_t framerate_fps) {
rtc::CritScope cs(&lock_);
framerate_fps_ = framerate_fps;
const size_t num_spatial_layers = NumSpatialLayers();
if (timing_frames_info_.size() < num_spatial_layers) {
timing_frames_info_.resize(num_spatial_layers);
}
for (size_t i = 0; i < num_spatial_layers; ++i) {
timing_frames_info_[i].target_bitrate_bytes_per_sec =
bitrate_allocation.GetSpatialLayerSum(i) / 8;
}
}
void FrameEncodeTimer::OnEncodeStarted(uint32_t rtp_timestamp,
int64_t capture_time_ms) {
rtc::CritScope cs(&lock_);
if (internal_source_) {
return;
}
const size_t num_spatial_layers = NumSpatialLayers();
timing_frames_info_.resize(num_spatial_layers);
for (size_t si = 0; si < num_spatial_layers; ++si) {
RTC_DCHECK(
timing_frames_info_[si].encode_start_list.empty() ||
rtc::TimeDiff(
capture_time_ms,
timing_frames_info_[si].encode_start_list.back().capture_time_ms) >=
0);
// If the stream is disabled due to low bandwidth, OnEncodeStarted will
// still be called and has to be ignored.
if (timing_frames_info_[si].target_bitrate_bytes_per_sec == 0)
return;
if (timing_frames_info_[si].encode_start_list.size() ==
kMaxEncodeStartTimeListSize) {
++stalled_encoder_logged_messages_;
if (stalled_encoder_logged_messages_ <= kMessagesThrottlingThreshold ||
stalled_encoder_logged_messages_ % kThrottleRatio == 0) {
RTC_LOG(LS_WARNING) << "Too many frames in the encode_start_list."
" Did encoder stall?";
if (stalled_encoder_logged_messages_ == kMessagesThrottlingThreshold) {
RTC_LOG(LS_WARNING)
<< "Too many log messages. Further stalled encoder"
"warnings will be throttled.";
}
}
frame_drop_callback_->OnDroppedFrame(
EncodedImageCallback::DropReason::kDroppedByEncoder);
timing_frames_info_[si].encode_start_list.pop_front();
}
timing_frames_info_[si].encode_start_list.emplace_back(
rtp_timestamp, capture_time_ms, rtc::TimeMillis());
}
}
void FrameEncodeTimer::FillTimingInfo(size_t simulcast_svc_idx,
EncodedImage* encoded_image,
int64_t encode_done_ms) {
rtc::CritScope cs(&lock_);
absl::optional<size_t> outlier_frame_size;
absl::optional<int64_t> encode_start_ms;
uint8_t timing_flags = VideoSendTiming::kNotTriggered;
// Encoders with internal sources do not call OnEncodeStarted, so
// |timing_frames_info_| may not be filled here.
if (!internal_source_) {
encode_start_ms = ExtractEncodeStartTime(simulcast_svc_idx, encoded_image);
}
if (timing_frames_info_.size() > simulcast_svc_idx) {
size_t target_bitrate =
timing_frames_info_[simulcast_svc_idx].target_bitrate_bytes_per_sec;
if (framerate_fps_ > 0 && target_bitrate > 0) {
// framerate and target bitrate were reported by encoder.
size_t average_frame_size = target_bitrate / framerate_fps_;
outlier_frame_size.emplace(
average_frame_size *
codec_settings_.timing_frame_thresholds.outlier_ratio_percent / 100);
}
}
// Outliers trigger timing frames, but do not affect scheduled timing
// frames.
if (outlier_frame_size && encoded_image->size() >= *outlier_frame_size) {
timing_flags |= VideoSendTiming::kTriggeredBySize;
}
// Check if it's time to send a timing frame.
int64_t timing_frame_delay_ms =
encoded_image->capture_time_ms_ - last_timing_frame_time_ms_;
// Trigger the threshold if this is the first frame, if too long has passed
// since the last timing frame, or if we already sent a timing frame on a
// different simulcast stream with the same capture time.
if (last_timing_frame_time_ms_ == -1 ||
timing_frame_delay_ms >=
codec_settings_.timing_frame_thresholds.delay_ms ||
timing_frame_delay_ms == 0) {
timing_flags |= VideoSendTiming::kTriggeredByTimer;
last_timing_frame_time_ms_ = encoded_image->capture_time_ms_;
}
// Workaround for chromoting encoder: it passes encode start and finished
// timestamps in |timing_| field, but they (together with capture timestamp)
// are not in the WebRTC clock.
if (internal_source_ && encoded_image->timing_.encode_finish_ms > 0 &&
encoded_image->timing_.encode_start_ms > 0) {
int64_t clock_offset_ms =
encode_done_ms - encoded_image->timing_.encode_finish_ms;
// Translate capture timestamp to local WebRTC clock.
encoded_image->capture_time_ms_ += clock_offset_ms;
encoded_image->SetTimestamp(
static_cast<uint32_t>(encoded_image->capture_time_ms_ * 90));
encode_start_ms.emplace(encoded_image->timing_.encode_start_ms +
clock_offset_ms);
}
// If encode start is not available, that means the encoder uses an internal
// source. In that case the capture timestamp may come from a different clock
// with a drift relative to rtc::TimeMillis(). We can't use it for timing
// frames, because, to be sent over the network, the capture time is required
// to be less than all the other timestamps.
if (encode_start_ms) {
encoded_image->SetEncodeTime(*encode_start_ms, encode_done_ms);
encoded_image->timing_.flags = timing_flags;
} else {
encoded_image->timing_.flags = VideoSendTiming::kInvalid;
}
}
void FrameEncodeTimer::Reset() {
rtc::CritScope cs(&lock_);
timing_frames_info_.clear();
last_timing_frame_time_ms_ = -1;
reordered_frames_logged_messages_ = 0;
stalled_encoder_logged_messages_ = 0;
}
absl::optional<int64_t> FrameEncodeTimer::ExtractEncodeStartTime(
size_t simulcast_svc_idx,
EncodedImage* encoded_image) {
absl::optional<int64_t> result;
size_t num_simulcast_svc_streams = timing_frames_info_.size();
if (simulcast_svc_idx < num_simulcast_svc_streams) {
auto encode_start_list =
&timing_frames_info_[simulcast_svc_idx].encode_start_list;
// Skip frames for which there was OnEncodeStarted but no OnEncodedImage
// call. These are dropped by encoder internally.
// Because some hardware encoders don't preserve capture timestamp we
// use RTP timestamps here.
while (!encode_start_list->empty() &&
IsNewerTimestamp(encoded_image->Timestamp(),
encode_start_list->front().rtp_timestamp)) {
frame_drop_callback_->OnDroppedFrame(
EncodedImageCallback::DropReason::kDroppedByEncoder);
encode_start_list->pop_front();
}
if (encode_start_list->size() > 0 &&
encode_start_list->front().rtp_timestamp ==
encoded_image->Timestamp()) {
result.emplace(encode_start_list->front().encode_start_time_ms);
if (encoded_image->capture_time_ms_ !=
encode_start_list->front().capture_time_ms) {
// Force correct capture timestamp.
encoded_image->capture_time_ms_ =
encode_start_list->front().capture_time_ms;
++incorrect_capture_time_logged_messages_;
if (incorrect_capture_time_logged_messages_ <=
kMessagesThrottlingThreshold ||
incorrect_capture_time_logged_messages_ % kThrottleRatio == 0) {
RTC_LOG(LS_WARNING)
<< "Encoder is not preserving capture timestamps.";
if (incorrect_capture_time_logged_messages_ ==
kMessagesThrottlingThreshold) {
RTC_LOG(LS_WARNING) << "Too many log messages. Further incorrect "
"timestamps warnings will be throttled.";
}
}
}
encode_start_list->pop_front();
} else {
++reordered_frames_logged_messages_;
if (reordered_frames_logged_messages_ <= kMessagesThrottlingThreshold ||
reordered_frames_logged_messages_ % kThrottleRatio == 0) {
RTC_LOG(LS_WARNING) << "Frame with no encode started time recordings. "
"Encoder may be reordering frames "
"or not preserving RTP timestamps.";
if (reordered_frames_logged_messages_ == kMessagesThrottlingThreshold) {
RTC_LOG(LS_WARNING) << "Too many log messages. Further frames "
"reordering warnings will be throttled.";
}
}
}
}
return result;
}
size_t FrameEncodeTimer::NumSpatialLayers() const {
size_t num_spatial_layers = codec_settings_.numberOfSimulcastStreams;
if (codec_settings_.codecType == kVideoCodecVP9) {
num_spatial_layers = std::max(
num_spatial_layers,
static_cast<size_t>(codec_settings_.VP9().numberOfSpatialLayers));
}
return std::max(num_spatial_layers, size_t{1});
}
} // namespace webrtc
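
To make the two timing-frame triggers in FillTimingInfo() above concrete,
here is a hedged worked example; the numbers are hypothetical, the arithmetic
mirrors the code:

// Assume OnSetRates() gave this layer 240000 bps at 30 fps, and the codec's
// timing_frame_thresholds are {delay_ms: 200, outlier_ratio_percent: 500}.
size_t target_bitrate_bytes_per_sec = 240000 / 8;  // 30000 bytes/s
size_t average_frame_size = 30000 / 30;            // 1000 bytes
size_t outlier_frame_size = 1000 * 500 / 100;      // 5000 bytes
// An encoded frame of 5000+ bytes gets kTriggeredBySize. Independently, a
// frame whose capture time is at least 200 ms after the last timing frame
// (or equal to it, i.e. another layer of the same frame) gets
// kTriggeredByTimer. If no encode start time was recorded for the frame,
// the flags end up kInvalid instead.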

video/frame_encode_timer.h (new file)

@ -0,0 +1,84 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VIDEO_FRAME_ENCODE_TIMER_H_
#define VIDEO_FRAME_ENCODE_TIMER_H_
#include <list>
#include <vector>
#include "absl/types/optional.h"
#include "api/video/encoded_image.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "rtc_base/critical_section.h"
namespace webrtc {
class FrameEncodeTimer {
public:
explicit FrameEncodeTimer(EncodedImageCallback* frame_drop_callback);
~FrameEncodeTimer();
void OnEncoderInit(const VideoCodec& codec, bool internal_source);
void OnSetRates(const VideoBitrateAllocation& bitrate_allocation,
uint32_t framerate_fps);
void OnEncodeStarted(uint32_t rtp_timestamp, int64_t capture_time_ms);
void FillTimingInfo(size_t simulcast_svc_idx,
EncodedImage* encoded_image,
int64_t encode_done_ms);
void Reset();
private:
size_t NumSpatialLayers() const RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
// For non-internal-source encoders, returns encode started time and fixes
// capture timestamp for the frame, if corrupted by the encoder.
absl::optional<int64_t> ExtractEncodeStartTime(size_t simulcast_svc_idx,
EncodedImage* encoded_image)
RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
struct EncodeStartTimeRecord {
EncodeStartTimeRecord(uint32_t timestamp,
int64_t capture_time,
int64_t encode_start_time)
: rtp_timestamp(timestamp),
capture_time_ms(capture_time),
encode_start_time_ms(encode_start_time) {}
uint32_t rtp_timestamp;
int64_t capture_time_ms;
int64_t encode_start_time_ms;
};
struct TimingFramesLayerInfo {
TimingFramesLayerInfo();
~TimingFramesLayerInfo();
size_t target_bitrate_bytes_per_sec = 0;
std::list<EncodeStartTimeRecord> encode_start_list;
};
rtc::CriticalSection lock_;
EncodedImageCallback* const frame_drop_callback_;
VideoCodec codec_settings_ RTC_GUARDED_BY(&lock_);
bool internal_source_ RTC_GUARDED_BY(&lock_);
uint32_t framerate_fps_ RTC_GUARDED_BY(&lock_);
// Separate instance for each simulcast stream or spatial layer.
std::vector<TimingFramesLayerInfo> timing_frames_info_ RTC_GUARDED_BY(&lock_);
int64_t last_timing_frame_time_ms_ RTC_GUARDED_BY(&lock_);
size_t incorrect_capture_time_logged_messages_ RTC_GUARDED_BY(&lock_);
size_t reordered_frames_logged_messages_ RTC_GUARDED_BY(&lock_);
size_t stalled_encoder_logged_messages_ RTC_GUARDED_BY(&lock_);
};
} // namespace webrtc
#endif // VIDEO_FRAME_ENCODE_TIMER_H_
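
On the consumer side of this API, whether FillTimingInfo() actually marked a
timing frame can be read back from the image's timing flags; a small sketch
(it mirrors the IsTimingFrame() helper in the unit test below):

bool IsTimingFrame(const EncodedImage& image) {
  // kInvalid: no usable encode timestamps; kNotTriggered: neither the size
  // outlier nor the delay threshold fired. Everything else is a timing frame.
  return image.timing_.flags != VideoSendTiming::kInvalid &&
         image.timing_.flags != VideoSendTiming::kNotTriggered;
}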

video/frame_encode_timer_unittest.cc (renamed from modules/video_coding/generic_encoder_unittest.cc)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@ -12,11 +12,10 @@
#include <vector>
#include "api/video/video_timing.h"
#include "modules/video_coding/generic_encoder.h"
#include "modules/video_coding/include/video_coding_defines.h"
#include "rtc_base/fake_clock.h"
#include "rtc_base/time_utils.h"
#include "test/gtest.h"
#include "video/frame_encode_timer.h"
namespace webrtc {
namespace test {
@ -30,32 +29,17 @@ inline size_t FrameSize(const size_t& min_frame_size,
class FakeEncodedImageCallback : public EncodedImageCallback {
public:
FakeEncodedImageCallback()
: last_frame_was_timing_(false),
num_frames_dropped_(0),
last_capture_timestamp_(-1) {}
FakeEncodedImageCallback() : num_frames_dropped_(0) {}
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
last_frame_was_timing_ =
encoded_image.timing_.flags != VideoSendTiming::kInvalid &&
encoded_image.timing_.flags != VideoSendTiming::kNotTriggered;
last_capture_timestamp_ = encoded_image.capture_time_ms_;
return Result(Result::OK);
}
void OnDroppedFrame(DropReason reason) override { ++num_frames_dropped_; }
bool WasTimingFrame() { return last_frame_was_timing_; }
size_t GetNumFramesDropped() { return num_frames_dropped_; }
int64_t GetLastCaptureTimestamp() { return last_capture_timestamp_; }
private:
bool last_frame_was_timing_;
size_t num_frames_dropped_;
int64_t last_capture_timestamp_;
};
enum class FrameType {
@ -64,6 +48,11 @@ enum class FrameType {
kDropped,
};
bool IsTimingFrame(const EncodedImage& image) {
return image.timing_.flags != VideoSendTiming::kInvalid &&
image.timing_.flags != VideoSendTiming::kNotTriggered;
}
// Emulates |num_frames| frames on |num_streams| streams, with capture
// timestamps increasing by 1 from 0. The size of each frame is between
// |min_frame_size| and |max_frame_size|; outliers are counted relative to
@ -76,41 +65,48 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
const int num_streams,
const int num_frames) {
FakeEncodedImageCallback sink;
VCMEncodedFrameCallback callback(&sink);
FrameEncodeTimer encode_timer(&sink);
VideoCodec codec_settings;
codec_settings.numberOfSimulcastStreams = num_streams;
codec_settings.timing_frame_thresholds = {delay_ms,
kDefaultOutlierFrameSizePercent};
encode_timer.OnEncoderInit(codec_settings, false);
const size_t kFramerate = 30;
callback.SetTimingFramesThresholds(
{delay_ms, kDefaultOutlierFrameSizePercent});
callback.OnFrameRateChanged(kFramerate);
int s, i;
VideoBitrateAllocation bitrate_allocation;
for (int si = 0; si < num_streams; ++si) {
bitrate_allocation.SetBitrate(si, 0,
average_frame_sizes[si] * 8 * kFramerate);
}
encode_timer.OnSetRates(bitrate_allocation, kFramerate);
std::vector<std::vector<FrameType>> result(num_streams);
for (s = 0; s < num_streams; ++s)
callback.OnTargetBitrateChanged(average_frame_sizes[s] * kFramerate, s);
int64_t current_timestamp = 0;
for (i = 0; i < num_frames; ++i) {
for (int i = 0; i < num_frames; ++i) {
current_timestamp += 1;
for (s = 0; s < num_streams; ++s) {
encode_timer.OnEncodeStarted(static_cast<uint32_t>(current_timestamp * 90),
current_timestamp);
for (int si = 0; si < num_streams; ++si) {
// every (5+s)-th frame is dropped on s-th stream by design.
bool dropped = i % (5 + s) == 0;
bool dropped = i % (5 + si) == 0;
EncodedImage image;
CodecSpecificInfo codec_specific;
image.Allocate(max_frame_size);
image.set_size(FrameSize(min_frame_size, max_frame_size, s, i));
image.set_size(FrameSize(min_frame_size, max_frame_size, si, i));
image.capture_time_ms_ = current_timestamp;
image.SetTimestamp(static_cast<uint32_t>(current_timestamp * 90));
image.SetSpatialIndex(s);
codec_specific.codecType = kVideoCodecGeneric;
callback.OnEncodeStarted(static_cast<uint32_t>(current_timestamp * 90),
current_timestamp, s);
image.SetSpatialIndex(si);
if (dropped) {
result[s].push_back(FrameType::kDropped);
result[si].push_back(FrameType::kDropped);
continue;
}
callback.OnEncodedImage(image, &codec_specific, nullptr);
if (sink.WasTimingFrame()) {
result[s].push_back(FrameType::kTiming);
encode_timer.FillTimingInfo(si, &image, current_timestamp);
if (IsTimingFrame(image)) {
result[si].push_back(FrameType::kTiming);
} else {
result[s].push_back(FrameType::kNormal);
result[si].push_back(FrameType::kNormal);
}
}
}
@ -118,7 +114,7 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
}
} // namespace
TEST(TestVCMEncodedFrameCallback, MarksTimingFramesPeriodicallyTogether) {
TEST(FrameEncodeTimerTest, MarksTimingFramesPeriodicallyTogether) {
const int64_t kDelayMs = 29;
const size_t kMinFrameSize = 10;
const size_t kMaxFrameSize = 20;
@ -162,7 +158,7 @@ TEST(TestVCMEncodedFrameCallback, MarksTimingFramesPeriodicallyTogether) {
}
}
TEST(TestVCMEncodedFrameCallback, MarksOutliers) {
TEST(FrameEncodeTimerTest, MarksOutliers) {
const int64_t kDelayMs = 29;
const size_t kMinFrameSize = 2495;
const size_t kMaxFrameSize = 2505;
@ -184,129 +180,147 @@ TEST(TestVCMEncodedFrameCallback, MarksOutliers) {
}
}
TEST(TestVCMEncodedFrameCallback, NoTimingFrameIfNoEncodeStartTime) {
EncodedImage image;
CodecSpecificInfo codec_specific;
TEST(FrameEncodeTimerTest, NoTimingFrameIfNoEncodeStartTime) {
int64_t timestamp = 1;
constexpr size_t kFrameSize = 500;
EncodedImage image;
image.Allocate(kFrameSize);
image.set_size(kFrameSize);
image.capture_time_ms_ = timestamp;
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
FakeEncodedImageCallback sink;
VCMEncodedFrameCallback callback(&sink);
VideoCodec::TimingFrameTriggerThresholds thresholds;
thresholds.delay_ms = 1; // Make all frames timing frames.
callback.SetTimingFramesThresholds(thresholds);
callback.OnTargetBitrateChanged(500, 0);
FrameEncodeTimer encode_timer(&sink);
VideoCodec codec_settings;
// Make all frames timing frames.
codec_settings.timing_frame_thresholds.delay_ms = 1;
encode_timer.OnEncoderInit(codec_settings, false);
VideoBitrateAllocation bitrate_allocation;
bitrate_allocation.SetBitrate(0, 0, 500000);
encode_timer.OnSetRates(bitrate_allocation, 30);
// Verify a single frame works with encode start time set.
callback.OnEncodeStarted(static_cast<uint32_t>(timestamp * 90), timestamp, 0);
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_TRUE(sink.WasTimingFrame());
encode_timer.OnEncodeStarted(static_cast<uint32_t>(timestamp * 90),
timestamp);
encode_timer.FillTimingInfo(0, &image, timestamp);
EXPECT_TRUE(IsTimingFrame(image));
// New frame, now skip OnEncodeStarted. Should not result in timing frame.
image.capture_time_ms_ = ++timestamp;
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_FALSE(sink.WasTimingFrame());
image.timing_ = EncodedImage::Timing();
encode_timer.FillTimingInfo(0, &image, timestamp);
EXPECT_FALSE(IsTimingFrame(image));
}
TEST(TestVCMEncodedFrameCallback, AdjustsCaptureTimeForInternalSourceEncoder) {
rtc::ScopedFakeClock clock;
clock.SetTimeMicros(1234567);
EncodedImage image;
CodecSpecificInfo codec_specific;
TEST(FrameEncodeTimerTest, AdjustsCaptureTimeForInternalSourceEncoder) {
const int64_t kEncodeStartDelayMs = 2;
const int64_t kEncodeFinishDelayMs = 10;
int64_t timestamp = 1;
constexpr size_t kFrameSize = 500;
int64_t timestamp = 1;
EncodedImage image;
image.Allocate(kFrameSize);
image.set_size(kFrameSize);
image.capture_time_ms_ = timestamp;
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
codec_specific.codecType = kVideoCodecGeneric;
FakeEncodedImageCallback sink;
VCMEncodedFrameCallback callback(&sink);
callback.SetInternalSource(true);
VideoCodec::TimingFrameTriggerThresholds thresholds;
thresholds.delay_ms = 1; // Make all frames timing frames.
callback.SetTimingFramesThresholds(thresholds);
callback.OnTargetBitrateChanged(500, 0);
FrameEncodeTimer encode_timer(&sink);
VideoCodec codec_settings;
// Make all frames timing frames.
codec_settings.timing_frame_thresholds.delay_ms = 1;
encode_timer.OnEncoderInit(codec_settings, true);
VideoBitrateAllocation bitrate_allocation;
bitrate_allocation.SetBitrate(0, 0, 500000);
encode_timer.OnSetRates(bitrate_allocation, 30);
// Verify a single frame without encode timestamps isn't a timing frame.
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_FALSE(sink.WasTimingFrame());
encode_timer.FillTimingInfo(0, &image, timestamp);
EXPECT_FALSE(IsTimingFrame(image));
// New frame, but this time with encode timestamps set in timing_.
// This should be a timing frame.
image.capture_time_ms_ = ++timestamp;
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
image.timing_ = EncodedImage::Timing();
image.timing_.encode_start_ms = timestamp + kEncodeStartDelayMs;
image.timing_.encode_finish_ms = timestamp + kEncodeFinishDelayMs;
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_TRUE(sink.WasTimingFrame());
const int64_t kEncodeDoneTimestamp = 1234567;
encode_timer.FillTimingInfo(0, &image, kEncodeDoneTimestamp);
EXPECT_TRUE(IsTimingFrame(image));
// Frame is captured kEncodeFinishDelayMs before it's encoded, so restored
// capture timestamp should be kEncodeFinishDelayMs in the past.
EXPECT_EQ(
sink.GetLastCaptureTimestamp(),
clock.TimeNanos() / rtc::kNumNanosecsPerMillisec - kEncodeFinishDelayMs);
EXPECT_EQ(image.capture_time_ms_,
kEncodeDoneTimestamp - kEncodeFinishDelayMs);
}
TEST(TestVCMEncodedFrameCallback, NotifiesAboutDroppedFrames) {
EncodedImage image;
CodecSpecificInfo codec_specific;
TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
const int64_t kTimestampMs1 = 47721840;
const int64_t kTimestampMs2 = 47721850;
const int64_t kTimestampMs3 = 47721860;
const int64_t kTimestampMs4 = 47721870;
codec_specific.codecType = kVideoCodecGeneric;
FakeEncodedImageCallback sink;
VCMEncodedFrameCallback callback(&sink);
FrameEncodeTimer encode_timer(&sink);
encode_timer.OnEncoderInit(VideoCodec(), false);
// Any non-zero bitrate needs to be set before the first frame.
callback.OnTargetBitrateChanged(500, 0);
VideoBitrateAllocation bitrate_allocation;
bitrate_allocation.SetBitrate(0, 0, 500000);
encode_timer.OnSetRates(bitrate_allocation, 30);
EncodedImage image;
image.capture_time_ms_ = kTimestampMs1;
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
EXPECT_EQ(0u, sink.GetNumFramesDropped());
callback.OnEncodedImage(image, &codec_specific, nullptr);
encode_timer.FillTimingInfo(0, &image, kTimestampMs1);
image.capture_time_ms_ = kTimestampMs2;
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
image.timing_ = EncodedImage::Timing();
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
// No OnEncodedImage call for timestamp2. Yet, at this moment it's not known
// that the frame with timestamp2 was dropped.
EXPECT_EQ(0u, sink.GetNumFramesDropped());
image.capture_time_ms_ = kTimestampMs3;
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
callback.OnEncodedImage(image, &codec_specific, nullptr);
image.timing_ = EncodedImage::Timing();
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
encode_timer.FillTimingInfo(0, &image, kTimestampMs3);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
image.capture_time_ms_ = kTimestampMs4;
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
callback.OnEncodedImage(image, &codec_specific, nullptr);
image.timing_ = EncodedImage::Timing();
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
encode_timer.FillTimingInfo(0, &image, kTimestampMs4);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
}
TEST(TestVCMEncodedFrameCallback, RestoresCaptureTimestamps) {
TEST(FrameEncodeTimerTest, RestoresCaptureTimestamps) {
EncodedImage image;
CodecSpecificInfo codec_specific;
const int64_t kTimestampMs = 123456;
codec_specific.codecType = kVideoCodecGeneric;
FakeEncodedImageCallback sink;
VCMEncodedFrameCallback callback(&sink);
FrameEncodeTimer encode_timer(&sink);
encode_timer.OnEncoderInit(VideoCodec(), false);
// Any non-zero bitrate needs to be set before the first frame.
callback.OnTargetBitrateChanged(500, 0);
image.capture_time_ms_ = kTimestampMs; // Incorrect timesetamp.
VideoBitrateAllocation bitrate_allocation;
bitrate_allocation.SetBitrate(0, 0, 500000);
encode_timer.OnSetRates(bitrate_allocation, 30);
image.capture_time_ms_ = kTimestampMs; // Correct timestamp.
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
callback.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_, 0);
image.capture_time_ms_ = 0; // Incorrect timesetamp.
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(kTimestampMs, sink.GetLastCaptureTimestamp());
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
image.capture_time_ms_ = 0; // Incorrect timestamp.
encode_timer.FillTimingInfo(0, &image, kTimestampMs);
EXPECT_EQ(kTimestampMs, image.capture_time_ms_);
}
} // namespace test

video/video_stream_encoder.cc

@ -25,6 +25,7 @@
#include "modules/video_coding/utility/default_video_bitrate_allocator.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/alr_experiment.h"
#include "rtc_base/experiments/quality_scaling_experiment.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/location.h"
@ -34,7 +35,6 @@
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/field_trial.h"
#include "video/overuse_frame_detector.h"
namespace webrtc {
@ -165,6 +165,26 @@ bool RequiresEncoderReset(const VideoCodec& previous_send_codec,
}
return false;
}
std::array<uint8_t, 2> GetExperimentGroups() {
std::array<uint8_t, 2> experiment_groups;
absl::optional<AlrExperimentSettings> experiment_settings =
AlrExperimentSettings::CreateFromFieldTrial(
AlrExperimentSettings::kStrictPacingAndProbingExperimentName);
if (experiment_settings) {
experiment_groups[0] = experiment_settings->group_id + 1;
} else {
experiment_groups[0] = 0;
}
experiment_settings = AlrExperimentSettings::CreateFromFieldTrial(
AlrExperimentSettings::kScreenshareProbingBweExperimentName);
if (experiment_settings) {
experiment_groups[1] = experiment_settings->group_id + 1;
} else {
experiment_groups[1] = 0;
}
return experiment_groups;
}
} // namespace
// VideoSourceProxy is responsible for ensuring thread safety between calls to
@ -415,6 +435,7 @@ VideoStreamEncoder::VideoStreamEncoder(
rate_control_settings_(RateControlSettings::ParseFromFieldTrials()),
overuse_detector_(std::move(overuse_detector)),
encoder_stats_observer_(encoder_stats_observer),
encoder_initialized_(false),
max_framerate_(-1),
pending_encoder_reconfiguration_(false),
pending_encoder_creation_(false),
@ -436,12 +457,13 @@ VideoStreamEncoder::VideoStreamEncoder(
pending_frame_post_time_us_(0),
accumulated_update_rect_{0, 0, 0, 0},
bitrate_observer_(nullptr),
last_framerate_fps_(0),
force_disable_frame_dropper_(false),
input_framerate_(kFrameRateAvergingWindowSizeMs, 1000),
pending_frame_drops_(0),
generic_encoder_(nullptr),
generic_encoder_callback_(this),
next_frame_types_(1, kVideoFrameDelta),
frame_encoder_timer_(this),
experiment_groups_(GetExperimentGroups()),
encoder_queue_("EncoderQueue") {
RTC_DCHECK(encoder_stats_observer);
RTC_DCHECK(overuse_detector_);
@ -462,10 +484,7 @@ void VideoStreamEncoder::Stop() {
overuse_detector_->StopCheckForOveruse();
rate_allocator_ = nullptr;
bitrate_observer_ = nullptr;
if (encoder_ != nullptr && generic_encoder_ != nullptr) {
encoder_->Release();
}
generic_encoder_ = nullptr;
ReleaseEncoder();
quality_scaler_ = nullptr;
shutdown_event_.Set();
});
@ -669,43 +688,39 @@ void VideoStreamEncoder::ReconfigureEncoder() {
// Encoder creation block is split in two since EncoderInfo needed to start
// CPU adaptation with the correct settings should be polled after
// encoder_->InitEncode().
if (pending_encoder_creation_) {
if (encoder_) {
encoder_->Release();
generic_encoder_ = nullptr;
}
encoder_ = settings_.encoder_factory->CreateVideoEncoder(
encoder_config_.video_format);
// TODO(nisse): What to do if creating the encoder fails? Crash,
// or just discard incoming frames?
RTC_CHECK(encoder_);
codec_info_ = settings_.encoder_factory->QueryVideoEncoder(
encoder_config_.video_format);
} else if (reset_required) {
RTC_DCHECK(encoder_);
encoder_->Release();
}
bool success = true;
if (pending_encoder_creation_ || reset_required || !generic_encoder_) {
RTC_DCHECK(encoder_);
generic_encoder_ = absl::make_unique<VCMGenericEncoder>(
encoder_.get(), &generic_encoder_callback_, HasInternalSource());
generic_encoder_callback_.SetInternalSource(HasInternalSource());
if (generic_encoder_->InitEncode(&send_codec_, number_of_cores_,
max_data_payload_length_ > 0
? max_data_payload_length_
: kDefaultPayloadSize) < 0) {
encoder_->Release();
generic_encoder_ = nullptr;
success = false;
if (pending_encoder_creation_ || reset_required) {
ReleaseEncoder();
if (pending_encoder_creation_) {
encoder_ = settings_.encoder_factory->CreateVideoEncoder(
encoder_config_.video_format);
// TODO(nisse): What to do if creating the encoder fails? Crash,
// or just discard incoming frames?
RTC_CHECK(encoder_);
codec_info_ = settings_.encoder_factory->QueryVideoEncoder(
encoder_config_.video_format);
}
if (encoder_->InitEncode(&send_codec_, number_of_cores_,
max_data_payload_length_ > 0
? max_data_payload_length_
: kDefaultPayloadSize) != 0) {
RTC_LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
"codec type: "
<< CodecTypeToPayloadString(send_codec_.codecType)
<< " (" << send_codec_.codecType << ")";
ReleaseEncoder();
success = false;
} else {
encoder_initialized_ = true;
encoder_->RegisterEncodeCompleteCallback(this);
frame_encoder_timer_.OnEncoderInit(send_codec_, HasInternalSource());
}
frame_encoder_timer_.Reset();
}
if (success) {
RTC_DCHECK(generic_encoder_);
next_frame_types_.clear();
next_frame_types_.resize(
std::max(static_cast<int>(codec.numberOfSimulcastStreams), 1),
@ -984,7 +999,7 @@ uint32_t VideoStreamEncoder::GetInputFramerateFps() {
void VideoStreamEncoder::SetEncoderRates(
const VideoBitrateAllocation& bitrate_allocation,
uint32_t framerate_fps) {
if (!generic_encoder_) {
if (!encoder_) {
return;
}
@ -1001,7 +1016,20 @@ void VideoStreamEncoder::SetEncoderRates(
}
RTC_DCHECK_GT(framerate_fps, 0);
generic_encoder_->SetEncoderParameters(bitrate_allocation, framerate_fps);
if (bitrate_allocation != last_bitrate_allocation_ ||
framerate_fps != last_framerate_fps_) {
int res = encoder_->SetRateAllocation(bitrate_allocation, framerate_fps);
if (res != 0) {
RTC_LOG(LS_WARNING) << "Error set encoder rate (total bitrate bps = "
<< bitrate_allocation.get_sum_bps()
<< ", framerate = " << framerate_fps << "): " << res;
}
frame_encoder_timer_.OnSetRates(bitrate_allocation, framerate_fps);
}
last_bitrate_allocation_ = bitrate_allocation;
last_framerate_fps_ = framerate_fps;
}
void VideoStreamEncoder::MaybeEncodeVideoFrame(const VideoFrame& video_frame,
@ -1199,7 +1227,6 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
}
encoder_info_ = info;
RTC_DCHECK(generic_encoder_);
RTC_DCHECK_EQ(send_codec_.width, out_frame.width());
RTC_DCHECK_EQ(send_codec_.height, out_frame.height());
const VideoFrameBuffer::Type buffer_type =
@ -1207,7 +1234,7 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
const bool is_buffer_type_supported =
buffer_type == VideoFrameBuffer::Type::kI420 ||
(buffer_type == VideoFrameBuffer::Type::kNative &&
encoder_info_.supports_native_handle);
info.supports_native_handle);
if (!is_buffer_type_supported) {
// This module only supports software encoding.
@ -1230,8 +1257,16 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
.set_id(out_frame.id())
.build();
}
TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
out_frame.timestamp());
frame_encoder_timer_.OnEncodeStarted(out_frame.timestamp(),
out_frame.render_time_ms());
const int32_t encode_status =
generic_encoder_->Encode(out_frame, nullptr, next_frame_types_);
encoder_->Encode(out_frame, nullptr, &next_frame_types_);
if (encode_status < 0) {
RTC_LOG(LS_ERROR) << "Failed to encode frame. Error code: "
<< encode_status;
@ -1255,9 +1290,21 @@ void VideoStreamEncoder::SendKeyFrame() {
if (HasInternalSource()) {
// Try to request the frame if we have an external encoder with
// internal source since AddVideoFrame never will be called.
RTC_DCHECK(generic_encoder_);
if (generic_encoder_->RequestFrame(next_frame_types_) ==
WEBRTC_VIDEO_CODEC_OK) {
// TODO(nisse): Used only with internal source. Delete as soon as
// that feature is removed. The only implementation I've been able
// to find ignores what's in the frame. With one exception: It seems
// a few test cases, e.g.,
// VideoSendStreamTest.VideoSendStreamStopSetEncoderRateToZero, set
// internal_source to true and use FakeEncoder. And the latter will
// happily encode this 1x1 frame and pass it on down the pipeline.
if (encoder_->Encode(VideoFrame::Builder()
.set_video_frame_buffer(I420Buffer::Create(1, 1))
.set_rotation(kVideoRotation_0)
.set_timestamp_us(0)
.build(),
nullptr,
&next_frame_types_) == WEBRTC_VIDEO_CODEC_OK) {
// Try to remove just-performed keyframe request, if stream still exists.
next_frame_types_[0] = kVideoFrameDelta;
}
@ -1268,21 +1315,44 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
const int64_t time_sent_us = rtc::TimeMicros();
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image.Timestamp());
const size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
EncodedImage image_copy(encoded_image);
frame_encoder_timer_.FillTimingInfo(
spatial_idx, &image_copy, time_sent_us / rtc::kNumMicrosecsPerMillisec);
// Piggyback ALR experiment group id and simulcast id into the content type.
const uint8_t experiment_id =
experiment_groups_[videocontenttypehelpers::IsScreenshare(
image_copy.content_type_)];
// TODO(ilnik): This will force content type extension to be present even
// for realtime video. At the expense of minuscule overhead we will get
// sliced receive statistics.
RTC_CHECK(videocontenttypehelpers::SetExperimentId(&image_copy.content_type_,
experiment_id));
// We count simulcast streams from 1 on the wire. That's why we set the
// simulcast id in the content type to the actual simulcast index plus 1,
// since the value 0 on the wire is reserved for 'no simulcast stream
// specified'.
RTC_CHECK(videocontenttypehelpers::SetSimulcastId(
&image_copy.content_type_, static_cast<uint8_t>(spatial_idx + 1)));
// Encoded is called on whatever thread the real encoder implementation runs
// on. In the case of hardware encoders, there might be several encoders
// running in parallel on different threads.
encoder_stats_observer_->OnSendEncodedImage(encoded_image,
codec_specific_info);
encoder_stats_observer_->OnSendEncodedImage(image_copy, codec_specific_info);
EncodedImageCallback::Result result =
sink_->OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
sink_->OnEncodedImage(image_copy, codec_specific_info, fragmentation);
int64_t time_sent_us = rtc::TimeMicros();
// We are only interested in propagating the meta-data about the image, not
// encoded data itself, to the post encode function. Since we cannot be sure
// the pointer will still be valid when run on the task queue, set it to null.
EncodedImage encoded_image_metadata = encoded_image;
encoded_image_metadata.set_buffer(nullptr, 0);
image_copy.set_buffer(nullptr, 0);
int temporal_index = 0;
if (codec_specific_info) {
@ -1296,7 +1366,7 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
temporal_index = 0;
}
RunPostEncode(encoded_image_metadata, time_sent_us, temporal_index);
RunPostEncode(image_copy, time_sent_us, temporal_index);
if (result.error == Result::OK) {
// In case of an internal encoder running on a separate thread, the
@ -1688,6 +1758,15 @@ bool VideoStreamEncoder::HasInternalSource() const {
return codec_info_.has_internal_source || encoder_info_.has_internal_source;
}
void VideoStreamEncoder::ReleaseEncoder() {
if (!encoder_ || !encoder_initialized_) {
return;
}
encoder_->Release();
encoder_initialized_ = false;
TRACE_EVENT0("webrtc", "VCMGenericEncoder::Release");
}
// Class holding adaptation information.
VideoStreamEncoder::AdaptCounter::AdaptCounter() {
fps_counters_.resize(kScaleReasonSize);

video/video_stream_encoder.h

@ -23,17 +23,19 @@
#include "api/video/video_stream_encoder_interface.h"
#include "api/video/video_stream_encoder_observer.h"
#include "api/video/video_stream_encoder_settings.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_encoder.h"
#include "modules/video_coding/utility/frame_dropper.h"
#include "modules/video_coding/utility/quality_scaler.h"
#include "modules/video_coding/video_coding_impl.h"
#include "rtc_base/critical_section.h"
#include "rtc_base/event.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/race_checker.h"
#include "rtc_base/rate_statistics.h"
#include "rtc_base/sequenced_task_checker.h"
#include "rtc_base/task_queue.h"
#include "video/encoder_bitrate_adjuster.h"
#include "video/frame_encode_timer.h"
#include "video/overuse_frame_detector.h"
namespace webrtc {
@ -188,6 +190,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
int64_t time_sent_us,
int temporal_index);
bool HasInternalSource() const RTC_RUN_ON(&encoder_queue_);
void ReleaseEncoder() RTC_RUN_ON(&encoder_queue_);
rtc::Event shutdown_event_;
@ -217,6 +220,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
VideoEncoderConfig encoder_config_ RTC_GUARDED_BY(&encoder_queue_);
std::unique_ptr<VideoEncoder> encoder_ RTC_GUARDED_BY(&encoder_queue_)
RTC_PT_GUARDED_BY(&encoder_queue_);
bool encoder_initialized_;
std::unique_ptr<VideoBitrateAllocator> rate_allocator_
RTC_GUARDED_BY(&encoder_queue_) RTC_PT_GUARDED_BY(&encoder_queue_);
// The maximum frame rate of the current codec configuration, as determined
@ -286,8 +290,12 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
VideoEncoder::EncoderInfo encoder_info_ RTC_GUARDED_BY(&encoder_queue_);
VideoEncoderFactory::CodecInfo codec_info_ RTC_GUARDED_BY(&encoder_queue_);
FrameDropper frame_dropper_ RTC_GUARDED_BY(&encoder_queue_);
VideoBitrateAllocation last_bitrate_allocation_
RTC_GUARDED_BY(&encoder_queue_);
uint32_t last_framerate_fps_ RTC_GUARDED_BY(&encoder_queue_);
VideoCodec send_codec_ RTC_GUARDED_BY(&encoder_queue_);
FrameDropper frame_dropper_ RTC_GUARDED_BY(&encoder_queue_);
// If frame dropper is not force disabled, frame dropping might still be
// disabled if VideoEncoder::GetEncoderInfo() indicates that the encoder has a
// trusted rate controller. This is determined on a per-frame basis, as the
@ -303,17 +311,17 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
std::unique_ptr<EncoderBitrateAdjuster> bitrate_adjuster_
RTC_GUARDED_BY(&encoder_queue_);
// TODO(webrtc:10164): Refactor/remove these VCM classes.
std::unique_ptr<VCMGenericEncoder> generic_encoder_
RTC_GUARDED_BY(&encoder_queue_);
VCMEncodedFrameCallback generic_encoder_callback_
RTC_GUARDED_BY(&encoder_queue_);
VideoCodec send_codec_ RTC_GUARDED_BY(&encoder_queue_);
// TODO(sprang): Change actually support keyframe per simulcast stream, or
// turn this into a simple bool |pending_keyframe_request_|.
std::vector<FrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);
FrameEncodeTimer frame_encoder_timer_;
// Experiment groups parsed from field trials for realtime video ([0]) and
// screenshare ([1]). 0 means no group specified. Positive values are
// experiment group numbers incremented by 1.
const std::array<uint8_t, 2> experiment_groups_;
// All public methods are proxied to |encoder_queue_|. It must be
// destroyed first to make sure no tasks are run that use other members.
rtc::TaskQueue encoder_queue_;