Reland "Add fine grained dropped video frames counters on sending side"

Add fine grained dropped video frames counters on sending side

4 new counters added to SendStatisticsProxy and reported to UMA and logs.

Bug: webrtc:8355
Change-Id: I1f9bdfea9cbf17cf38b3cb2f55d406ffdb06614f
Reviewed-on: https://webrtc-review.googlesource.com/14580
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Taylor Brandstetter <deadbeef@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20421}
Author: Ilya Nikolaevskiy
Date: 2017-10-23 10:45:37 +02:00
Committed by: Commit Bot
Parent: f0c44672df
Commit: d79314f9f9
16 changed files with 186 additions and 31 deletions
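
The description above says four new per-reason counters were added to SendStatisticsProxy and reported to UMA and logs; those statistics files are not among the hunks shown below, which cover only the sending-side detection of drops. As a rough orientation, here is a minimal, hypothetical sketch of tallying drops per reason. Only the two DropReason values visible in this diff (kDroppedByEncoder, kDroppedByMediaOptimizations) are assumed; the class and counter names are illustrative and are not WebRTC's actual SendStatisticsProxy code.

// Hypothetical, self-contained sketch: count dropped frames per reason.
#include <cstdint>
#include <iostream>

// Mirrors the EncodedImageCallback::DropReason values used in the diff below.
enum class DropReason { kDroppedByMediaOptimizations, kDroppedByEncoder };

class DroppedFrameTally {
 public:
  void OnDroppedFrame(DropReason reason) {
    switch (reason) {
      case DropReason::kDroppedByMediaOptimizations:
        ++dropped_by_media_opt_;
        break;
      case DropReason::kDroppedByEncoder:
        ++dropped_by_encoder_;
        break;
    }
  }
  uint32_t dropped_by_media_opt() const { return dropped_by_media_opt_; }
  uint32_t dropped_by_encoder() const { return dropped_by_encoder_; }

 private:
  uint32_t dropped_by_media_opt_ = 0;
  uint32_t dropped_by_encoder_ = 0;
};

int main() {
  DroppedFrameTally tally;
  tally.OnDroppedFrame(DropReason::kDroppedByEncoder);
  tally.OnDroppedFrame(DropReason::kDroppedByMediaOptimizations);
  std::cout << "encoder drops: " << tally.dropped_by_encoder() << "\n"
            << "media-opt drops: " << tally.dropped_by_media_opt() << "\n";
  return 0;
}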


@@ -65,6 +65,7 @@ int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
<< settings->plName;
return -1;
}
vcm_encoded_frame_callback_->Reset();
encoder_->RegisterEncodeCompleteCallback(vcm_encoded_frame_callback_);
return 0;
}
@@ -144,8 +145,6 @@ int32_t VCMGenericEncoder::RequestFrame(
const std::vector<FrameType>& frame_types) {
RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
for (size_t i = 0; i < streams_or_svc_num_; ++i)
vcm_encoded_frame_callback_->OnEncodeStarted(0, i);
// TODO(nisse): Used only with internal source. Delete as soon as
// that feature is removed. The only implementation I've been able
// to find ignores what's in the frame. With one exception: It seems
@@ -213,11 +212,26 @@ void VCMEncodedFrameCallback::OnFrameRateChanged(size_t framerate) {
void VCMEncodedFrameCallback::OnEncodeStarted(int64_t capture_time_ms,
size_t simulcast_svc_idx) {
if (internal_source_) {
return;
}
rtc::CritScope crit(&timing_params_lock_);
if (timing_frames_info_.size() < simulcast_svc_idx + 1)
timing_frames_info_.resize(simulcast_svc_idx + 1);
timing_frames_info_[simulcast_svc_idx].encode_start_time_ms[capture_time_ms] =
rtc::TimeMillis();
RTC_DCHECK(
timing_frames_info_[simulcast_svc_idx].encode_start_list.empty() ||
rtc::TimeDiff(capture_time_ms, timing_frames_info_[simulcast_svc_idx]
.encode_start_list.back()
.capture_time_ms) >= 0);
if (timing_frames_info_[simulcast_svc_idx].encode_start_list.size() ==
kMaxEncodeStartTimeListSize) {
LOG(LS_WARNING) << "Too many frames in the encode_start_list."
" Did encoder stall?";
post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
timing_frames_info_[simulcast_svc_idx].encode_start_list.pop_front();
}
timing_frames_info_[simulcast_svc_idx].encode_start_list.emplace_back(
capture_time_ms, rtc::TimeMillis());
}
EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
@@ -242,27 +256,29 @@ EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
rtc::Optional<int64_t> encode_start_ms;
size_t num_simulcast_svc_streams = 1;
uint8_t timing_flags = TimingFrameFlags::kInvalid;
{
if (!internal_source_) {
rtc::CritScope crit(&timing_params_lock_);
// Encoders with internal sources do not call OnEncodeStarted and
// OnFrameRateChanged. |timing_frames_info_| may not be filled here.
num_simulcast_svc_streams = timing_frames_info_.size();
if (simulcast_svc_idx < num_simulcast_svc_streams) {
auto encode_start_map =
&timing_frames_info_[simulcast_svc_idx].encode_start_time_ms;
auto it = encode_start_map->find(encoded_image.capture_time_ms_);
if (it != encode_start_map->end()) {
encode_start_ms.emplace(it->second);
// Assuming all encoders do not reorder frames within a single stream,
// there may be some dropped frames with smaller timestamps. These
// should be purged.
encode_start_map->erase(encode_start_map->begin(), it);
encode_start_map->erase(it);
} else {
// Encoder has an internal source: free our records of any frames, just
// in case, to free memory.
encode_start_map->clear();
auto encode_start_list =
&timing_frames_info_[simulcast_svc_idx].encode_start_list;
// Skip frames for which there was an OnEncodeStarted but no OnEncodedImage
// call. These were dropped by the encoder internally.
while (!encode_start_list->empty() &&
encode_start_list->front().capture_time_ms <
encoded_image.capture_time_ms_) {
post_encode_callback_->OnDroppedFrame(DropReason::kDroppedByEncoder);
encode_start_list->pop_front();
}
if (encode_start_list->size() > 0 &&
encode_start_list->front().capture_time_ms ==
encoded_image.capture_time_ms_) {
encode_start_ms.emplace(
encode_start_list->front().encode_start_time_ms);
encode_start_list->pop_front();
}
size_t target_bitrate =
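
The generic_encoder changes above replace the per-layer map of encode start times with a bounded, capture-time-ordered list: OnEncodeStarted() appends a record, OnEncodedImage() pops matching and older records, and every record that never gets a matching encoded image is reported as dropped by the encoder (including the overflow case, which suggests a stalled encoder). The following is a self-contained sketch of that matching idea under simplifying assumptions: no simulcast/SVC index, no locking, a plain std::deque, and an invented EncodeDropDetector class that does not exist in WebRTC.

// Illustrative sketch only: infer encoder-internal drops by matching
// encode-start records against finished frames by capture time.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>

struct EncodeStartRecord {
  int64_t capture_time_ms;
  int64_t encode_start_time_ms;
};

class EncodeDropDetector {
 public:
  // Bounded like kMaxEncodeStartTimeListSize in the diff (same value assumed).
  static constexpr std::size_t kMaxListSize = 50;

  void OnEncodeStarted(int64_t capture_time_ms, int64_t now_ms) {
    if (records_.size() == kMaxListSize) {
      // The encoder appears stalled; count the oldest pending frame as dropped.
      ++dropped_;
      records_.pop_front();
    }
    records_.push_back({capture_time_ms, now_ms});
  }

  // Returns the recorded encode start time, or -1 if the frame is unknown.
  int64_t OnEncodedImage(int64_t capture_time_ms) {
    // Pending frames with older capture times never produced an encoded
    // image, so the encoder must have dropped them internally.
    while (!records_.empty() &&
           records_.front().capture_time_ms < capture_time_ms) {
      ++dropped_;
      records_.pop_front();
    }
    if (!records_.empty() &&
        records_.front().capture_time_ms == capture_time_ms) {
      int64_t start_ms = records_.front().encode_start_time_ms;
      records_.pop_front();
      return start_ms;
    }
    return -1;
  }

  int dropped() const { return dropped_; }

 private:
  std::deque<EncodeStartRecord> records_;
  int dropped_ = 0;
};

int main() {
  EncodeDropDetector detector;
  detector.OnEncodeStarted(100, 1000);
  detector.OnEncodeStarted(110, 1010);  // Never finishes: counted as a drop.
  detector.OnEncodeStarted(120, 1020);
  detector.OnEncodedImage(100);
  detector.OnEncodedImage(120);
  std::cout << "dropped frames: " << detector.dropped() << "\n";  // Prints 1.
  return 0;
}

This mirrors the behavior exercised by the NotifiesAboutDroppedFrames unit test further down: the frames captured at 100 and 120 are encoded, and the one at 110 is only inferred as dropped once a later frame finishes encoding.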


@@ -12,7 +12,7 @@
#define MODULES_VIDEO_CODING_GENERIC_ENCODER_H_
#include <stdio.h>
#include <map>
#include <list>
#include <vector>
#include "modules/video_coding/include/video_codec_interface.h"
@@ -65,15 +65,29 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
timing_frames_thresholds_ = thresholds;
}
// Clears all data stored by OnEncodeStarted().
void Reset() {
rtc::CritScope crit(&timing_params_lock_);
timing_frames_info_.clear();
last_timing_frame_time_ms_ = -1;
}
private:
rtc::CriticalSection timing_params_lock_;
bool internal_source_;
EncodedImageCallback* const post_encode_callback_;
media_optimization::MediaOptimization* const media_opt_;
struct EncodeStartTimeRecord {
EncodeStartTimeRecord(int64_t capture_time, int64_t encode_start_time)
: capture_time_ms(capture_time),
encode_start_time_ms(encode_start_time) {}
int64_t capture_time_ms;
int64_t encode_start_time_ms;
};
struct TimingFramesLayerInfo {
size_t target_bitrate_bytes_per_sec = 0;
std::map<int64_t, int64_t> encode_start_time_ms;
std::list<EncodeStartTimeRecord> encode_start_list;
};
// Separate instance for each simulcast stream or spatial layer.
std::vector<TimingFramesLayerInfo> timing_frames_info_
@@ -111,7 +125,6 @@ class VCMGenericEncoder {
int32_t SetPeriodicKeyFrames(bool enable);
int32_t RequestFrame(const std::vector<FrameType>& frame_types);
bool InternalSource() const;
void OnDroppedFrame();
bool SupportsNativeHandle() const;
private:


@@ -27,7 +27,8 @@ inline size_t FrameSize(const size_t& min_frame_size,
class FakeEncodedImageCallback : public EncodedImageCallback {
public:
FakeEncodedImageCallback() : last_frame_was_timing_(false) {}
FakeEncodedImageCallback()
: last_frame_was_timing_(false), num_frames_dropped_(0) {}
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
@@ -36,10 +37,15 @@ class FakeEncodedImageCallback : public EncodedImageCallback {
return Result(Result::OK);
};
void OnDroppedFrame(DropReason reason) override { ++num_frames_dropped_; }
bool WasTimingFrame() { return last_frame_was_timing_; }
size_t GetNumFramesDropped() { return num_frames_dropped_; }
private:
bool last_frame_was_timing_;
size_t num_frames_dropped_;
};
enum class FrameType {
@@ -190,5 +196,34 @@ TEST(TestVCMEncodedFrameCallback, NoTimingFrameIfNoEncodeStartTime) {
EXPECT_FALSE(sink.WasTimingFrame());
}
TEST(TestVCMEncodedFrameCallback, NotifiesAboutDroppedFrames) {
EncodedImage image;
CodecSpecificInfo codec_specific;
const int64_t timestamp1 = 100;
const int64_t timestamp2 = 110;
const int64_t timestamp3 = 120;
const int64_t timestamp4 = 130;
codec_specific.codecType = kVideoCodecGeneric;
codec_specific.codecSpecific.generic.simulcast_idx = 0;
FakeEncodedImageCallback sink;
VCMEncodedFrameCallback callback(&sink, nullptr);
callback.OnEncodeStarted(timestamp1, 0);
EXPECT_EQ(0u, sink.GetNumFramesDropped());
image.capture_time_ms_ = timestamp1;
callback.OnEncodedImage(image, &codec_specific, nullptr);
callback.OnEncodeStarted(timestamp2, 0);
// No OnEncodedImage call for timestamp2. Yet, at this moment it is not known
// that the frame with timestamp2 was dropped.
EXPECT_EQ(0u, sink.GetNumFramesDropped());
callback.OnEncodeStarted(timestamp3, 0);
image.capture_time_ms_ = timestamp3;
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
callback.OnEncodeStarted(timestamp4, 0);
image.capture_time_ms_ = timestamp4;
callback.OnEncodedImage(image, &codec_specific, nullptr);
EXPECT_EQ(1u, sink.GetNumFramesDropped());
}
} // namespace test
} // namespace webrtc


@@ -45,6 +45,8 @@ enum {
// |kDefaultOutlierFrameSizePercent| of the average frame size.
kDefaultTimingFramesDelayMs = 200,
kDefaultOutlierFrameSizePercent = 250,
// Maximum number of frames for which we store encode start timing information.
kMaxEncodeStartTimeListSize = 50,
};
enum VCMVideoProtection {


@@ -324,7 +324,8 @@ int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
<< " loss rate " << encoder_params.loss_rate << " rtt "
<< encoder_params.rtt << " input frame rate "
<< encoder_params.input_frame_rate;
post_encode_callback_->OnDroppedFrame();
post_encode_callback_->OnDroppedFrame(
EncodedImageCallback::DropReason::kDroppedByMediaOptimizations);
return VCM_OK;
}
// TODO(pbos): Make sure setting send codec is synchronized with video
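
For completeness: the hunk above shows the pre-encode path in VideoSender::AddVideoFrame, where a frame is dropped before it ever reaches the encoder and is now reported with DropReason::kDroppedByMediaOptimizations. Below is a minimal sketch of such a pre-encode gate, assuming a plain std::function stands in for EncodedImageCallback and that the caller supplies the (hypothetical) enough_bitrate decision.

// Hypothetical sketch: skip encoding when the rate controller says no,
// and report the drop so sending-side statistics can count it.
#include <functional>
#include <iostream>
#include <utility>

enum class DropReason { kDroppedByMediaOptimizations, kDroppedByEncoder };

class PreEncodeGate {
 public:
  explicit PreEncodeGate(std::function<void(DropReason)> on_dropped)
      : on_dropped_(std::move(on_dropped)) {}

  // Returns true if the frame should be encoded, false if it was dropped.
  bool MaybeEncode(bool enough_bitrate) {
    if (!enough_bitrate) {
      on_dropped_(DropReason::kDroppedByMediaOptimizations);
      return false;
    }
    return true;
  }

 private:
  std::function<void(DropReason)> on_dropped_;
};

int main() {
  PreEncodeGate gate([](DropReason) { std::cout << "frame dropped\n"; });
  gate.MaybeEncode(/*enough_bitrate=*/false);  // Prints "frame dropped".
  gate.MaybeEncode(/*enough_bitrate=*/true);   // Frame would be encoded.
  return 0;
}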