Collecting encode_time_ms for each frame.

Also, in the Sample struct, replacing double with the original types. It makes more sense to save the original data as truthfully as possible and convert it to double later if necessary (in the plot script).

Review URL: https://codereview.webrtc.org/1374233002
Cr-Commit-Position: refs/heads/master@{#10184}
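For readers skimming the change, a rough sketch of how the new hook is meant to be consumed outside the tests. Only the EncodingTimeObserver interface and the VideoSendStream::Config::encoding_time_observer field (see the diffs below) come from this change; the logger class, the std::mutex, and the include path are illustrative assumptions.

// Sketch only: a standalone observer that records per-frame encode times.
#include <cstdint>
#include <map>
#include <mutex>

#include "webrtc/video_send_stream.h"  // assumed header for the new interface

class EncodeTimeLogger : public webrtc::EncodingTimeObserver {
 public:
  // Called once per encoded frame from the send-side encoder thread, with the
  // frame's capture NTP timestamp and the time spent encoding it.
  void OnReportEncodedTime(int64_t ntp_time_ms, int encode_time_ms) override {
    std::lock_guard<std::mutex> lock(lock_);
    encode_time_ms_by_ntp_[ntp_time_ms] = encode_time_ms;
  }

 private:
  std::mutex lock_;  // The callback may race with readers on other threads.
  std::map<int64_t, int> encode_time_ms_by_ntp_;
};

// Wiring, given the VideoSendStream::Config that will be used to create the
// send stream (this mirrors what VideoQualityTest does with its analyzer):
//   EncodeTimeLogger logger;
//   send_config.encoding_time_observer = &logger;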
@@ -34,15 +34,16 @@ import numpy
 # Fields
 DROPPED = 0
-INPUT_TIME = 1  # ms
-SEND_TIME = 2  # ms
-RECV_TIME = 3  # ms
-ENCODED_FRAME_SIZE = 4  # bytes
-PSNR = 5
-SSIM = 6
-RENDER_TIME = 7  # ms
+INPUT_TIME = 1  # ms (timestamp)
+SEND_TIME = 2  # ms (timestamp)
+RECV_TIME = 3  # ms (timestamp)
+RENDER_TIME = 4  # ms (timestamp)
+ENCODED_FRAME_SIZE = 5  # bytes
+PSNR = 6
+SSIM = 7
+ENCODE_TIME = 8  # ms (time interval)

-TOTAL_RAW_FIELDS = 8
+TOTAL_RAW_FIELDS = 9

 SENDER_TIME = TOTAL_RAW_FIELDS + 0
 RECEIVER_TIME = TOTAL_RAW_FIELDS + 1

@@ -66,6 +67,7 @@ _fields = [
     (PSNR, "psnr", "PSNR"),
     (SSIM, "ssim", "SSIM"),
     (RENDER_TIME, "render_time_ms", "render time"),
+    (ENCODE_TIME, "encode_time_ms", "encode time"),
     # Auto-generated
     (SENDER_TIME, "sender_time", "sender time"),
     (RECEIVER_TIME, "receiver_time", "receiver time"),
@@ -29,11 +29,13 @@
 namespace webrtc {

 namespace internal {
-VideoCaptureInput::VideoCaptureInput(ProcessThread* module_process_thread,
-                                     VideoCaptureCallback* frame_callback,
-                                     VideoRenderer* local_renderer,
-                                     SendStatisticsProxy* stats_proxy,
-                                     CpuOveruseObserver* overuse_observer)
+VideoCaptureInput::VideoCaptureInput(
+    ProcessThread* module_process_thread,
+    VideoCaptureCallback* frame_callback,
+    VideoRenderer* local_renderer,
+    SendStatisticsProxy* stats_proxy,
+    CpuOveruseObserver* overuse_observer,
+    EncodingTimeObserver* encoding_time_observer)
     : capture_cs_(CriticalSectionWrapper::CreateCriticalSection()),
       module_process_thread_(module_process_thread),
       frame_callback_(frame_callback),

@@ -52,7 +54,8 @@ VideoCaptureInput::VideoCaptureInput(ProcessThread* module_process_thread,
       overuse_detector_(new OveruseFrameDetector(Clock::GetRealTimeClock(),
                                                  CpuOveruseOptions(),
                                                  overuse_observer,
-                                                 stats_proxy)) {
+                                                 stats_proxy)),
+      encoding_time_observer_(encoding_time_observer) {
   encoder_thread_->Start();
   encoder_thread_->SetPriority(kHighPriority);
   module_process_thread_->RegisterModule(overuse_detector_.get());

@@ -149,6 +152,10 @@ bool VideoCaptureInput::EncoderProcess() {
           Clock::GetRealTimeClock()->TimeInMilliseconds() - encode_start_time);
       overuse_detector_->FrameEncoded(encode_time_ms);
       stats_proxy_->OnEncodedFrame(encode_time_ms);
+      if (encoding_time_observer_) {
+        encoding_time_observer_->OnReportEncodedTime(
+            deliver_frame.ntp_time_ms(), encode_time_ms);
+      }
     }
   }
   // We're done!
@@ -55,7 +55,8 @@ class VideoCaptureInput : public webrtc::VideoCaptureInput {
                     VideoCaptureCallback* frame_callback,
                     VideoRenderer* local_renderer,
                     SendStatisticsProxy* send_stats_proxy,
-                    CpuOveruseObserver* overuse_observer);
+                    CpuOveruseObserver* overuse_observer,
+                    EncodingTimeObserver* encoding_time_observer);
   ~VideoCaptureInput();

   void IncomingCapturedFrame(const VideoFrame& video_frame) override;

@@ -90,6 +91,7 @@ class VideoCaptureInput : public webrtc::VideoCaptureInput {
   const int64_t delta_ntp_internal_ms_;

   rtc::scoped_ptr<OveruseFrameDetector> overuse_detector_;
+  EncodingTimeObserver* const encoding_time_observer_;
 };

 }  // namespace internal
@@ -64,7 +64,7 @@ class VideoCaptureInputTest : public ::testing::Test {
     Config config;
     input_.reset(new internal::VideoCaptureInput(
         mock_process_thread_.get(), mock_frame_callback_.get(), nullptr,
-        &stats_proxy_, nullptr));
+        &stats_proxy_, nullptr, nullptr));
   }

   virtual void TearDown() {
@@ -42,16 +42,16 @@ class VideoAnalyzer : public PacketReceiver,
                       public Transport,
                       public VideoRenderer,
                       public VideoCaptureInput,
-                      public EncodedFrameObserver {
+                      public EncodedFrameObserver,
+                      public EncodingTimeObserver {
  public:
-  VideoAnalyzer(VideoCaptureInput* input,
-                Transport* transport,
+  VideoAnalyzer(Transport* transport,
                 const std::string& test_label,
                 double avg_psnr_threshold,
                 double avg_ssim_threshold,
                 int duration_frames,
                 FILE* graph_data_output_file)
-      : input_(input),
+      : input_(nullptr),
         transport_(transport),
         receiver_(nullptr),
         send_stream_(nullptr),

@@ -123,6 +123,12 @@ class VideoAnalyzer : public PacketReceiver,
     return receiver_->DeliverPacket(media_type, packet, length, packet_time);
   }

+  // EncodingTimeObserver.
+  void OnReportEncodedTime(int64_t ntp_time_ms, int encode_time_ms) override {
+    rtc::CritScope crit(&comparison_lock_);
+    samples_encode_time_ms_[ntp_time_ms] = encode_time_ms;
+  }
+
   void IncomingCapturedFrame(const VideoFrame& video_frame) override {
     VideoFrame copy = video_frame;
     copy.set_timestamp(copy.ntp_time_ms() * 90);
@@ -279,31 +285,31 @@ class VideoAnalyzer : public PacketReceiver,
   };

   struct Sample {
-    Sample(double dropped,
-           double input_time_ms,
-           double send_time_ms,
-           double recv_time_ms,
-           double encoded_frame_size,
+    Sample(int dropped,
+           int64_t input_time_ms,
+           int64_t send_time_ms,
+           int64_t recv_time_ms,
+           int64_t render_time_ms,
+           size_t encoded_frame_size,
            double psnr,
-           double ssim,
-           double render_time_ms)
+           double ssim)
         : dropped(dropped),
           input_time_ms(input_time_ms),
           send_time_ms(send_time_ms),
           recv_time_ms(recv_time_ms),
+          render_time_ms(render_time_ms),
          encoded_frame_size(encoded_frame_size),
          psnr(psnr),
-          ssim(ssim),
-          render_time_ms(render_time_ms) {}
+          ssim(ssim) {}

-    double dropped;
-    double input_time_ms;
-    double send_time_ms;
-    double recv_time_ms;
-    double encoded_frame_size;
+    int dropped;
+    int64_t input_time_ms;
+    int64_t send_time_ms;
+    int64_t recv_time_ms;
+    int64_t render_time_ms;
+    size_t encoded_frame_size;
     double psnr;
     double ssim;
-    double render_time_ms;
   };

   void AddFrameComparison(const VideoFrame& reference,
@@ -465,8 +471,8 @@ class VideoAnalyzer : public PacketReceiver,
     if (graph_data_output_file_) {
       samples_.push_back(
           Sample(comparison.dropped, input_time_ms, comparison.send_time_ms,
-                 comparison.recv_time_ms, comparison.encoded_frame_size, psnr,
-                 ssim, comparison.render_time_ms));
+                 comparison.recv_time_ms, comparison.render_time_ms,
+                 comparison.encoded_frame_size, psnr, ssim));
     }
     psnr_.AddSample(psnr);
     ssim_.AddSample(ssim);
@@ -512,21 +518,39 @@ class VideoAnalyzer : public PacketReceiver,
             "input_time_ms "
             "send_time_ms "
             "recv_time_ms "
+            "render_time_ms "
             "encoded_frame_size "
             "psnr "
             "ssim "
-            "render_time_ms\n");
+            "encode_time_ms\n");
+    int missing_encode_time_samples = 0;
     for (const Sample& sample : samples_) {
-      fprintf(out, "%lf %lf %lf %lf %lf %lf %lf %lf\n", sample.dropped,
-              sample.input_time_ms, sample.send_time_ms, sample.recv_time_ms,
+      auto it = samples_encode_time_ms_.find(sample.input_time_ms);
+      int encode_time_ms;
+      if (it != samples_encode_time_ms_.end()) {
+        encode_time_ms = it->second;
+      } else {
+        ++missing_encode_time_samples;
+        encode_time_ms = -1;
+      }
+      fprintf(out, "%d %" PRId64 " %" PRId64 " %" PRId64 " %" PRId64 " %" PRIuS
+              " %lf %lf %d\n",
+              sample.dropped, sample.input_time_ms, sample.send_time_ms,
+              sample.recv_time_ms, sample.render_time_ms,
              sample.encoded_frame_size, sample.psnr, sample.ssim,
-              sample.render_time_ms);
+              encode_time_ms);
     }
+    if (missing_encode_time_samples) {
+      fprintf(stderr,
+              "Warning: Missing encode_time_ms samples for %d frame(s).\n",
+              missing_encode_time_samples);
+    }
   }

   const std::string test_label_;
   FILE* const graph_data_output_file_;
   std::vector<Sample> samples_ GUARDED_BY(comparison_lock_);
+  std::map<int64_t, int> samples_encode_time_ms_ GUARDED_BY(comparison_lock_);
   test::Statistics sender_time_ GUARDED_BY(comparison_lock_);
   test::Statistics receiver_time_ GUARDED_BY(comparison_lock_);
   test::Statistics psnr_ GUARDED_BY(comparison_lock_);
@@ -737,7 +761,7 @@ void VideoQualityTest::RunWithAnalyzer(const Params& params) {
       static_cast<uint8_t>(params.common.tl_discard_threshold), 0);
   test::DirectTransport recv_transport(params.pipe);
   VideoAnalyzer analyzer(
-      nullptr, &send_transport, params.analyzer.test_label,
+      &send_transport, params.analyzer.test_label,
       params.analyzer.avg_psnr_threshold, params.analyzer.avg_ssim_threshold,
       params.analyzer.test_durations_secs * params.common.fps,
       graph_data_output_file);

@@ -751,6 +775,7 @@ void VideoQualityTest::RunWithAnalyzer(const Params& params) {
   recv_transport.SetReceiver(sender_call_->Receiver());

   SetupFullStack(params, &analyzer, &recv_transport);
+  send_config_.encoding_time_observer = &analyzer;
   receive_configs_[0].renderer = &analyzer;
   for (auto& config : receive_configs_)
     config.pre_decode_callback = &analyzer;
@@ -162,7 +162,7 @@ VideoSendStream::VideoSendStream(

   input_.reset(new internal::VideoCaptureInput(
       module_process_thread_, vie_encoder_, config_.local_renderer,
-      &stats_proxy_, this));
+      &stats_proxy_, this, config_.encoding_time_observer));

   // 28 to match packet overhead in ModuleRtpRtcpImpl.
   RTC_DCHECK_LE(config_.rtp.max_packet_size, static_cast<size_t>(0xFFFF - 28));
@@ -26,6 +26,13 @@ namespace webrtc {
 class LoadObserver;
 class VideoEncoder;

+class EncodingTimeObserver {
+ public:
+  virtual ~EncodingTimeObserver() {}
+
+  virtual void OnReportEncodedTime(int64_t ntp_time_ms, int encode_time_ms) = 0;
+};
+
 // Class to deliver captured frame to the video send stream.
 class VideoCaptureInput {
  public:

@@ -152,6 +159,11 @@ class VideoSendStream : public SendStream {
     // below the minimum configured bitrate. If this variable is false, the
     // stream may send at a rate higher than the estimated available bitrate.
     bool suspend_below_min_bitrate = false;
+
+    // Called for each encoded frame. Passes the total time spent on encoding.
+    // TODO(ivica): Consolidate with post_encode_callback:
+    // https://code.google.com/p/webrtc/issues/detail?id=5042
+    EncodingTimeObserver* encoding_time_observer = nullptr;
   };

   // Gets interface used to insert captured frames. Valid as long as the