VideoProcessor: mini-fixes in preparation for task queue CL.
* Make ProcessFrame return void.
* Make |encode_callback_| and |decode_callback_| direct members.
* Remove ::EncodedFrameSize() and ::EncodedFrameType().
* Remove unused |timestamp| member from FrameInfo.
* Reorder printf output from PrintCodecSettings.
* Make some member functions const.

BUG=webrtc:6634
Review-Url: https://codereview.webrtc.org/2998063002
Cr-Commit-Position: refs/heads/master@{#19421}
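Editor's note on the |encode_callback_|/|decode_callback_| change: the callback adapters move from heap-allocated std::unique_ptr members to direct data members, so registration passes the member's address and no separate allocation or .get() call is needed. A minimal sketch of that shape follows; the class and method names are illustrative stand-ins, not the real WebRTC interfaces:

// Editor's sketch (not part of the CL): heap-allocated callback member
// replaced by a direct member. All names here are hypothetical.
class FakeEncodedImageCallback {
 public:
  virtual ~FakeEncodedImageCallback() = default;
  virtual void OnEncoded(int frame_number) = 0;
};

class SketchProcessor {
 public:
  // The adapter is constructed in the member-initializer list; no |new|, no
  // std::unique_ptr, and its lifetime is tied to the owning processor.
  SketchProcessor() : encode_callback_(this) {}

  void Init() {
    // Registration now passes &encode_callback_ instead of
    // encode_callback_.get().
    RegisterEncodeCompleteCallback(&encode_callback_);
  }

 private:
  // Adapter that forwards "encode complete" events back into the owner.
  class EncodeCompleteCallback : public FakeEncodedImageCallback {
   public:
    explicit EncodeCompleteCallback(SketchProcessor* processor)
        : processor_(processor) {}
    void OnEncoded(int frame_number) override {
      processor_->FrameEncoded(frame_number);
    }

   private:
    SketchProcessor* const processor_;
  };

  void RegisterEncodeCompleteCallback(FakeEncodedImageCallback* callback) {
    registered_callback_ = callback;
  }
  void FrameEncoded(int /*frame_number*/) { /* update per-frame bookkeeping */ }

  FakeEncodedImageCallback* registered_callback_ = nullptr;
  EncodeCompleteCallback encode_callback_;  // Was: std::unique_ptr<...>.
};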
@@ -59,19 +59,30 @@ void PrintCodecSettings(const VideoCodec& codec_settings) {
   printf(" QPmax : %d\n", codec_settings.qpMax);
   if (codec_settings.codecType == kVideoCodecVP8) {
     printf(" Complexity : %d\n", codec_settings.VP8().complexity);
+    printf(" Resilience : %d\n", codec_settings.VP8().resilience);
+    printf(" # temporal layers : %d\n",
+           codec_settings.VP8().numberOfTemporalLayers);
     printf(" Denoising : %d\n", codec_settings.VP8().denoisingOn);
     printf(" Error concealment : %d\n",
            codec_settings.VP8().errorConcealmentOn);
+    printf(" Automatic resize : %d\n",
+           codec_settings.VP8().automaticResizeOn);
     printf(" Frame dropping : %d\n", codec_settings.VP8().frameDroppingOn);
-    printf(" Resilience : %d\n", codec_settings.VP8().resilience);
     printf(" Key frame interval: %d\n", codec_settings.VP8().keyFrameInterval);
   } else if (codec_settings.codecType == kVideoCodecVP9) {
     printf(" Complexity : %d\n", codec_settings.VP9().complexity);
+    printf(" Resilience : %d\n", codec_settings.VP9().resilienceOn);
+    printf(" # temporal layers : %d\n",
+           codec_settings.VP9().numberOfTemporalLayers);
     printf(" Denoising : %d\n", codec_settings.VP9().denoisingOn);
     printf(" Frame dropping : %d\n", codec_settings.VP9().frameDroppingOn);
-    printf(" Resilience : %d\n", codec_settings.VP9().resilienceOn);
     printf(" Key frame interval: %d\n", codec_settings.VP9().keyFrameInterval);
     printf(" Adaptive QP mode : %d\n", codec_settings.VP9().adaptiveQpMode);
+    printf(" Automatic resize : %d\n",
+           codec_settings.VP9().automaticResizeOn);
+    printf(" # spatial layers : %d\n",
+           codec_settings.VP9().numberOfSpatialLayers);
+    printf(" Flexible mode : %d\n", codec_settings.VP9().flexibleMode);
   } else if (codec_settings.codecType == kVideoCodecH264) {
     printf(" Frame dropping : %d\n", codec_settings.H264().frameDroppingOn);
     printf(" Key frame interval: %d\n",
@@ -114,8 +125,8 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
       encoder_(encoder),
       decoder_(decoder),
       bitrate_allocator_(CreateBitrateAllocator(&config_)),
-      encode_callback_(new VideoProcessorEncodeCompleteCallback(this)),
-      decode_callback_(new VideoProcessorDecodeCompleteCallback(this)),
+      encode_callback_(this),
+      decode_callback_(this),
       packet_manipulator_(packet_manipulator),
       analysis_frame_reader_(analysis_frame_reader),
       analysis_frame_writer_(analysis_frame_writer),
@@ -125,7 +136,7 @@ VideoProcessor::VideoProcessor(webrtc::VideoEncoder* encoder,
       last_encoded_frame_num_(-1),
      last_decoded_frame_num_(-1),
       first_key_frame_has_been_excluded_(false),
-      last_decoded_frame_buffer_(0, analysis_frame_reader->FrameLength()),
+      last_decoded_frame_buffer_(analysis_frame_reader->FrameLength()),
       stats_(stats),
       num_dropped_frames_(0),
       num_spatial_resizes_(0) {
@@ -145,10 +156,10 @@ void VideoProcessor::Init() {
   initialized_ = true;
 
   // Setup required callbacks for the encoder and decoder.
-  RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(encode_callback_.get()),
+  RTC_CHECK_EQ(encoder_->RegisterEncodeCompleteCallback(&encode_callback_),
                WEBRTC_VIDEO_CODEC_OK)
       << "Failed to register encode complete callback";
-  RTC_CHECK_EQ(decoder_->RegisterDecodeCompleteCallback(decode_callback_.get()),
+  RTC_CHECK_EQ(decoder_->RegisterDecodeCompleteCallback(&decode_callback_),
                WEBRTC_VIDEO_CODEC_OK)
       << "Failed to register decode complete callback";
 
@@ -187,36 +198,27 @@ void VideoProcessor::Init() {
 }
 
 void VideoProcessor::Release() {
-  encoder_->RegisterEncodeCompleteCallback(nullptr);
-  decoder_->RegisterDecodeCompleteCallback(nullptr);
-
   RTC_CHECK_EQ(encoder_->Release(), WEBRTC_VIDEO_CODEC_OK);
   RTC_CHECK_EQ(decoder_->Release(), WEBRTC_VIDEO_CODEC_OK);
 
+  encoder_->RegisterEncodeCompleteCallback(nullptr);
+  decoder_->RegisterDecodeCompleteCallback(nullptr);
+
   initialized_ = false;
 }
 
-bool VideoProcessor::ProcessFrame(int frame_number) {
-  RTC_DCHECK_GE(frame_number, 0);
-  RTC_DCHECK_LE(frame_number, frame_infos_.size())
-      << "Must process frames without gaps.";
+void VideoProcessor::ProcessFrame(int frame_number) {
+  RTC_DCHECK_EQ(frame_number, frame_infos_.size())
+      << "Must process frames in sequence.";
   RTC_DCHECK(initialized_) << "VideoProcessor not initialized.";
 
+  // Get frame from file.
   rtc::scoped_refptr<I420BufferInterface> buffer(
       analysis_frame_reader_->ReadFrame());
-  if (!buffer) {
-    // Last frame has been reached.
-    return false;
-  }
-
-  uint32_t timestamp = FrameNumberToTimestamp(frame_number);
-  VideoFrame source_frame(buffer, timestamp, 0, webrtc::kVideoRotation_0);
-
-  // Store frame information during the different stages of encode and decode.
-  frame_infos_.emplace_back();
-  FrameInfo* frame_info = &frame_infos_.back();
-  frame_info->timestamp = timestamp;
+  RTC_CHECK(buffer) << "Tried to read too many frames from the file.";
+  const int64_t kNoRenderTime = 0;
+  VideoFrame source_frame(buffer, FrameNumberToTimestamp(frame_number),
+                          kNoRenderTime, webrtc::kVideoRotation_0);
 
   // Decide if we are going to force a keyframe.
   std::vector<FrameType> frame_types(1, kVideoFrameDelta);
@@ -225,6 +227,10 @@ bool VideoProcessor::ProcessFrame(int frame_number) {
     frame_types[0] = kVideoFrameKey;
   }
 
+  // Store frame information during the different stages of encode and decode.
+  frame_infos_.emplace_back();
+  FrameInfo* frame_info = &frame_infos_.back();
+
   // Create frame statistics object used for aggregation at end of test run.
   FrameStatistic* frame_stat = &stats_->NewFrame(frame_number);
 
@@ -239,38 +245,26 @@ bool VideoProcessor::ProcessFrame(int frame_number) {
                     << ", return code: " << frame_stat->encode_return_code
                     << ".";
   }
-
-  return true;
 }
 
-void VideoProcessor::SetRates(int bit_rate, int frame_rate) {
-  config_.codec_settings.maxFramerate = frame_rate;
+void VideoProcessor::SetRates(int bitrate_kbps, int framerate_fps) {
+  config_.codec_settings.maxFramerate = framerate_fps;
   int set_rates_result = encoder_->SetRateAllocation(
-      bitrate_allocator_->GetAllocation(bit_rate * 1000, frame_rate),
-      frame_rate);
+      bitrate_allocator_->GetAllocation(bitrate_kbps * 1000, framerate_fps),
+      framerate_fps);
   RTC_DCHECK_GE(set_rates_result, 0)
-      << "Failed to update encoder with new rate " << bit_rate;
+      << "Failed to update encoder with new rate " << bitrate_kbps << ".";
   num_dropped_frames_ = 0;
   num_spatial_resizes_ = 0;
 }
 
-size_t VideoProcessor::EncodedFrameSize(int frame_number) {
-  RTC_DCHECK_LT(frame_number, frame_infos_.size());
-  return frame_infos_[frame_number].encoded_frame_size;
-}
-
-FrameType VideoProcessor::EncodedFrameType(int frame_number) {
-  RTC_DCHECK_LT(frame_number, frame_infos_.size());
-  return frame_infos_[frame_number].encoded_frame_type;
-}
-
-int VideoProcessor::GetQpFromEncoder(int frame_number) {
-  RTC_DCHECK_LT(frame_number, frame_infos_.size());
+int VideoProcessor::GetQpFromEncoder(int frame_number) const {
+  RTC_CHECK_LT(frame_number, frame_infos_.size());
   return frame_infos_[frame_number].qp_encoder;
 }
 
-int VideoProcessor::GetQpFromBitstream(int frame_number) {
-  RTC_DCHECK_LT(frame_number, frame_infos_.size());
+int VideoProcessor::GetQpFromBitstream(int frame_number) const {
+  RTC_CHECK_LT(frame_number, frame_infos_.size());
   return frame_infos_[frame_number].qp_bitstream;
 }
 
@@ -329,10 +323,8 @@ void VideoProcessor::FrameEncoded(
   last_encoded_frame_num_ = frame_number;
 
   // Frame is not dropped, so update frame information and statistics.
-  RTC_DCHECK_LT(frame_number, frame_infos_.size());
+  RTC_CHECK_LT(frame_number, frame_infos_.size());
   FrameInfo* frame_info = &frame_infos_[frame_number];
-  frame_info->encoded_frame_size = encoded_image._length;
-  frame_info->encoded_frame_type = encoded_image._frameType;
   frame_info->qp_encoder = encoded_image.qp_;
   if (codec == kVideoCodecVP8) {
     vp8::GetQp(encoded_image._buffer, encoded_image._length,
@@ -486,14 +478,14 @@ void VideoProcessor::FrameDecoded(const VideoFrame& image) {
   last_decoded_frame_buffer_ = std::move(extracted_buffer);
 }
 
-uint32_t VideoProcessor::FrameNumberToTimestamp(int frame_number) {
+uint32_t VideoProcessor::FrameNumberToTimestamp(int frame_number) const {
   RTC_DCHECK_GE(frame_number, 0);
   const int ticks_per_frame =
       kRtpClockRateHz / config_.codec_settings.maxFramerate;
   return (frame_number + 1) * ticks_per_frame;
 }
 
-int VideoProcessor::TimestampToFrameNumber(uint32_t timestamp) {
+int VideoProcessor::TimestampToFrameNumber(uint32_t timestamp) const {
   RTC_DCHECK_GT(timestamp, 0);
   const int ticks_per_frame =
       kRtpClockRateHz / config_.codec_settings.maxFramerate;

@@ -25,6 +25,7 @@
 #include "webrtc/modules/video_coding/utility/vp9_uncompressed_header_parser.h"
 #include "webrtc/rtc_base/buffer.h"
 #include "webrtc/rtc_base/checks.h"
+#include "webrtc/rtc_base/constructormagic.h"
 #include "webrtc/test/testsupport/frame_reader.h"
 #include "webrtc/test/testsupport/frame_writer.h"
 
@@ -155,26 +156,19 @@ class VideoProcessor {
   // Tears down callbacks and releases the encoder and decoder.
   void Release();
 
-  // Processes a single frame. Returns true as long as there's more frames
-  // available in the source clip.
-  // |frame_number| must be an integer >= 0.
-  bool ProcessFrame(int frame_number);
+  // Processes a single frame. The frames must be processed in order, and the
+  // VideoProcessor must be initialized first.
+  void ProcessFrame(int frame_number);
 
-  // Updates the encoder with the target |bit_rate| and the |frame_rate|.
-  void SetRates(int bit_rate, int frame_rate);
-
-  // Return the size of the encoded frame in bytes. Dropped frames by the
-  // encoder are regarded as zero size.
-  size_t EncodedFrameSize(int frame_number);
-
-  // Return the encoded frame type (key or delta).
-  FrameType EncodedFrameType(int frame_number);
-
-  // Return the qp used by encoder.
-  int GetQpFromEncoder(int frame_number);
-
-  // Return the qp from the qp parser.
-  int GetQpFromBitstream(int frame_number);
+  // Updates the encoder with target rates. Must be called at least once.
+  void SetRates(int bitrate_kbps, int framerate_fps);
+
+  // TODO(brandtr): Get rid of these functions by moving the corresponding QP
+  // fields to the Stats object.
+  int GetQpFromEncoder(int frame_number) const;
+  int GetQpFromBitstream(int frame_number) const;
 
   // Return the number of dropped frames.
   int NumberDroppedFrames();
@@ -186,32 +180,17 @@ class VideoProcessor {
   // Container that holds per-frame information that needs to be stored between
   // calls to Encode and Decode, as well as the corresponding callbacks. It is
   // not directly used for statistics -- for that, test::FrameStatistic is used.
+  // TODO(brandtr): Get rid of this struct and use the Stats class instead.
   struct FrameInfo {
-    FrameInfo()
-        : timestamp(0),
-          encode_start_ns(0),
-          decode_start_ns(0),
-          encoded_frame_size(0),
-          encoded_frame_type(kVideoFrameDelta),
-          decoded_width(0),
-          decoded_height(0),
-          manipulated_length(0),
-          qp_encoder(0),
-          qp_bitstream(0) {}
-
-    uint32_t timestamp;
-    int64_t encode_start_ns;
-    int64_t decode_start_ns;
-    size_t encoded_frame_size;
-    FrameType encoded_frame_type;
-    int decoded_width;
-    int decoded_height;
-    size_t manipulated_length;
-    int qp_encoder;
-    int qp_bitstream;
+    int64_t encode_start_ns = 0;
+    int64_t decode_start_ns = 0;
+    int qp_encoder = 0;
+    int qp_bitstream = 0;
+    int decoded_width = 0;
+    int decoded_height = 0;
+    size_t manipulated_length = 0;
   };
 
-  // Callback class required to implement according to the VideoEncoder API.
   class VideoProcessorEncodeCompleteCallback
       : public webrtc::EncodedImageCallback {
    public:
@@ -233,7 +212,6 @@ class VideoProcessor {
     VideoProcessor* const video_processor_;
   };
 
-  // Callback class required to implement according to the VideoDecoder API.
   class VideoProcessorDecodeCompleteCallback
       : public webrtc::DecodedImageCallback {
    public:
@@ -259,19 +237,19 @@ class VideoProcessor {
     VideoProcessor* const video_processor_;
   };
 
-  // Invoked by the callback when a frame has completed encoding.
+  // Invoked by the callback adapter when a frame has completed encoding.
   void FrameEncoded(webrtc::VideoCodecType codec,
                     const webrtc::EncodedImage& encodedImage,
                     const webrtc::RTPFragmentationHeader* fragmentation);
 
-  // Invoked by the callback when a frame has completed decoding.
+  // Invoked by the callback adapter when a frame has completed decoding.
   void FrameDecoded(const webrtc::VideoFrame& image);
 
   // Use the frame number as the basis for timestamp to identify frames. Let the
   // first timestamp be non-zero, to not make the IvfFileWriter believe that we
   // want to use capture timestamps in the IVF files.
-  uint32_t FrameNumberToTimestamp(int frame_number);
-  int TimestampToFrameNumber(uint32_t timestamp);
+  uint32_t FrameNumberToTimestamp(int frame_number) const;
+  int TimestampToFrameNumber(uint32_t timestamp) const;
 
   TestConfig config_;
 
@@ -280,8 +258,8 @@ class VideoProcessor {
   const std::unique_ptr<VideoBitrateAllocator> bitrate_allocator_;
 
   // Adapters for the codec callbacks.
-  const std::unique_ptr<EncodedImageCallback> encode_callback_;
-  const std::unique_ptr<DecodedImageCallback> decode_callback_;
+  VideoProcessorEncodeCompleteCallback encode_callback_;
+  VideoProcessorDecodeCompleteCallback decode_callback_;
 
   // Fake network.
   PacketManipulator* const packet_manipulator_;
@@ -318,6 +296,8 @@ class VideoProcessor {
   Stats* stats_;
   int num_dropped_frames_;
   int num_spatial_resizes_;
+
+  RTC_DISALLOW_COPY_AND_ASSIGN(VideoProcessor);
 };
 
 }  // namespace test

@@ -266,10 +266,12 @@ class VideoProcessorIntegrationTest : public testing::Test {
   // For every encoded frame, update the rate control metrics.
   void UpdateRateControlMetrics(int frame_number) {
     RTC_CHECK_GE(frame_number, 0);
-    int tl_idx = TemporalLayerIndexForFrame(frame_number);
-    FrameType frame_type = processor_->EncodedFrameType(frame_number);
+    FrameType frame_type = stats_.stats_[frame_number].frame_type;
     float encoded_size_kbits =
-        processor_->EncodedFrameSize(frame_number) * 8.0f / 1000.0f;
+        stats_.stats_[frame_number].encoded_frame_length_in_bytes * 8.0f /
+        1000.0f;
+    const int tl_idx = TemporalLayerIndexForFrame(frame_number);
 
     // Update layer data.
     // Update rate mismatch relative to per-frame bandwidth for delta frames.
@@ -490,7 +492,7 @@ class VideoProcessorIntegrationTest : public testing::Test {
     // TODO(brandtr): Refactor "frame number accounting" so we don't have to
    // call ProcessFrame num_frames+1 times here.
     for (frame_number = 0; frame_number <= num_frames; ++frame_number) {
-      EXPECT_TRUE(processor_->ProcessFrame(frame_number));
+      processor_->ProcessFrame(frame_number);
     }
 
     for (frame_number = 0; frame_number < num_frames; ++frame_number) {
@@ -509,7 +511,7 @@ class VideoProcessorIntegrationTest : public testing::Test {
     }
 
     while (frame_number < num_frames) {
-      EXPECT_TRUE(processor_->ProcessFrame(frame_number));
+      processor_->ProcessFrame(frame_number);
       VerifyQpParser(frame_number);
       const int tl_idx = TemporalLayerIndexForFrame(frame_number);
       ++num_frames_per_update_[tl_idx];
@@ -536,7 +538,7 @@ class VideoProcessorIntegrationTest : public testing::Test {
     }
     // TODO(brandtr): Refactor "frame number accounting" so we don't have to
    // call ProcessFrame one extra time here.
-    EXPECT_TRUE(processor_->ProcessFrame(frame_number));
+    processor_->ProcessFrame(frame_number);
   }
 
   // Verify rate control metrics for all frames (if in batch mode), or for all

@@ -510,8 +510,10 @@ int main(int argc, char* argv[]) {
       nullptr /* decoded_frame_writer */);
   processor->Init();
 
+  const int num_frames = frame_reader.NumberOfFrames();
   int frame_number = 0;
-  while (processor->ProcessFrame(frame_number)) {
+  while (frame_number < num_frames) {
+    processor->ProcessFrame(frame_number);
     if (frame_number % 80 == 0) {
       Log("\n");  // make the output a bit nicer.
     }
@@ -522,8 +524,7 @@ int main(int argc, char* argv[]) {
   Log("Processed %d frames\n", frame_number);
 
   // Release encoder and decoder to make sure they have finished processing.
-  encoder->Release();
-  decoder->Release();
+  processor->Release();
 
   // Verify statistics are correct:
   assert(frame_number == static_cast<int>(stats.stats_.size()));
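Editor's note: a minimal sketch of how a caller drives the changed interface after this CL, assuming the VideoProcessor API from the header diff above (header include omitted; the rates and frame count below are placeholders, and error handling is left out):

// Editor's sketch (not part of the CL): ProcessFrame() now returns void, so
// the caller decides how many frames to feed and must feed them in order,
// starting at 0.
void RunAllFrames(webrtc::test::VideoProcessor* processor, int num_frames) {
  processor->Init();
  // Placeholder rates; real tests take these from the test config.
  processor->SetRates(/*bitrate_kbps=*/500, /*framerate_fps=*/30);
  for (int frame_number = 0; frame_number < num_frames; ++frame_number) {
    processor->ProcessFrame(frame_number);
  }
  processor->Release();  // Also unregisters the encode/decode callbacks.
}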