Add support for writing raw encoder output to .ivf files.

Also refactor GenericEncoder to use these file writers, and remove the
preprocessor defines previously used to enable file writing.

BUG=

Review URL: https://codereview.webrtc.org/1853813002

Cr-Commit-Position: refs/heads/master@{#12372}
sprang
2016-04-15 01:24:14 -07:00
committed by Commit bot
parent 7789fe7ab1
commit 3911c26bc0
18 changed files with 663 additions and 276 deletions
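For reference, here is a minimal usage sketch of the IvfFileWriter API introduced below (Open/WriteFrame/Close). The output path and the way the EncodedImage is obtained are illustrative only and not part of this CL:

#include <memory>

#include "webrtc/modules/video_coding/utility/ivf_file_writer.h"

// Minimal sketch, assuming an already populated EncodedImage. The writer
// lazily emits the 32-byte IVF file header on the first WriteFrame() call,
// then a 12-byte frame header plus payload per frame; Close() rewrites the
// file header with the final frame count (or deletes the file if empty).
void DumpFrame(const webrtc::EncodedImage& encoded_image) {
  std::unique_ptr<webrtc::IvfFileWriter> writer =
      webrtc::IvfFileWriter::Open("/tmp/encoder_output.ivf",
                                  webrtc::kRtpVideoVp8);
  if (!writer)
    return;  // Failed to open the output file.
  writer->WriteFrame(encoded_image);
  writer->Close();
}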

View File

@ -107,6 +107,8 @@ source_set("video_coding_utility") {
sources = [
"utility/frame_dropper.cc",
"utility/frame_dropper.h",
"utility/ivf_file_writer.cc",
"utility/ivf_file_writer.h",
"utility/moving_average.h",
"utility/qp_parser.cc",
"utility/qp_parser.h",

View File

@ -1,12 +1,12 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/generic_encoder.h"
@ -91,17 +91,15 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
}
} // namespace
// #define DEBUG_ENCODER_BIT_STREAM
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
VideoEncoderRateObserver* rate_observer,
VCMEncodedFrameCallback* encoded_frame_callback,
bool internalSource)
bool internal_source)
: encoder_(encoder),
rate_observer_(rate_observer),
vcm_encoded_frame_callback_(encoded_frame_callback),
internal_source_(internalSource),
internal_source_(internal_source),
encoder_params_({0, 0, 0, 0}),
rotation_(kVideoRotation_0),
is_screenshare_(false) {}
@ -114,8 +112,8 @@ int32_t VCMGenericEncoder::Release() {
}
int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
size_t maxPayloadSize) {
int32_t number_of_cores,
size_t max_payload_size) {
TRACE_EVENT0("webrtc", "VCMGenericEncoder::InitEncode");
{
rtc::CritScope lock(&params_lock_);
@ -124,7 +122,7 @@ int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
}
is_screenshare_ = settings->mode == VideoCodecMode::kScreensharing;
if (encoder_->InitEncode(settings, numberOfCores, maxPayloadSize) != 0) {
if (encoder_->InitEncode(settings, number_of_cores, max_payload_size) != 0) {
LOG(LS_ERROR) << "Failed to initialize the encoder associated with "
"payload name: "
<< settings->plName;
@ -134,16 +132,16 @@ int32_t VCMGenericEncoder::InitEncode(const VideoCodec* settings,
return 0;
}
int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<FrameType>& frameTypes) {
int32_t VCMGenericEncoder::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific,
const std::vector<FrameType>& frame_types) {
TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
inputFrame.timestamp());
frame.timestamp());
for (FrameType frame_type : frameTypes)
for (FrameType frame_type : frame_types)
RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);
rotation_ = inputFrame.rotation();
rotation_ = frame.rotation();
// Keep track of the current frame rotation and apply to the output of the
// encoder. There might not be exact as the encoder could have one frame delay
@ -152,7 +150,7 @@ int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
// properly, which it isn't). More than one frame may be in the pipeline.
vcm_encoded_frame_callback_->SetRotation(rotation_);
int32_t result = encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
int32_t result = encoder_->Encode(frame, codec_specific, &frame_types);
if (vcm_encoded_frame_callback_) {
vcm_encoded_frame_callback_->SignalLastEncoderImplementationUsed(
@ -162,7 +160,7 @@ int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
if (is_screenshare_ &&
result == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT) {
// Target bitrate exceeded, encoder state has been reset - try again.
return encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
return encoder_->Encode(frame, codec_specific, &frame_types);
}
return result;
@ -186,7 +184,7 @@ void VCMGenericEncoder::SetEncoderParameters(const EncoderParameters& params) {
if (rates_have_changed) {
uint32_t target_bitrate_kbps = (params.target_bitrate + 500) / 1000;
encoder_->SetRates(target_bitrate_kbps, params.input_frame_rate);
if (rate_observer_ != nullptr) {
if (rate_observer_) {
rate_observer_->OnSetRates(params.target_bitrate,
params.input_frame_rate);
}
@ -224,32 +222,16 @@ int VCMGenericEncoder::GetTargetFramerate() {
return encoder_->GetTargetFramerate();
}
/***************************
* Callback Implementation
***************************/
VCMEncodedFrameCallback::VCMEncodedFrameCallback(
EncodedImageCallback* post_encode_callback)
: send_callback_(),
_mediaOpt(NULL),
_payloadType(0),
_internalSource(false),
_rotation(kVideoRotation_0),
post_encode_callback_(post_encode_callback)
#ifdef DEBUG_ENCODER_BIT_STREAM
,
_bitStreamAfterEncoder(NULL)
#endif
{
#ifdef DEBUG_ENCODER_BIT_STREAM
_bitStreamAfterEncoder = fopen("encoderBitStream.bit", "wb");
#endif
}
media_opt_(nullptr),
payload_type_(0),
internal_source_(false),
rotation_(kVideoRotation_0),
post_encode_callback_(post_encode_callback) {}
VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {
#ifdef DEBUG_ENCODER_BIT_STREAM
fclose(_bitStreamAfterEncoder);
#endif
}
VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {}
int32_t VCMEncodedFrameCallback::SetTransportCallback(
VCMPacketizationCallback* transport) {
@ -259,48 +241,37 @@ int32_t VCMEncodedFrameCallback::SetTransportCallback(
int32_t VCMEncodedFrameCallback::Encoded(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentationHeader) {
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image._timeStamp);
post_encode_callback_->Encoded(encoded_image, NULL, NULL);
post_encode_callback_->Encoded(encoded_image, nullptr, nullptr);
if (send_callback_ == NULL) {
if (send_callback_ == nullptr)
return VCM_UNINITIALIZED;
}
#ifdef DEBUG_ENCODER_BIT_STREAM
if (_bitStreamAfterEncoder != NULL) {
fwrite(encoded_image._buffer, 1, encoded_image._length,
_bitStreamAfterEncoder);
}
#endif
RTPVideoHeader rtp_video_header;
memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
if (codec_specific)
CopyCodecSpecific(codec_specific, &rtp_video_header);
rtp_video_header.rotation = rotation_;
RTPVideoHeader rtpVideoHeader;
memset(&rtpVideoHeader, 0, sizeof(RTPVideoHeader));
RTPVideoHeader* rtpVideoHeaderPtr = &rtpVideoHeader;
if (codecSpecificInfo) {
CopyCodecSpecific(codecSpecificInfo, rtpVideoHeaderPtr);
}
rtpVideoHeader.rotation = _rotation;
int32_t ret_val = send_callback_->SendData(
payload_type_, encoded_image, fragmentation_header, &rtp_video_header);
if (ret_val < 0)
return ret_val;
int32_t callbackReturn = send_callback_->SendData(
_payloadType, encoded_image, fragmentationHeader, rtpVideoHeaderPtr);
if (callbackReturn < 0) {
return callbackReturn;
}
if (_mediaOpt != NULL) {
_mediaOpt->UpdateWithEncodedData(encoded_image);
if (_internalSource)
return _mediaOpt->DropFrame(); // Signal to encoder to drop next frame.
if (media_opt_) {
media_opt_->UpdateWithEncodedData(encoded_image);
if (internal_source_)
return media_opt_->DropFrame(); // Signal to encoder to drop next frame.
}
return VCM_OK;
}
void VCMEncodedFrameCallback::SetMediaOpt(
media_optimization::MediaOptimization* mediaOpt) {
_mediaOpt = mediaOpt;
media_opt_ = mediaOpt;
}
void VCMEncodedFrameCallback::SignalLastEncoderImplementationUsed(
@ -308,5 +279,4 @@ void VCMEncodedFrameCallback::SignalLastEncoderImplementationUsed(
if (send_callback_)
send_callback_->OnEncoderImplementationName(implementation_name);
}
} // namespace webrtc

View File

@ -33,60 +33,35 @@ struct EncoderParameters {
uint32_t input_frame_rate;
};
/*************************************/
/* VCMEncodeFrameCallback class */
/***********************************/
class VCMEncodedFrameCallback : public EncodedImageCallback {
public:
explicit VCMEncodedFrameCallback(
EncodedImageCallback* post_encode_callback);
virtual ~VCMEncodedFrameCallback();
explicit VCMEncodedFrameCallback(EncodedImageCallback* post_encode_callback);
virtual ~VCMEncodedFrameCallback();
/*
* Callback implementation - codec encode complete
*/
int32_t Encoded(
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo = NULL,
const RTPFragmentationHeader* fragmentationHeader = NULL);
/*
* Callback implementation - generic encoder encode complete
*/
int32_t SetTransportCallback(VCMPacketizationCallback* transport);
/**
* Set media Optimization
*/
void SetMediaOpt(media_optimization::MediaOptimization* mediaOpt);
void SetPayloadType(uint8_t payloadType) {
_payloadType = payloadType;
}
void SetInternalSource(bool internalSource) {
_internalSource = internalSource;
}
void SetRotation(VideoRotation rotation) { _rotation = rotation; }
void SignalLastEncoderImplementationUsed(
const char* encoder_implementation_name);
// Implements EncodedImageCallback.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) override;
int32_t SetTransportCallback(VCMPacketizationCallback* transport);
void SetMediaOpt(media_optimization::MediaOptimization* media_opt);
void SetPayloadType(uint8_t payload_type) { payload_type_ = payload_type; }
void SetInternalSource(bool internal_source) {
internal_source_ = internal_source;
}
void SetRotation(VideoRotation rotation) { rotation_ = rotation; }
void SignalLastEncoderImplementationUsed(
const char* encoder_implementation_name);
private:
VCMPacketizationCallback* send_callback_;
media_optimization::MediaOptimization* _mediaOpt;
uint8_t _payloadType;
bool _internalSource;
VideoRotation _rotation;
VCMPacketizationCallback* send_callback_;
media_optimization::MediaOptimization* media_opt_;
uint8_t payload_type_;
bool internal_source_;
VideoRotation rotation_;
EncodedImageCallback* post_encode_callback_;
EncodedImageCallback* post_encode_callback_;
};
#ifdef DEBUG_ENCODER_BIT_STREAM
FILE* _bitStreamAfterEncoder;
#endif
}; // end of VCMEncodeFrameCallback class
/******************************/
/* VCMGenericEncoder class */
/******************************/
class VCMGenericEncoder {
friend class VCMCodecDataBase;
@ -94,42 +69,24 @@ class VCMGenericEncoder {
VCMGenericEncoder(VideoEncoder* encoder,
VideoEncoderRateObserver* rate_observer,
VCMEncodedFrameCallback* encoded_frame_callback,
bool internalSource);
bool internal_source);
~VCMGenericEncoder();
/**
* Free encoder memory
*/
int32_t Release();
/**
* Initialize the encoder with the information from the VideoCodec
*/
int32_t InitEncode(const VideoCodec* settings,
int32_t numberOfCores,
size_t maxPayloadSize);
/**
* Encode raw image
* inputFrame : Frame containing raw image
* codecSpecificInfo : Specific codec data
* cameraFrameRate : Request or information from the remote side
* frameType : The requested frame type to encode
*/
int32_t Encode(const VideoFrame& inputFrame,
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<FrameType>& frameTypes);
int32_t number_of_cores,
size_t max_payload_size);
int32_t Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific,
const std::vector<FrameType>& frame_types);
void SetEncoderParameters(const EncoderParameters& params);
EncoderParameters GetEncoderParameters() const;
int32_t SetPeriodicKeyFrames(bool enable);
int32_t RequestFrame(const std::vector<FrameType>& frame_types);
bool InternalSource() const;
void OnDroppedFrame();
bool SupportsNativeHandle() const;
int GetTargetFramerate();
private:
@ -141,7 +98,7 @@ class VCMGenericEncoder {
EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
VideoRotation rotation_;
bool is_screenshare_;
}; // end of VCMGenericEncoder class
};
} // namespace webrtc

View File

@ -78,7 +78,8 @@ class VideoCodingModule : public Module {
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback,
NackSender* nack_sender,
KeyFrameRequestSender* keyframe_request_sender);
KeyFrameRequestSender* keyframe_request_sender,
EncodedImageCallback* pre_decode_image_callback);
static VideoCodingModule* Create(Clock* clock, EventFactory* event_factory);
@ -511,8 +512,6 @@ class VideoCodingModule : public Module {
// suspended due to bandwidth limitations; otherwise false.
virtual bool VideoSuspended() const = 0;
virtual void RegisterPreDecodeImageCallback(
EncodedImageCallback* observer) = 0;
virtual void RegisterPostEncodeImageCallback(
EncodedImageCallback* post_encode_callback) = 0;
// Releases pending decode calls, permitting faster thread shutdown.

View File

@ -0,0 +1,197 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/utility/ivf_file_writer.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
namespace webrtc {
IvfFileWriter::IvfFileWriter(const std::string& file_name,
std::unique_ptr<FileWrapper> file,
RtpVideoCodecTypes codec_type)
: codec_type_(codec_type),
num_frames_(0),
width_(0),
height_(0),
last_timestamp_(-1),
using_capture_timestamps_(false),
file_name_(file_name),
file_(std::move(file)) {}
IvfFileWriter::~IvfFileWriter() {
Close();
}
const size_t kIvfHeaderSize = 32;
std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(
const std::string& file_name,
RtpVideoCodecTypes codec_type) {
std::unique_ptr<IvfFileWriter> file_writer;
std::unique_ptr<FileWrapper> file(FileWrapper::Create());
if (file->OpenFile(file_name.c_str(), false) != 0)
return file_writer;
file_writer.reset(new IvfFileWriter(
file_name, std::unique_ptr<FileWrapper>(std::move(file)), codec_type));
if (!file_writer->WriteHeader())
file_writer.reset();
return file_writer;
}
bool IvfFileWriter::WriteHeader() {
if (file_->Rewind() != 0) {
LOG(LS_WARNING) << "Unable to rewind output file " << file_name_;
return false;
}
uint8_t ivf_header[kIvfHeaderSize] = {0};
ivf_header[0] = 'D';
ivf_header[1] = 'K';
ivf_header[2] = 'I';
ivf_header[3] = 'F';
ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[4], 0); // Version.
ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[6], 32); // Header size.
switch (codec_type_) {
case kRtpVideoVp8:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '8';
ivf_header[11] = '0';
break;
case kRtpVideoVp9:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '9';
ivf_header[11] = '0';
break;
case kRtpVideoH264:
ivf_header[8] = 'H';
ivf_header[9] = '2';
ivf_header[10] = '6';
ivf_header[11] = '4';
break;
default:
LOG(LS_ERROR) << "Unknown CODEC type: " << codec_type_;
return false;
}
ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[12], width_);
ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[14], height_);
// Render timestamps are in ms (1/1000 scale), while RTP timestamps use a
// 90kHz clock.
ByteWriter<uint32_t>::WriteLittleEndian(
&ivf_header[16], using_capture_timestamps_ ? 1000 : 90000);
ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[20], 1);
ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[24],
static_cast<uint32_t>(num_frames_));
ByteWriter<uint32_t>::WriteLittleEndian(&ivf_header[28], 0); // Reserved.
if (!file_->Write(ivf_header, kIvfHeaderSize)) {
LOG(LS_ERROR) << "Unable to write IVF header for file " << file_name_;
return false;
}
return true;
}
bool IvfFileWriter::InitFromFirstFrame(const EncodedImage& encoded_image) {
width_ = encoded_image._encodedWidth;
height_ = encoded_image._encodedHeight;
RTC_CHECK_GT(width_, 0);
RTC_CHECK_GT(height_, 0);
using_capture_timestamps_ = encoded_image._timeStamp == 0;
if (!WriteHeader())
return false;
std::string codec_name;
switch (codec_type_) {
case kRtpVideoVp8:
codec_name = "VP8";
break;
case kRtpVideoVp9:
codec_name = "VP9";
break;
case kRtpVideoH264:
codec_name = "H264";
break;
default:
codec_name = "Unknown";
}
LOG(LS_WARNING) << "Created IVF file " << file_name_
<< " for codec data of type " << codec_name
<< " at resolution " << width_ << " x " << height_
<< ", using " << (using_capture_timestamps_ ? "1" : "90")
<< "kHz clock resolution.";
return true;
}
bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image) {
RTC_DCHECK(file_->Open());
if (num_frames_ == 0 && !InitFromFirstFrame(encoded_image))
return false;
if ((encoded_image._encodedWidth > 0 || encoded_image._encodedHeight > 0) &&
(encoded_image._encodedHeight != height_ ||
encoded_image._encodedWidth != width_)) {
LOG(LS_WARNING)
<< "Incoming frame has different resolution than previous: (" << width_
<< "x" << height_ << ") -> (" << encoded_image._encodedWidth << "x"
<< encoded_image._encodedHeight << ")";
}
int64_t timestamp = using_capture_timestamps_
? encoded_image.capture_time_ms_
: wrap_handler_.Unwrap(encoded_image._timeStamp);
if (last_timestamp_ != -1 && timestamp <= last_timestamp_) {
LOG(LS_WARNING) << "Timestamp not increasing: " << last_timestamp_ << " -> "
<< timestamp;
}
last_timestamp_ = timestamp;
const size_t kFrameHeaderSize = 12;
uint8_t frame_header[kFrameHeaderSize] = {};
ByteWriter<uint32_t>::WriteLittleEndian(
&frame_header[0], static_cast<uint32_t>(encoded_image._length));
ByteWriter<uint64_t>::WriteLittleEndian(&frame_header[4], timestamp);
if (!file_->Write(frame_header, kFrameHeaderSize) ||
!file_->Write(encoded_image._buffer, encoded_image._length)) {
LOG(LS_ERROR) << "Unable to write frame to file " << file_name_;
return false;
}
++num_frames_;
return true;
}
bool IvfFileWriter::Close() {
if (!file_->Open())
return false;
if (num_frames_ == 0) {
// No frame written to file, close and remove it entirely if possible.
file_->CloseFile();
if (remove(file_name_.c_str()) != 0)
LOG(LS_WARNING) << "Failed to remove empty IVF file " << file_name_;
return true;
}
return WriteHeader() && (file_->CloseFile() == 0);
}
} // namespace webrtc
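As a reading aid (not part of the CL), the on-disk layout produced by WriteHeader() and WriteFrame() above is summarized here; all fields are little-endian:

// 32-byte file header (WriteHeader):
//   bytes  0-3   "DKIF" signature
//   bytes  4-5   version (0)
//   bytes  6-7   header size (32)
//   bytes  8-11  fourcc: "VP80", "VP90" or "H264"
//   bytes 12-13  width, bytes 14-15 height
//   bytes 16-19  timebase denominator: 1000 (capture time in ms) or 90000 (RTP)
//   bytes 20-23  timebase numerator (1)
//   bytes 24-27  frame count (rewritten on Close())
//   bytes 28-31  reserved (0)
// Per frame (WriteFrame): a 12-byte frame header holding a 4-byte payload
// length and an 8-byte timestamp, followed by the encoded payload.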

View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
#define WEBRTC_MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_
#include <memory>
#include <string>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/video_frame.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
namespace webrtc {
class IvfFileWriter {
public:
~IvfFileWriter();
static std::unique_ptr<IvfFileWriter> Open(const std::string& file_name,
RtpVideoCodecTypes codec_type);
bool WriteFrame(const EncodedImage& encoded_image);
bool Close();
private:
IvfFileWriter(const std::string& path_name,
std::unique_ptr<FileWrapper> file,
RtpVideoCodecTypes codec_type);
bool WriteHeader();
bool InitFromFirstFrame(const EncodedImage& encoded_image);
const RtpVideoCodecTypes codec_type_;
size_t num_frames_;
uint16_t width_;
uint16_t height_;
int64_t last_timestamp_;
bool using_capture_timestamps_;
rtc::TimestampWrapAroundHandler wrap_handler_;
const std::string file_name_;
std::unique_ptr<FileWrapper> file_;
RTC_DISALLOW_COPY_AND_ASSIGN(IvfFileWriter);
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_UTILITY_IVF_FILE_WRITER_H_

View File

@ -0,0 +1,176 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/video_coding/utility/ivf_file_writer.h"
#include <memory>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/system_wrappers/include/tick_util.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace {
static const int kHeaderSize = 32;
static const int kFrameHeaderSize = 12;
static uint8_t dummy_payload[4] = {0, 1, 2, 3};
} // namespace
class IvfFileWriterTest : public ::testing::Test {
protected:
void SetUp() override {
const int64_t start_id =
reinterpret_cast<int64_t>(this) ^ TickTime::MicrosecondTimestamp();
int64_t id = start_id;
do {
std::ostringstream oss;
oss << test::OutputPath() << "ivf_test_file_" << id++ << ".ivf";
file_name_ = oss.str();
} while (id < start_id + 100 && FileExists());
ASSERT_LT(id, start_id + 100);
}
bool WriteDummyTestFrames(int width,
int height,
int num_frames,
bool use_capture_tims_ms) {
EncodedImage frame;
frame._buffer = dummy_payload;
frame._encodedWidth = width;
frame._encodedHeight = height;
for (int i = 1; i <= num_frames; ++i) {
frame._length = i % sizeof(dummy_payload);
if (use_capture_tims_ms) {
frame.capture_time_ms_ = i;
} else {
frame._timeStamp = i;
}
if (!file_writer_->WriteFrame(frame))
return false;
}
return true;
}
void VerifyIvfHeader(FileWrapper* file,
const uint8_t fourcc[4],
int width,
int height,
uint32_t num_frames,
bool use_capture_tims_ms) {
uint8_t data[kHeaderSize];
ASSERT_EQ(kHeaderSize, file->Read(data, kHeaderSize));
uint8_t dkif[4] = {'D', 'K', 'I', 'F'};
EXPECT_EQ(0, memcmp(dkif, data, 4));
EXPECT_EQ(0u, ByteReader<uint16_t>::ReadLittleEndian(&data[4]));
EXPECT_EQ(32u, ByteReader<uint16_t>::ReadLittleEndian(&data[6]));
EXPECT_EQ(0, memcmp(fourcc, &data[8], 4));
EXPECT_EQ(width, ByteReader<uint16_t>::ReadLittleEndian(&data[12]));
EXPECT_EQ(height, ByteReader<uint16_t>::ReadLittleEndian(&data[14]));
EXPECT_EQ(use_capture_tims_ms ? 1000u : 90000u,
ByteReader<uint32_t>::ReadLittleEndian(&data[16]));
EXPECT_EQ(1u, ByteReader<uint32_t>::ReadLittleEndian(&data[20]));
EXPECT_EQ(num_frames, ByteReader<uint32_t>::ReadLittleEndian(&data[24]));
EXPECT_EQ(0u, ByteReader<uint32_t>::ReadLittleEndian(&data[28]));
}
void VerifyDummyTestFrames(FileWrapper* file, uint32_t num_frames) {
const int kMaxFrameSize = 4;
for (uint32_t i = 1; i <= num_frames; ++i) {
uint8_t frame_header[kFrameHeaderSize];
ASSERT_EQ(kFrameHeaderSize, file->Read(frame_header, kFrameHeaderSize));
uint32_t frame_length =
ByteReader<uint32_t>::ReadLittleEndian(&frame_header[0]);
EXPECT_EQ(i % 4, frame_length);
uint64_t timestamp =
ByteReader<uint64_t>::ReadLittleEndian(&frame_header[4]);
EXPECT_EQ(i, timestamp);
uint8_t data[kMaxFrameSize] = {};
ASSERT_EQ(frame_length,
static_cast<uint32_t>(file->Read(data, frame_length)));
EXPECT_EQ(0, memcmp(data, dummy_payload, frame_length));
}
}
void RunBasicFileStructureTest(RtpVideoCodecTypes codec_type,
const uint8_t fourcc[4],
bool use_capture_tims_ms) {
file_writer_ = IvfFileWriter::Open(file_name_, codec_type);
ASSERT_TRUE(file_writer_.get());
const int kWidth = 320;
const int kHeight = 240;
const int kNumFrames = 257;
EXPECT_TRUE(
WriteDummyTestFrames(kWidth, kHeight, kNumFrames, use_capture_tims_ms));
EXPECT_TRUE(file_writer_->Close());
std::unique_ptr<FileWrapper> out_file(FileWrapper::Create());
ASSERT_EQ(0, out_file->OpenFile(file_name_.c_str(), true));
VerifyIvfHeader(out_file.get(), fourcc, kWidth, kHeight, kNumFrames,
use_capture_tims_ms);
VerifyDummyTestFrames(out_file.get(), kNumFrames);
EXPECT_EQ(0, out_file->CloseFile());
EXPECT_EQ(0, remove(file_name_.c_str()));
}
bool FileExists() {
std::unique_ptr<FileWrapper> file_wrapper(FileWrapper::Create());
return file_wrapper->OpenFile(file_name_.c_str(), true) == 0;
}
std::string file_name_;
std::unique_ptr<IvfFileWriter> file_writer_;
};
TEST_F(IvfFileWriterTest, RemovesUnusedFile) {
file_writer_ = IvfFileWriter::Open(file_name_, kRtpVideoVp8);
ASSERT_TRUE(file_writer_.get() != nullptr);
EXPECT_TRUE(FileExists());
EXPECT_TRUE(file_writer_->Close());
EXPECT_FALSE(FileExists());
EXPECT_FALSE(file_writer_->Close()); // Can't close twice.
}
TEST_F(IvfFileWriterTest, WritesBasicVP8FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
RunBasicFileStructureTest(kRtpVideoVp8, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicVP8FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
RunBasicFileStructureTest(kRtpVideoVp8, fourcc, true);
}
TEST_F(IvfFileWriterTest, WritesBasicVP9FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
RunBasicFileStructureTest(kRtpVideoVp9, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
RunBasicFileStructureTest(kRtpVideoVp9, fourcc, true);
}
TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
RunBasicFileStructureTest(kRtpVideoH264, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicH264FileMsTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
RunBasicFileStructureTest(kRtpVideoH264, fourcc, true);
}
} // namespace webrtc

View File

@ -20,6 +20,8 @@
'sources': [
'frame_dropper.cc',
'frame_dropper.h',
'ivf_file_writer.cc',
'ivf_file_writer.h',
'moving_average.h',
'qp_parser.cc',
'qp_parser.h',

View File

@ -14,6 +14,7 @@
#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/modules/video_coding/encoded_frame.h"
#include "webrtc/modules/video_coding/jitter_buffer.h"
@ -77,7 +78,8 @@ class VideoCodingModuleImpl : public VideoCodingModule {
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback,
NackSender* nack_sender,
KeyFrameRequestSender* keyframe_request_sender)
KeyFrameRequestSender* keyframe_request_sender,
EncodedImageCallback* pre_decode_image_callback)
: VideoCodingModule(),
sender_(clock,
&post_encode_callback_,
@ -85,6 +87,7 @@ class VideoCodingModuleImpl : public VideoCodingModule {
qm_settings_callback),
receiver_(clock,
event_factory,
pre_decode_image_callback,
nack_sender,
keyframe_request_sender),
own_event_factory_(owns_event_factory ? event_factory : NULL) {}
@ -273,10 +276,6 @@ class VideoCodingModuleImpl : public VideoCodingModule {
return receiver_.SetReceiveChannelParameters(rtt);
}
void RegisterPreDecodeImageCallback(EncodedImageCallback* observer) override {
receiver_.RegisterPreDecodeImageCallback(observer);
}
void RegisterPostEncodeImageCallback(
EncodedImageCallback* observer) override {
post_encode_callback_.Register(observer);
@ -305,7 +304,8 @@ VideoCodingModule* VideoCodingModule::Create(
return VideoCodingModule::Create(clock, encoder_rate_observer,
qm_settings_callback,
nullptr, // NackSender
nullptr); // KeyframeRequestSender
nullptr, // KeyframeRequestSender
nullptr); // Pre-decode image callback
}
// Create method for the new jitter buffer.
@ -314,11 +314,12 @@ VideoCodingModule* VideoCodingModule::Create(
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback,
NackSender* nack_sender,
KeyFrameRequestSender* keyframe_request_sender) {
KeyFrameRequestSender* keyframe_request_sender,
EncodedImageCallback* pre_decode_image_callback) {
return new VideoCodingModuleImpl(clock, new EventFactoryImpl, true,
encoder_rate_observer, qm_settings_callback,
nack_sender,
keyframe_request_sender);
nack_sender, keyframe_request_sender,
pre_decode_image_callback);
}
// Create method for current interface, will be removed when the
@ -340,7 +341,7 @@ VideoCodingModule* VideoCodingModule::Create(
assert(event_factory);
return new VideoCodingModuleImpl(clock, event_factory, false, nullptr,
nullptr, nack_sender,
keyframe_request_sender);
keyframe_request_sender, nullptr);
}
} // namespace webrtc
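To illustrate the constructor injection of the pre-decode image callback above (which replaces RegisterPreDecodeImageCallback), here is a hedged sketch of a caller-side callback that forwards incoming encoded frames to the IvfFileWriter added in this CL. The IvfDumpCallback class and the include list beyond ivf_file_writer.h are assumptions for illustration only:

#include <memory>

#include "webrtc/modules/video_coding/include/video_coding.h"
#include "webrtc/modules/video_coding/utility/ivf_file_writer.h"

// Hypothetical wrapper: write every pre-decode encoded frame to an IVF file.
class IvfDumpCallback : public webrtc::EncodedImageCallback {
 public:
  explicit IvfDumpCallback(std::unique_ptr<webrtc::IvfFileWriter> writer)
      : writer_(std::move(writer)) {}

  int32_t Encoded(const webrtc::EncodedImage& encoded_image,
                  const webrtc::CodecSpecificInfo* /*codec_specific*/,
                  const webrtc::RTPFragmentationHeader* /*fragmentation*/) override {
    return writer_ && writer_->WriteFrame(encoded_image) ? 0 : -1;
  }

 private:
  std::unique_ptr<webrtc::IvfFileWriter> writer_;
};

// With this CL the callback is handed to Create() up front instead of being
// registered later through RegisterPreDecodeImageCallback():
//   VideoCodingModule::Create(clock, encoder_rate_observer, qm_settings_callback,
//                             nack_sender, keyframe_request_sender,
//                             &ivf_dump_callback);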

View File

@ -16,6 +16,7 @@
#include <memory>
#include <vector>
#include "webrtc/frame_callback.h"
#include "webrtc/base/onetimeevent.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/base/thread_checker.h"
@ -29,7 +30,6 @@
#include "webrtc/modules/video_coding/timing.h"
#include "webrtc/modules/video_coding/utility/qp_parser.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
@ -103,12 +103,12 @@ class VideoSender {
Clock* const clock_;
std::unique_ptr<CriticalSectionWrapper> process_crit_sect_;
rtc::CriticalSection process_crit_;
rtc::CriticalSection encoder_crit_;
VCMGenericEncoder* _encoder;
VCMEncodedFrameCallback _encodedFrameCallback GUARDED_BY(encoder_crit_);
media_optimization::MediaOptimization _mediaOpt;
VCMSendStatisticsCallback* _sendStatsCallback GUARDED_BY(process_crit_sect_);
VCMSendStatisticsCallback* _sendStatsCallback GUARDED_BY(process_crit_);
VCMCodecDataBase _codecDataBase GUARDED_BY(encoder_crit_);
bool frame_dropper_enabled_ GUARDED_BY(encoder_crit_);
VCMProcessTimer _sendStatsTimer;
@ -132,6 +132,7 @@ class VideoReceiver {
VideoReceiver(Clock* clock,
EventFactory* event_factory,
EncodedImageCallback* pre_decode_image_callback,
NackSender* nack_sender = nullptr,
KeyFrameRequestSender* keyframe_request_sender = nullptr);
~VideoReceiver();
@ -179,42 +180,36 @@ class VideoReceiver {
int64_t TimeUntilNextProcess();
void Process();
void RegisterPreDecodeImageCallback(EncodedImageCallback* observer);
void TriggerDecoderShutdown();
protected:
int32_t Decode(const webrtc::VCMEncodedFrame& frame)
EXCLUSIVE_LOCKS_REQUIRED(_receiveCritSect);
EXCLUSIVE_LOCKS_REQUIRED(receive_crit_);
int32_t RequestKeyFrame();
int32_t RequestSliceLossIndication(const uint64_t pictureID) const;
private:
Clock* const clock_;
std::unique_ptr<CriticalSectionWrapper> process_crit_sect_;
CriticalSectionWrapper* _receiveCritSect;
rtc::CriticalSection process_crit_;
rtc::CriticalSection receive_crit_;
VCMTiming _timing;
VCMReceiver _receiver;
VCMDecodedFrameCallback _decodedFrameCallback;
VCMFrameTypeCallback* _frameTypeCallback GUARDED_BY(process_crit_sect_);
VCMReceiveStatisticsCallback* _receiveStatsCallback
GUARDED_BY(process_crit_sect_);
VCMDecoderTimingCallback* _decoderTimingCallback
GUARDED_BY(process_crit_sect_);
VCMPacketRequestCallback* _packetRequestCallback
GUARDED_BY(process_crit_sect_);
VCMFrameTypeCallback* _frameTypeCallback GUARDED_BY(process_crit_);
VCMReceiveStatisticsCallback* _receiveStatsCallback GUARDED_BY(process_crit_);
VCMDecoderTimingCallback* _decoderTimingCallback GUARDED_BY(process_crit_);
VCMPacketRequestCallback* _packetRequestCallback GUARDED_BY(process_crit_);
VCMRenderBufferSizeCallback* render_buffer_callback_
GUARDED_BY(process_crit_sect_);
GUARDED_BY(process_crit_);
VCMGenericDecoder* _decoder;
#ifdef DEBUG_DECODER_BIT_STREAM
FILE* _bitStreamBeforeDecoder;
#endif
VCMFrameBuffer _frameFromFile;
bool _scheduleKeyRequest GUARDED_BY(process_crit_sect_);
bool drop_frames_until_keyframe_ GUARDED_BY(process_crit_sect_);
size_t max_nack_list_size_ GUARDED_BY(process_crit_sect_);
VCMCodecDataBase _codecDataBase GUARDED_BY(_receiveCritSect);
EncodedImageCallback* pre_decode_image_callback_ GUARDED_BY(_receiveCritSect);
VCMFrameBuffer _frameFromFile;
bool _scheduleKeyRequest GUARDED_BY(process_crit_);
bool drop_frames_until_keyframe_ GUARDED_BY(process_crit_);
size_t max_nack_list_size_ GUARDED_BY(process_crit_);
VCMCodecDataBase _codecDataBase GUARDED_BY(receive_crit_);
EncodedImageCallback* pre_decode_image_callback_;
VCMProcessTimer _receiveStatsTimer;
VCMProcessTimer _retransmissionTimer;

View File

@ -20,18 +20,15 @@
#include "webrtc/modules/video_coding/video_coding_impl.h"
#include "webrtc/system_wrappers/include/clock.h"
// #define DEBUG_DECODER_BIT_STREAM
namespace webrtc {
namespace vcm {
VideoReceiver::VideoReceiver(Clock* clock,
EventFactory* event_factory,
EncodedImageCallback* pre_decode_image_callback,
NackSender* nack_sender,
KeyFrameRequestSender* keyframe_request_sender)
: clock_(clock),
process_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
_receiveCritSect(CriticalSectionWrapper::CreateCriticalSection()),
_timing(clock_),
_receiver(&_timing,
clock_,
@ -39,50 +36,37 @@ VideoReceiver::VideoReceiver(Clock* clock,
nack_sender,
keyframe_request_sender),
_decodedFrameCallback(&_timing, clock_),
_frameTypeCallback(NULL),
_receiveStatsCallback(NULL),
_decoderTimingCallback(NULL),
_packetRequestCallback(NULL),
render_buffer_callback_(NULL),
_decoder(NULL),
#ifdef DEBUG_DECODER_BIT_STREAM
_bitStreamBeforeDecoder(NULL),
#endif
_frameTypeCallback(nullptr),
_receiveStatsCallback(nullptr),
_decoderTimingCallback(nullptr),
_packetRequestCallback(nullptr),
render_buffer_callback_(nullptr),
_decoder(nullptr),
_frameFromFile(),
_scheduleKeyRequest(false),
drop_frames_until_keyframe_(false),
max_nack_list_size_(0),
_codecDataBase(nullptr, nullptr),
pre_decode_image_callback_(NULL),
pre_decode_image_callback_(pre_decode_image_callback),
_receiveStatsTimer(1000, clock_),
_retransmissionTimer(10, clock_),
_keyRequestTimer(500, clock_) {
assert(clock_);
#ifdef DEBUG_DECODER_BIT_STREAM
_bitStreamBeforeDecoder = fopen("decoderBitStream.bit", "wb");
#endif
}
_keyRequestTimer(500, clock_) {}
VideoReceiver::~VideoReceiver() {
delete _receiveCritSect;
#ifdef DEBUG_DECODER_BIT_STREAM
fclose(_bitStreamBeforeDecoder);
#endif
}
VideoReceiver::~VideoReceiver() {}
void VideoReceiver::Process() {
// Receive-side statistics
if (_receiveStatsTimer.TimeUntilProcess() == 0) {
_receiveStatsTimer.Processed();
CriticalSectionScoped cs(process_crit_sect_.get());
if (_receiveStatsCallback != NULL) {
rtc::CritScope cs(&process_crit_);
if (_receiveStatsCallback != nullptr) {
uint32_t bitRate;
uint32_t frameRate;
_receiver.ReceiveStatistics(&bitRate, &frameRate);
_receiveStatsCallback->OnReceiveRatesUpdated(bitRate, frameRate);
}
if (_decoderTimingCallback != NULL) {
if (_decoderTimingCallback != nullptr) {
int decode_ms;
int max_decode_ms;
int current_delay_ms;
@ -110,8 +94,8 @@ void VideoReceiver::Process() {
_keyRequestTimer.Processed();
bool request_key_frame = false;
{
CriticalSectionScoped cs(process_crit_sect_.get());
request_key_frame = _scheduleKeyRequest && _frameTypeCallback != NULL;
rtc::CritScope cs(&process_crit_);
request_key_frame = _scheduleKeyRequest && _frameTypeCallback != nullptr;
}
if (request_key_frame)
RequestKeyFrame();
@ -129,9 +113,9 @@ void VideoReceiver::Process() {
bool callback_registered = false;
uint16_t length;
{
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
length = max_nack_list_size_;
callback_registered = _packetRequestCallback != NULL;
callback_registered = _packetRequestCallback != nullptr;
}
if (callback_registered && length > 0) {
// Collect sequence numbers from the default receiver.
@ -142,8 +126,8 @@ void VideoReceiver::Process() {
ret = RequestKeyFrame();
}
if (ret == VCM_OK && !nackList.empty()) {
CriticalSectionScoped cs(process_crit_sect_.get());
if (_packetRequestCallback != NULL) {
rtc::CritScope cs(&process_crit_);
if (_packetRequestCallback != nullptr) {
_packetRequestCallback->ResendPackets(&nackList[0], nackList.size());
}
}
@ -168,7 +152,7 @@ int64_t VideoReceiver::TimeUntilNextProcess() {
}
int32_t VideoReceiver::SetReceiveChannelParameters(int64_t rtt) {
CriticalSectionScoped receiveCs(_receiveCritSect);
rtc::CritScope cs(&receive_crit_);
_receiver.UpdateRtt(rtt);
return 0;
}
@ -189,7 +173,7 @@ int32_t VideoReceiver::SetVideoProtection(VCMVideoProtection videoProtection,
}
case kProtectionNackFEC: {
CriticalSectionScoped cs(_receiveCritSect);
rtc::CritScope cs(&receive_crit_);
RTC_DCHECK(enable);
_receiver.SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
_receiver.SetDecodeErrorMode(kNoErrors);
@ -210,14 +194,14 @@ int32_t VideoReceiver::SetVideoProtection(VCMVideoProtection videoProtection,
// ready for rendering.
int32_t VideoReceiver::RegisterReceiveCallback(
VCMReceiveCallback* receiveCallback) {
CriticalSectionScoped cs(_receiveCritSect);
rtc::CritScope cs(&receive_crit_);
_decodedFrameCallback.SetUserReceiveCallback(receiveCallback);
return VCM_OK;
}
int32_t VideoReceiver::RegisterReceiveStatisticsCallback(
VCMReceiveStatisticsCallback* receiveStats) {
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
_receiver.RegisterStatsCallback(receiveStats);
_receiveStatsCallback = receiveStats;
return VCM_OK;
@ -225,7 +209,7 @@ int32_t VideoReceiver::RegisterReceiveStatisticsCallback(
int32_t VideoReceiver::RegisterDecoderTimingCallback(
VCMDecoderTimingCallback* decoderTiming) {
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
_decoderTimingCallback = decoderTiming;
return VCM_OK;
}
@ -233,10 +217,10 @@ int32_t VideoReceiver::RegisterDecoderTimingCallback(
// Register an externally defined decoder object.
void VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
uint8_t payloadType) {
CriticalSectionScoped cs(_receiveCritSect);
if (externalDecoder == NULL) {
rtc::CritScope cs(&receive_crit_);
if (externalDecoder == nullptr) {
// Make sure the VCM updates the decoder next time it decodes.
_decoder = NULL;
_decoder = nullptr;
RTC_CHECK(_codecDataBase.DeregisterExternalDecoder(payloadType));
return;
}
@ -246,21 +230,21 @@ void VideoReceiver::RegisterExternalDecoder(VideoDecoder* externalDecoder,
// Register a frame type request callback.
int32_t VideoReceiver::RegisterFrameTypeCallback(
VCMFrameTypeCallback* frameTypeCallback) {
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
_frameTypeCallback = frameTypeCallback;
return VCM_OK;
}
int32_t VideoReceiver::RegisterPacketRequestCallback(
VCMPacketRequestCallback* callback) {
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
_packetRequestCallback = callback;
return VCM_OK;
}
int VideoReceiver::RegisterRenderBufferSizeCallback(
VCMRenderBufferSizeCallback* callback) {
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
render_buffer_callback_ = callback;
return VCM_OK;
}
@ -275,7 +259,7 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
int64_t nextRenderTimeMs;
bool prefer_late_decoding = false;
{
CriticalSectionScoped cs(_receiveCritSect);
rtc::CritScope cs(&receive_crit_);
prefer_late_decoding = _codecDataBase.PrefersLateDecoding();
}
@ -286,7 +270,7 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
return VCM_FRAME_NOT_READY;
{
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
if (drop_frames_until_keyframe_) {
// Still getting delta frames, schedule another keyframe request as if
// decode failed.
@ -298,11 +282,6 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
drop_frames_until_keyframe_ = false;
}
}
CriticalSectionScoped cs(_receiveCritSect);
// If this frame was too late, we should adjust the delay accordingly
_timing.UpdateCurrentDelay(frame->RenderTimeMs(),
clock_->TimeInMilliseconds());
if (pre_decode_image_callback_) {
EncodedImage encoded_image(frame->EncodedImage());
@ -311,18 +290,13 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
encoded_image.qp_ = qp;
}
pre_decode_image_callback_->Encoded(encoded_image, frame->CodecSpecific(),
NULL);
nullptr);
}
#ifdef DEBUG_DECODER_BIT_STREAM
if (_bitStreamBeforeDecoder != NULL) {
// Write bit stream to file for debugging purposes
if (fwrite(frame->Buffer(), 1, frame->Length(), _bitStreamBeforeDecoder) !=
frame->Length()) {
return -1;
}
}
#endif
rtc::CritScope cs(&receive_crit_);
// If this frame was too late, we should adjust the delay accordingly
_timing.UpdateCurrentDelay(frame->RenderTimeMs(),
clock_->TimeInMilliseconds());
if (first_frame_received_()) {
LOG(LS_INFO) << "Received first "
@ -338,8 +312,8 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
int32_t VideoReceiver::RequestSliceLossIndication(
const uint64_t pictureID) const {
TRACE_EVENT1("webrtc", "RequestSLI", "picture_id", pictureID);
CriticalSectionScoped cs(process_crit_sect_.get());
if (_frameTypeCallback != NULL) {
rtc::CritScope cs(&process_crit_);
if (_frameTypeCallback != nullptr) {
const int32_t ret =
_frameTypeCallback->SliceLossIndicationRequest(pictureID);
if (ret < 0) {
@ -353,8 +327,8 @@ int32_t VideoReceiver::RequestSliceLossIndication(
int32_t VideoReceiver::RequestKeyFrame() {
TRACE_EVENT0("webrtc", "RequestKeyFrame");
CriticalSectionScoped process_cs(process_crit_sect_.get());
if (_frameTypeCallback != NULL) {
rtc::CritScope cs(&process_crit_);
if (_frameTypeCallback != nullptr) {
const int32_t ret = _frameTypeCallback->RequestKeyFrame();
if (ret < 0) {
return ret;
@ -372,7 +346,7 @@ int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
"type", frame.FrameType());
// Change decoder if payload type has changed
_decoder = _codecDataBase.GetDecoder(frame, &_decodedFrameCallback);
if (_decoder == NULL) {
if (_decoder == nullptr) {
return VCM_NO_CODEC_REGISTERED;
}
// Decode a frame
@ -396,7 +370,7 @@ int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
ret = VCM_OK;
}
if (request_key_frame) {
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
_scheduleKeyRequest = true;
}
TRACE_EVENT_ASYNC_END0("webrtc", "Video", frame.TimeStamp());
@ -407,8 +381,8 @@ int32_t VideoReceiver::Decode(const VCMEncodedFrame& frame) {
int32_t VideoReceiver::RegisterReceiveCodec(const VideoCodec* receiveCodec,
int32_t numberOfCores,
bool requireKeyFrame) {
CriticalSectionScoped cs(_receiveCritSect);
if (receiveCodec == NULL) {
rtc::CritScope cs(&receive_crit_);
if (receiveCodec == nullptr) {
return VCM_PARAMETER_ERROR;
}
if (!_codecDataBase.RegisterReceiveCodec(receiveCodec, numberOfCores,
@ -420,8 +394,8 @@ int32_t VideoReceiver::RegisterReceiveCodec(const VideoCodec* receiveCodec,
// Get current received codec
int32_t VideoReceiver::ReceiveCodec(VideoCodec* currentReceiveCodec) const {
CriticalSectionScoped cs(_receiveCritSect);
if (currentReceiveCodec == NULL) {
rtc::CritScope cs(&receive_crit_);
if (currentReceiveCodec == nullptr) {
return VCM_PARAMETER_ERROR;
}
return _codecDataBase.ReceiveCodec(currentReceiveCodec) ? 0 : -1;
@ -429,7 +403,7 @@ int32_t VideoReceiver::ReceiveCodec(VideoCodec* currentReceiveCodec) const {
// Get current received codec
VideoCodecType VideoReceiver::ReceiveCodec() const {
CriticalSectionScoped cs(_receiveCritSect);
rtc::CritScope cs(&receive_crit_);
return _codecDataBase.ReceiveCodec();
}
@ -441,7 +415,7 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
TRACE_EVENT1("webrtc", "VCM::PacketKeyFrame", "seqnum",
rtpInfo.header.sequenceNumber);
}
if (incomingPayload == NULL) {
if (incomingPayload == nullptr) {
// The jitter buffer doesn't handle non-zero payload lengths for packets
// without payload.
// TODO(holmer): We should fix this in the jitter buffer.
@ -450,11 +424,12 @@ int32_t VideoReceiver::IncomingPacket(const uint8_t* incomingPayload,
const VCMPacket packet(incomingPayload, payloadLength, rtpInfo);
int32_t ret = _receiver.InsertPacket(packet, rtpInfo.type.Video.width,
rtpInfo.type.Video.height);
// TODO(holmer): Investigate if this somehow should use the key frame
// request scheduling to throttle the requests.
if (ret == VCM_FLUSH_INDICATOR) {
{
CriticalSectionScoped process_cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
drop_frames_until_keyframe_ = true;
}
RequestKeyFrame();
@ -491,7 +466,7 @@ uint32_t VideoReceiver::DiscardedPackets() const {
int VideoReceiver::SetReceiverRobustnessMode(
ReceiverRobustness robustnessMode,
VCMDecodeErrorMode decode_error_mode) {
CriticalSectionScoped cs(_receiveCritSect);
rtc::CritScope cs(&receive_crit_);
switch (robustnessMode) {
case VideoCodingModule::kNone:
_receiver.SetNackMode(kNoNack, -1, -1);
@ -527,7 +502,7 @@ int VideoReceiver::SetReceiverRobustnessMode(
}
void VideoReceiver::SetDecodeErrorMode(VCMDecodeErrorMode decode_error_mode) {
CriticalSectionScoped cs(_receiveCritSect);
rtc::CritScope cs(&receive_crit_);
_receiver.SetDecodeErrorMode(decode_error_mode);
}
@ -535,7 +510,7 @@ void VideoReceiver::SetNackSettings(size_t max_nack_list_size,
int max_packet_age_to_nack,
int max_incomplete_time_ms) {
if (max_nack_list_size != 0) {
CriticalSectionScoped process_cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
max_nack_list_size_ = max_nack_list_size;
}
_receiver.SetNackSettings(max_nack_list_size, max_packet_age_to_nack,
@ -546,11 +521,5 @@ int VideoReceiver::SetMinReceiverDelay(int desired_delay_ms) {
return _receiver.SetMinReceiverDelay(desired_delay_ms);
}
void VideoReceiver::RegisterPreDecodeImageCallback(
EncodedImageCallback* observer) {
CriticalSectionScoped cs(_receiveCritSect);
pre_decode_image_callback_ = observer;
}
} // namespace vcm
} // namespace webrtc

View File

@ -33,7 +33,7 @@ class TestVideoReceiver : public ::testing::Test {
TestVideoReceiver() : clock_(0) {}
virtual void SetUp() {
receiver_.reset(new VideoReceiver(&clock_, &event_factory_));
receiver_.reset(new VideoReceiver(&clock_, &event_factory_, nullptr));
receiver_->RegisterExternalDecoder(&decoder_, kUnusedPayloadType);
const size_t kMaxNackListSize = 250;
const int kMaxPacketAgeToNack = 450;

View File

@ -29,7 +29,6 @@ VideoSender::VideoSender(Clock* clock,
VideoEncoderRateObserver* encoder_rate_observer,
VCMQMSettingsCallback* qm_settings_callback)
: clock_(clock),
process_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
_encoder(nullptr),
_encodedFrameCallback(post_encode_callback),
_mediaOpt(clock_),
@ -56,7 +55,7 @@ VideoSender::~VideoSender() {}
void VideoSender::Process() {
if (_sendStatsTimer.TimeUntilProcess() == 0) {
_sendStatsTimer.Processed();
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
if (_sendStatsCallback != nullptr) {
uint32_t bitRate = _mediaOpt.SentBitRate();
uint32_t frameRate = _mediaOpt.SentFrameRate();
@ -249,7 +248,7 @@ int32_t VideoSender::RegisterTransportCallback(
// average frame rate and bit rate.
int32_t VideoSender::RegisterSendStatisticsCallback(
VCMSendStatisticsCallback* sendStats) {
CriticalSectionScoped cs(process_crit_sect_.get());
rtc::CritScope cs(&process_crit_);
_sendStatsCallback = sendStats;
return VCM_OK;
}