Reland of Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead. (patchset #1 id:1 of https://codereview.webrtc.org/1903193002/ )
Reason for revert:
A fix is being prepared downstream so this can now go in.

Original issue's description:
> Revert of Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead. (patchset #5 id:80001 of https://codereview.webrtc.org/1897233002/ )
>
> Reason for revert:
> API changes broke downstream.
>
> Original issue's description:
> > Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead.
> > EncodedImageCallback is used by all encoder implementations and seems to be what we should try to use in the transport. EncodedImageCallback can of course be cleaned up in the future.
> >
> > This moves creation of RTPVideoHeader from the GenericEncoder to the PayLoadRouter.
> >
> > BUG=webrtc:5687
> >
> > Committed: https://crrev.com/f5d55aaecdc39e9cc66eb6e87614f04afe28f6eb
> > Cr-Commit-Position: refs/heads/master@{#12436}
>
> TBR=stefan@webrtc.org,pbos@webrtc.org,perkj@webrtc.org
> # Skipping CQ checks because original CL landed less than 1 days ago.
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=webrtc:5687
>
> Committed: https://crrev.com/a261e6136655af33f283eda8e60a6dd93dd746a4
> Cr-Commit-Position: refs/heads/master@{#12441}

TBR=stefan@webrtc.org,pbos@webrtc.org,perkj@webrtc.org
# Skipping CQ checks because original CL landed less than 1 days ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:5687

Review URL: https://codereview.webrtc.org/1905583002

Cr-Commit-Position: refs/heads/master@{#12442}
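For orientation, the shape of the new callback path can be sketched: the encoder wrapper now hands every frame to an EncodedImageCallback, and the receiver of that callback (the PayloadRouter in this CL) builds the RTPVideoHeader from the CodecSpecificInfo itself before routing the frame to an RTP module. The snippet below is a minimal illustration of that interface only, not the actual PayloadRouter implementation; the class, its members, and the include paths (2016 tree layout) are assumptions made for the sketch.

// Illustrative only: a minimal EncodedImageCallback sink in the spirit of the
// new PayloadRouter. EncodedImage, CodecSpecificInfo, RTPVideoHeader and
// EncodedImageCallback are the real WebRTC types touched by this CL; the
// class itself and its members are invented for this sketch.
#include <cstring>

#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/video_encoder.h"

namespace example {

class EncodedFrameSink : public webrtc::EncodedImageCallback {
 public:
  explicit EncodedFrameSink(int payload_type) : payload_type_(payload_type) {}

  // Called once per encoded frame. The RTP header is now derived here, on the
  // transport side, from CodecSpecificInfo instead of being passed in.
  int32_t Encoded(const webrtc::EncodedImage& encoded_image,
                  const webrtc::CodecSpecificInfo* codec_specific_info,
                  const webrtc::RTPFragmentationHeader* fragmentation) override {
    webrtc::RTPVideoHeader rtp_video_header;
    std::memset(&rtp_video_header, 0, sizeof(rtp_video_header));
    if (codec_specific_info &&
        codec_specific_info->codecType == webrtc::kVideoCodecVP8) {
      // The real CL maps all codec-specific fields via a CopyCodecSpecific()
      // helper moved into payload_router.cc; only one field is shown here.
      rtp_video_header.simulcastIdx =
          codec_specific_info->codecSpecific.VP8.simulcastIdx;
    }
    rtp_video_header.rotation = encoded_image.rotation_;
    // A real router would now pick the RTP module by simulcastIdx and call
    // SendOutgoingData() with payload_type_ and the buffer in encoded_image.
    return 0;
  }

 private:
  const int payload_type_;
};

}  // namespace example

The design point is that RTPVideoHeader becomes a transport-side detail: encoder implementations only report CodecSpecificInfo, and anything RTP-specific is derived at the last moment in the router.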
@@ -238,7 +238,6 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
   memcpy(&send_codec_, &new_send_codec, sizeof(send_codec_));
 
   if (!reset_required) {
-    encoded_frame_callback_->SetPayloadType(send_codec_.plType);
     return true;
   }
 
@@ -249,7 +248,6 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
   ptr_encoder_.reset(
       new VCMGenericEncoder(external_encoder_, encoder_rate_observer_,
                             encoded_frame_callback_, internal_source_));
-  encoded_frame_callback_->SetPayloadType(send_codec_.plType);
   encoded_frame_callback_->SetInternalSource(internal_source_);
   if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
                                max_payload_size_) < 0) {
@@ -21,76 +21,6 @@
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 
 namespace webrtc {
-namespace {
-// Map information from info into rtp. If no relevant information is found
-// in info, rtp is set to NULL.
-void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
-  RTC_DCHECK(info);
-  switch (info->codecType) {
-    case kVideoCodecVP8: {
-      rtp->codec = kRtpVideoVp8;
-      rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
-      rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
-      rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
-      rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
-      rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
-      rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
-      rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
-      rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
-      return;
-    }
-    case kVideoCodecVP9: {
-      rtp->codec = kRtpVideoVp9;
-      rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
-      rtp->codecHeader.VP9.inter_pic_predicted =
-          info->codecSpecific.VP9.inter_pic_predicted;
-      rtp->codecHeader.VP9.flexible_mode =
-          info->codecSpecific.VP9.flexible_mode;
-      rtp->codecHeader.VP9.ss_data_available =
-          info->codecSpecific.VP9.ss_data_available;
-      rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
-      rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
-      rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
-      rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
-      rtp->codecHeader.VP9.temporal_up_switch =
-          info->codecSpecific.VP9.temporal_up_switch;
-      rtp->codecHeader.VP9.inter_layer_predicted =
-          info->codecSpecific.VP9.inter_layer_predicted;
-      rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
-      rtp->codecHeader.VP9.num_spatial_layers =
-          info->codecSpecific.VP9.num_spatial_layers;
-
-      if (info->codecSpecific.VP9.ss_data_available) {
-        rtp->codecHeader.VP9.spatial_layer_resolution_present =
-            info->codecSpecific.VP9.spatial_layer_resolution_present;
-        if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
-          for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
-               ++i) {
-            rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
-            rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
-          }
-        }
-        rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
-      }
-
-      rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
-      for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
-        rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
-      return;
-    }
-    case kVideoCodecH264:
-      rtp->codec = kRtpVideoH264;
-      return;
-    case kVideoCodecGeneric:
-      rtp->codec = kRtpVideoGeneric;
-      rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
-      return;
-    default:
-      return;
-  }
-}
-}  // namespace
-
 VCMGenericEncoder::VCMGenericEncoder(
     VideoEncoder* encoder,
     VideoEncoderRateObserver* rate_observer,
@@ -216,7 +146,6 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
     EncodedImageCallback* post_encode_callback)
     : send_callback_(),
       media_opt_(nullptr),
-      payload_type_(0),
       internal_source_(false),
       post_encode_callback_(post_encode_callback) {}
 
@@ -234,19 +163,8 @@ int32_t VCMEncodedFrameCallback::Encoded(
     const RTPFragmentationHeader* fragmentation_header) {
   TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
                        "timestamp", encoded_image._timeStamp);
-  post_encode_callback_->Encoded(encoded_image, nullptr, nullptr);
-
-  if (send_callback_ == nullptr)
-    return VCM_UNINITIALIZED;
-
-  RTPVideoHeader rtp_video_header;
-  memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
-  if (codec_specific)
-    CopyCodecSpecific(codec_specific, &rtp_video_header);
-  rtp_video_header.rotation = encoded_image.rotation_;
-
-  int32_t ret_val = send_callback_->SendData(
-      payload_type_, encoded_image, fragmentation_header, &rtp_video_header);
+  int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific,
+                                               fragmentation_header);
   if (ret_val < 0)
     return ret_val;
 
@@ -44,7 +44,6 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
                   const RTPFragmentationHeader* fragmentation_header) override;
   int32_t SetTransportCallback(VCMPacketizationCallback* transport);
   void SetMediaOpt(media_optimization::MediaOptimization* media_opt);
-  void SetPayloadType(uint8_t payload_type) { payload_type_ = payload_type; }
   void SetInternalSource(bool internal_source) {
     internal_source_ = internal_source;
   }
@@ -54,7 +53,6 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
  private:
   VCMPacketizationCallback* send_callback_;
   media_optimization::MediaOptimization* media_opt_;
-  uint8_t payload_type_;
   bool internal_source_;
 
   EncodedImageCallback* post_encode_callback_;
@@ -57,13 +57,11 @@ struct VCMFrameCount {
 };
 
 // Callback class used for sending data ready to be packetized
+// Deprecated.
+// TODO(perkj): Remove once OnEncoderImplementationName is not used.
 class VCMPacketizationCallback {
  public:
-  virtual int32_t SendData(uint8_t payloadType,
-                           const EncodedImage& encoded_image,
-                           const RTPFragmentationHeader* fragmentationHeader,
-                           const RTPVideoHeader* rtpVideoHdr) = 0;
-
+  // TODO(perkj): Refactor this. It does not belong in VCMPacketizationCallback.
   virtual void OnEncoderImplementationName(const char* implementation_name) {}
 
  protected:
@@ -18,7 +18,7 @@ namespace webrtc {
 
 IvfFileWriter::IvfFileWriter(const std::string& file_name,
                              std::unique_ptr<FileWrapper> file,
-                             RtpVideoCodecTypes codec_type)
+                             VideoCodecType codec_type)
     : codec_type_(codec_type),
       num_frames_(0),
       width_(0),
@@ -34,9 +34,8 @@ IvfFileWriter::~IvfFileWriter() {
 
 const size_t kIvfHeaderSize = 32;
 
-std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(
-    const std::string& file_name,
-    RtpVideoCodecTypes codec_type) {
+std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(const std::string& file_name,
+                                                   VideoCodecType codec_type) {
   std::unique_ptr<IvfFileWriter> file_writer;
   std::unique_ptr<FileWrapper> file(FileWrapper::Create());
   if (file->OpenFile(file_name.c_str(), false) != 0)
@@ -65,19 +64,19 @@ bool IvfFileWriter::WriteHeader() {
   ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[6], 32);  // Header size.
 
   switch (codec_type_) {
-    case kRtpVideoVp8:
+    case kVideoCodecVP8:
       ivf_header[8] = 'V';
       ivf_header[9] = 'P';
       ivf_header[10] = '8';
       ivf_header[11] = '0';
       break;
-    case kRtpVideoVp9:
+    case kVideoCodecVP9:
       ivf_header[8] = 'V';
       ivf_header[9] = 'P';
       ivf_header[10] = '9';
       ivf_header[11] = '0';
       break;
-    case kRtpVideoH264:
+    case kVideoCodecH264:
      ivf_header[8] = 'H';
       ivf_header[9] = '2';
       ivf_header[10] = '6';
@@ -27,18 +27,18 @@ class IvfFileWriter {
   ~IvfFileWriter();
 
   static std::unique_ptr<IvfFileWriter> Open(const std::string& file_name,
-                                             RtpVideoCodecTypes codec_type);
+                                             VideoCodecType codec_type);
   bool WriteFrame(const EncodedImage& encoded_image);
   bool Close();
 
  private:
   IvfFileWriter(const std::string& path_name,
                 std::unique_ptr<FileWrapper> file,
-                RtpVideoCodecTypes codec_type);
+                VideoCodecType codec_type);
   bool WriteHeader();
   bool InitFromFirstFrame(const EncodedImage& encoded_image);
 
-  const RtpVideoCodecTypes codec_type_;
+  const VideoCodecType codec_type_;
   size_t num_frames_;
   uint16_t width_;
   uint16_t height_;
@@ -103,7 +103,7 @@ class IvfFileWriterTest : public ::testing::Test {
     }
   }
 
-  void RunBasicFileStructureTest(RtpVideoCodecTypes codec_type,
+  void RunBasicFileStructureTest(VideoCodecType codec_type,
                                  const uint8_t fourcc[4],
                                  bool use_capture_tims_ms) {
     file_writer_ = IvfFileWriter::Open(file_name_, codec_type);
@@ -135,7 +135,7 @@ class IvfFileWriterTest : public ::testing::Test {
 };
 
 TEST_F(IvfFileWriterTest, RemovesUnusedFile) {
-  file_writer_ = IvfFileWriter::Open(file_name_, kRtpVideoVp8);
+  file_writer_ = IvfFileWriter::Open(file_name_, kVideoCodecVP8);
   ASSERT_TRUE(file_writer_.get() != nullptr);
   EXPECT_TRUE(FileExists());
   EXPECT_TRUE(file_writer_->Close());
@@ -145,32 +145,32 @@ TEST_F(IvfFileWriterTest, RemovesUnusedFile) {
 
 TEST_F(IvfFileWriterTest, WritesBasicVP8FileNtpTimestamp) {
   const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
-  RunBasicFileStructureTest(kRtpVideoVp8, fourcc, false);
+  RunBasicFileStructureTest(kVideoCodecVP8, fourcc, false);
 }
 
 TEST_F(IvfFileWriterTest, WritesBasicVP8FileMsTimestamp) {
   const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
-  RunBasicFileStructureTest(kRtpVideoVp8, fourcc, true);
+  RunBasicFileStructureTest(kVideoCodecVP8, fourcc, true);
 }
 
 TEST_F(IvfFileWriterTest, WritesBasicVP9FileNtpTimestamp) {
   const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
-  RunBasicFileStructureTest(kRtpVideoVp9, fourcc, false);
+  RunBasicFileStructureTest(kVideoCodecVP9, fourcc, false);
 }
 
 TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) {
   const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
-  RunBasicFileStructureTest(kRtpVideoVp9, fourcc, true);
+  RunBasicFileStructureTest(kVideoCodecVP9, fourcc, true);
 }
 
 TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) {
   const uint8_t fourcc[4] = {'H', '2', '6', '4'};
-  RunBasicFileStructureTest(kRtpVideoH264, fourcc, false);
+  RunBasicFileStructureTest(kVideoCodecH264, fourcc, false);
 }
 
 TEST_F(IvfFileWriterTest, WritesBasicH264FileMsTimestamp) {
   const uint8_t fourcc[4] = {'H', '2', '6', '4'};
-  RunBasicFileStructureTest(kRtpVideoH264, fourcc, true);
+  RunBasicFileStructureTest(kVideoCodecH264, fourcc, true);
 }
 
 }  // namespace webrtc
@@ -54,7 +54,6 @@ class EncodedImageCallbackWrapper : public EncodedImageCallback {
     callback_ = callback;
   }
 
-  // TODO(andresp): Change to void as return value is ignored.
   virtual int32_t Encoded(const EncodedImage& encoded_image,
                           const CodecSpecificInfo* codec_specific_info,
                           const RTPFragmentationHeader* fragmentation) {
@@ -79,6 +79,8 @@ class VideoSender {
                                 uint8_t lossRate,
                                 int64_t rtt);
 
+  // Deprecated. Use |post_encode_callback| instead.
+  // TODO(perkj): Remove once |OnEncoderImplementationName| is not used.
   int32_t RegisterTransportCallback(VCMPacketizationCallback* transport);
   int32_t RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
   int32_t RegisterProtectionCallback(VCMProtectionCallback* protection);
@@ -86,19 +86,19 @@ class EmptyFrameGenerator : public FrameGenerator {
   std::unique_ptr<VideoFrame> frame_;
 };
 
-class PacketizationCallback : public VCMPacketizationCallback {
+class EncodedImageCallbackImpl : public EncodedImageCallback {
  public:
-  explicit PacketizationCallback(Clock* clock)
+  explicit EncodedImageCallbackImpl(Clock* clock)
       : clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
 
-  virtual ~PacketizationCallback() {}
+  virtual ~EncodedImageCallbackImpl() {}
 
-  int32_t SendData(uint8_t payload_type,
-                   const EncodedImage& encoded_image,
-                   const RTPFragmentationHeader* fragmentation_header,
-                   const RTPVideoHeader* rtp_video_header) override {
-    assert(rtp_video_header);
-    frame_data_.push_back(FrameData(encoded_image._length, *rtp_video_header));
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override {
+    assert(codec_specific_info);
+    frame_data_.push_back(
+        FrameData(encoded_image._length, *codec_specific_info));
     return 0;
   }
 
@@ -130,11 +130,12 @@ class PacketizationCallback : public VCMPacketizationCallback {
   struct FrameData {
     FrameData() {}
 
-    FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
-        : payload_size(payload_size), rtp_video_header(rtp_video_header) {}
+    FrameData(size_t payload_size, const CodecSpecificInfo& codec_specific_info)
+        : payload_size(payload_size),
+          codec_specific_info(codec_specific_info) {}
 
     size_t payload_size;
-    RTPVideoHeader rtp_video_header;
+    CodecSpecificInfo codec_specific_info;
   };
 
   int64_t interval_ms() {
@@ -146,9 +147,9 @@ class PacketizationCallback : public VCMPacketizationCallback {
   int CountFramesWithinTemporalLayer(int temporal_layer) {
     int frames = 0;
     for (size_t i = 0; i < frame_data_.size(); ++i) {
-      EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+      EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
       const uint8_t temporal_idx =
-          frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+          frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
       if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
         frames++;
     }
@@ -158,9 +159,9 @@ class PacketizationCallback : public VCMPacketizationCallback {
   size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
     size_t payload_size = 0;
     for (size_t i = 0; i < frame_data_.size(); ++i) {
-      EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+      EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
       const uint8_t temporal_idx =
-          frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+          frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
       if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
         payload_size += frame_data_[i].payload_size;
     }
@@ -176,12 +177,11 @@ class TestVideoSender : public ::testing::Test {
  protected:
   // Note: simulated clock starts at 1 seconds, since parts of webrtc use 0 as
   // a special case (e.g. frame rate in media optimization).
-  TestVideoSender() : clock_(1000), packetization_callback_(&clock_) {}
+  TestVideoSender() : clock_(1000), encoded_frame_callback_(&clock_) {}
 
   void SetUp() override {
     sender_.reset(
-        new VideoSender(&clock_, &post_encode_callback_, nullptr, nullptr));
-    EXPECT_EQ(0, sender_->RegisterTransportCallback(&packetization_callback_));
+        new VideoSender(&clock_, &encoded_frame_callback_, nullptr, nullptr));
   }
 
   void AddFrame() {
@@ -190,8 +190,7 @@ class TestVideoSender : public ::testing::Test {
   }
 
   SimulatedClock clock_;
-  PacketizationCallback packetization_callback_;
-  MockEncodedImageCallback post_encode_callback_;
+  EncodedImageCallbackImpl encoded_frame_callback_;
   // Used by subclassing tests, need to outlive sender_.
   std::unique_ptr<VideoEncoder> encoder_;
   std::unique_ptr<VideoSender> sender_;
@@ -415,8 +414,6 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
   void InsertFrames(float framerate, float seconds) {
     for (int i = 0; i < seconds * framerate; ++i) {
       clock_.AdvanceTimeMilliseconds(1000.0f / framerate);
-      EXPECT_CALL(post_encode_callback_, Encoded(_, NULL, NULL))
-          .WillOnce(Return(0));
       AddFrame();
       // SetChannelParameters needs to be called frequently to propagate
       // framerate from the media optimization into the encoder.
@@ -435,10 +432,10 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
     // It appears that this 5 seconds simulation is needed to allow
     // bitrate and framerate to stabilize.
     InsertFrames(framerate, short_simulation_interval);
-    packetization_callback_.Reset();
+    encoded_frame_callback_.Reset();
 
     InsertFrames(framerate, long_simulation_interval);
-    return packetization_callback_.CalculateVp8StreamInfo();
+    return encoded_frame_callback_.CalculateVp8StreamInfo();
   }
 
  protected:
@@ -35,6 +35,7 @@ class MockVieEncoder : public ViEEncoder {
                    nullptr,
                    nullptr,
                    pacer,
+                   nullptr,
                    nullptr) {}
   ~MockVieEncoder() {}
 
@@ -13,11 +13,85 @@
 #include "webrtc/base/checks.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
 
 namespace webrtc {
 
-PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules)
-    : active_(false), num_sending_modules_(1), rtp_modules_(rtp_modules) {
+namespace {
+// Map information from info into rtp.
+void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
+  RTC_DCHECK(info);
+  switch (info->codecType) {
+    case kVideoCodecVP8: {
+      rtp->codec = kRtpVideoVp8;
+      rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
+      rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
+      rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
+      rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
+      rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
+      rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
+      rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
+      rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
+      return;
+    }
+    case kVideoCodecVP9: {
+      rtp->codec = kRtpVideoVp9;
+      rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
+      rtp->codecHeader.VP9.inter_pic_predicted =
+          info->codecSpecific.VP9.inter_pic_predicted;
+      rtp->codecHeader.VP9.flexible_mode =
+          info->codecSpecific.VP9.flexible_mode;
+      rtp->codecHeader.VP9.ss_data_available =
+          info->codecSpecific.VP9.ss_data_available;
+      rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
+      rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
+      rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
+      rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
+      rtp->codecHeader.VP9.temporal_up_switch =
+          info->codecSpecific.VP9.temporal_up_switch;
+      rtp->codecHeader.VP9.inter_layer_predicted =
+          info->codecSpecific.VP9.inter_layer_predicted;
+      rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
+      rtp->codecHeader.VP9.num_spatial_layers =
+          info->codecSpecific.VP9.num_spatial_layers;
+
+      if (info->codecSpecific.VP9.ss_data_available) {
+        rtp->codecHeader.VP9.spatial_layer_resolution_present =
+            info->codecSpecific.VP9.spatial_layer_resolution_present;
+        if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
+          for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
+               ++i) {
+            rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
+            rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
+          }
+        }
+        rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
+      }
+
+      rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
+      for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
+        rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
+      return;
+    }
+    case kVideoCodecH264:
+      rtp->codec = kRtpVideoH264;
+      return;
+    case kVideoCodecGeneric:
+      rtp->codec = kRtpVideoGeneric;
+      rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
+      return;
+    default:
+      return;
+  }
+}
+}  // namespace
+
+PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
+                             int payload_type)
+    : active_(false),
+      num_sending_modules_(1),
+      rtp_modules_(rtp_modules),
+      payload_type_(payload_type) {
   UpdateModuleSendingState();
 }
 
@@ -60,31 +134,33 @@ void PayloadRouter::UpdateModuleSendingState() {
   }
 }
 
-bool PayloadRouter::RoutePayload(FrameType frame_type,
-                                 int8_t payload_type,
-                                 uint32_t time_stamp,
-                                 int64_t capture_time_ms,
-                                 const uint8_t* payload_data,
-                                 size_t payload_length,
-                                 const RTPFragmentationHeader* fragmentation,
-                                 const RTPVideoHeader* rtp_video_hdr) {
+int32_t PayloadRouter::Encoded(const EncodedImage& encoded_image,
+                               const CodecSpecificInfo* codec_specific_info,
+                               const RTPFragmentationHeader* fragmentation) {
   rtc::CritScope lock(&crit_);
   RTC_DCHECK(!rtp_modules_.empty());
   if (!active_ || num_sending_modules_ == 0)
-    return false;
+    return -1;
 
   int stream_idx = 0;
-  if (rtp_video_hdr) {
-    RTC_DCHECK_LT(rtp_video_hdr->simulcastIdx, rtp_modules_.size());
-    // The simulcast index might actually be larger than the number of modules
-    // in case the encoder was processing a frame during a codec reconfig.
-    if (rtp_video_hdr->simulcastIdx >= num_sending_modules_)
-      return false;
-    stream_idx = rtp_video_hdr->simulcastIdx;
-  }
+
+  RTPVideoHeader rtp_video_header;
+  memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
+  if (codec_specific_info)
+    CopyCodecSpecific(codec_specific_info, &rtp_video_header);
+  rtp_video_header.rotation = encoded_image.rotation_;
+
+  RTC_DCHECK_LT(rtp_video_header.simulcastIdx, rtp_modules_.size());
+  // The simulcast index might actually be larger than the number of modules
+  // in case the encoder was processing a frame during a codec reconfig.
+  if (rtp_video_header.simulcastIdx >= num_sending_modules_)
+    return -1;
+  stream_idx = rtp_video_header.simulcastIdx;
+
   return rtp_modules_[stream_idx]->SendOutgoingData(
-      frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
-      payload_length, fragmentation, rtp_video_hdr) == 0 ? true : false;
+      encoded_image._frameType, payload_type_, encoded_image._timeStamp,
+      encoded_image.capture_time_ms_, encoded_image._buffer,
+      encoded_image._length, fragmentation, &rtp_video_header);
 }
 
 void PayloadRouter::SetTargetSendBitrates(
@@ -17,6 +17,7 @@
 #include "webrtc/base/criticalsection.h"
 #include "webrtc/base/thread_annotations.h"
 #include "webrtc/common_types.h"
+#include "webrtc/video_encoder.h"
 #include "webrtc/system_wrappers/include/atomic32.h"
 
 namespace webrtc {
@@ -27,10 +28,11 @@ struct RTPVideoHeader;
 
 // PayloadRouter routes outgoing data to the correct sending RTP module, based
 // on the simulcast layer in RTPVideoHeader.
-class PayloadRouter {
+class PayloadRouter : public EncodedImageCallback {
  public:
   // Rtp modules are assumed to be sorted in simulcast index order.
-  explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules);
+  explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
+                         int payload_type);
   ~PayloadRouter();
 
   static size_t DefaultMaxPayloadLength();
@@ -41,16 +43,11 @@ class PayloadRouter {
   void set_active(bool active);
   bool active();
 
-  // Input parameters according to the signature of RtpRtcp::SendOutgoingData.
-  // Returns true if the packet was routed / sent, false otherwise.
-  bool RoutePayload(FrameType frame_type,
-                    int8_t payload_type,
-                    uint32_t time_stamp,
-                    int64_t capture_time_ms,
-                    const uint8_t* payload_data,
-                    size_t payload_size,
-                    const RTPFragmentationHeader* fragmentation,
-                    const RTPVideoHeader* rtp_video_hdr);
+  // Implements EncodedImageCallback.
+  // Returns 0 if the packet was routed / sent, -1 otherwise.
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override;
 
   // Configures current target bitrate per module. 'stream_bitrates' is assumed
   // to be in the same order as 'SetSendingRtpModules'.
@@ -69,6 +66,7 @@ class PayloadRouter {
 
   // Rtp modules are assumed to be sorted in simulcast index order. Not owned.
   const std::vector<RtpRtcp*> rtp_modules_;
+  const int payload_type_;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(PayloadRouter);
 };
@@ -14,6 +14,7 @@
 #include "testing/gtest/include/gtest/gtest.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
 #include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/video/payload_router.h"
 
 using ::testing::_;
@@ -27,46 +28,56 @@ TEST(PayloadRouterTest, SendOnOneModule) {
   MockRtpRtcp rtp;
   std::vector<RtpRtcp*> modules(1, &rtp);
 
-  PayloadRouter payload_router(modules);
+  uint8_t payload = 'a';
+  int8_t payload_type = 96;
+  EncodedImage encoded_image;
+  encoded_image._timeStamp = 1;
+  encoded_image.capture_time_ms_ = 2;
+  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._buffer = &payload;
+  encoded_image._length = 1;
 
+  PayloadRouter payload_router(modules, payload_type);
   payload_router.SetSendingRtpModules(modules.size());
 
-  uint8_t payload = 'a';
-  FrameType frame_type = kVideoFrameKey;
-  int8_t payload_type = 96;
-
-  EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
-                                    nullptr, nullptr))
+  EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
+                                    encoded_image._timeStamp,
+                                    encoded_image.capture_time_ms_, &payload,
+                                    encoded_image._length, nullptr, _))
      .Times(0);
-  EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
-                                           &payload, 1, nullptr, nullptr));
+  EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
   payload_router.set_active(true);
-  EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
-                                    nullptr, nullptr))
+  EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                    encoded_image._timeStamp,
+                                    encoded_image.capture_time_ms_, &payload,
+                                    encoded_image._length, nullptr, _))
      .Times(1);
-  EXPECT_TRUE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
-                                          &payload, 1, nullptr, nullptr));
+  EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
   payload_router.set_active(false);
-  EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
-                                    nullptr, nullptr))
+  EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
+                                    encoded_image._timeStamp,
+                                    encoded_image.capture_time_ms_, &payload,
+                                    encoded_image._length, nullptr, _))
      .Times(0);
-  EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
-                                           &payload, 1, nullptr, nullptr));
+  EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
   payload_router.set_active(true);
-  EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
-                                    nullptr, nullptr))
+  EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                    encoded_image._timeStamp,
+                                    encoded_image.capture_time_ms_, &payload,
+                                    encoded_image._length, nullptr, _))
      .Times(1);
-  EXPECT_TRUE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
-                                          &payload, 1, nullptr, nullptr));
+  EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
 
   payload_router.SetSendingRtpModules(0);
-  EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
-                                    nullptr, nullptr))
+  EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
                                    encoded_image._timeStamp,
+                                    encoded_image.capture_time_ms_, &payload,
+                                    encoded_image._length, nullptr, _))
      .Times(0);
-  EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
-                                           &payload, 1, nullptr, nullptr));
+  EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
 }
 
 TEST(PayloadRouterTest, SendSimulcast) {
@@ -76,36 +87,46 @@ TEST(PayloadRouterTest, SendSimulcast) {
   modules.push_back(&rtp_1);
   modules.push_back(&rtp_2);
 
-  PayloadRouter payload_router(modules);
+  int8_t payload_type = 96;
+  uint8_t payload = 'a';
+  EncodedImage encoded_image;
+  encoded_image._timeStamp = 1;
+  encoded_image.capture_time_ms_ = 2;
+  encoded_image._frameType = kVideoFrameKey;
+  encoded_image._buffer = &payload;
+  encoded_image._length = 1;
 
+  PayloadRouter payload_router(modules, payload_type);
   payload_router.SetSendingRtpModules(modules.size());
 
-  uint8_t payload_1 = 'a';
-  FrameType frame_type_1 = kVideoFrameKey;
-  int8_t payload_type_1 = 96;
-  RTPVideoHeader rtp_hdr_1;
-  rtp_hdr_1.simulcastIdx = 0;
+  CodecSpecificInfo codec_info_1;
+  memset(&codec_info_1, 0, sizeof(CodecSpecificInfo));
+  codec_info_1.codecType = kVideoCodecVP8;
+  codec_info_1.codecSpecific.VP8.simulcastIdx = 0;
 
   payload_router.set_active(true);
-  EXPECT_CALL(rtp_1, SendOutgoingData(frame_type_1, payload_type_1, 0, 0, _, 1,
-                                      nullptr, &rtp_hdr_1))
+  EXPECT_CALL(rtp_1, SendOutgoingData(encoded_image._frameType, payload_type,
                                      encoded_image._timeStamp,
+                                      encoded_image.capture_time_ms_, &payload,
+                                      encoded_image._length, nullptr, _))
      .Times(1);
   EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
       .Times(0);
-  EXPECT_TRUE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
-                                          &payload_1, 1, nullptr, &rtp_hdr_1));
+  EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
 
-  uint8_t payload_2 = 'b';
-  FrameType frame_type_2 = kVideoFrameDelta;
-  int8_t payload_type_2 = 97;
-  RTPVideoHeader rtp_hdr_2;
-  rtp_hdr_2.simulcastIdx = 1;
+  CodecSpecificInfo codec_info_2;
+  memset(&codec_info_2, 0, sizeof(CodecSpecificInfo));
+  codec_info_2.codecType = kVideoCodecVP8;
+  codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
 
-  EXPECT_CALL(rtp_2, SendOutgoingData(frame_type_2, payload_type_2, 0, 0, _, 1,
-                                      nullptr, &rtp_hdr_2))
+  EXPECT_CALL(rtp_2, SendOutgoingData(encoded_image._frameType, payload_type,
                                      encoded_image._timeStamp,
+                                      encoded_image.capture_time_ms_, &payload,
+                                      encoded_image._length, nullptr, _))
      .Times(1);
   EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
       .Times(0);
-  EXPECT_TRUE(payload_router.RoutePayload(frame_type_2, payload_type_2, 0, 0,
-                                          &payload_2, 1, nullptr, &rtp_hdr_2));
+  EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
 
   // Inactive.
   payload_router.set_active(false);
@@ -113,10 +134,8 @@ TEST(PayloadRouterTest, SendSimulcast) {
       .Times(0);
   EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
      .Times(0);
-  EXPECT_FALSE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
-                                           &payload_1, 1, nullptr, &rtp_hdr_1));
-  EXPECT_FALSE(payload_router.RoutePayload(frame_type_2, payload_type_2, 0, 0,
-                                           &payload_2, 1, nullptr, &rtp_hdr_2));
+  EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
+  EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
 
   // Invalid simulcast index.
   payload_router.SetSendingRtpModules(1);
@@ -125,9 +144,8 @@ TEST(PayloadRouterTest, SendSimulcast) {
      .Times(0);
   EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
      .Times(0);
-  rtp_hdr_1.simulcastIdx = 1;
-  EXPECT_FALSE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
-                                           &payload_1, 1, nullptr, &rtp_hdr_1));
+  codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
+  EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
 }
 
 TEST(PayloadRouterTest, MaxPayloadLength) {
@@ -139,7 +157,7 @@ TEST(PayloadRouterTest, MaxPayloadLength) {
   std::vector<RtpRtcp*> modules;
   modules.push_back(&rtp_1);
   modules.push_back(&rtp_2);
-  PayloadRouter payload_router(modules);
+  PayloadRouter payload_router(modules, 42);
 
   EXPECT_EQ(kDefaultMaxLength, PayloadRouter::DefaultMaxPayloadLength());
   payload_router.SetSendingRtpModules(modules.size());
@@ -170,7 +188,7 @@ TEST(PayloadRouterTest, SetTargetSendBitrates) {
   std::vector<RtpRtcp*> modules;
   modules.push_back(&rtp_1);
   modules.push_back(&rtp_2);
-  PayloadRouter payload_router(modules);
+  PayloadRouter payload_router(modules, 42);
   payload_router.SetSendingRtpModules(modules.size());
 
   const uint32_t bitrate_1 = 10000;
@@ -17,6 +17,7 @@
 
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
+#include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/system_wrappers/include/metrics.h"
 
 namespace webrtc {
@@ -426,8 +427,17 @@ void SendStatisticsProxy::OnSetRates(uint32_t bitrate_bps, int framerate) {
 
 void SendStatisticsProxy::OnSendEncodedImage(
     const EncodedImage& encoded_image,
-    const RTPVideoHeader* rtp_video_header) {
-  size_t simulcast_idx = rtp_video_header ? rtp_video_header->simulcastIdx : 0;
+    const CodecSpecificInfo* codec_info) {
+  size_t simulcast_idx = 0;
+
+  if (codec_info) {
+    if (codec_info->codecType == kVideoCodecVP8) {
+      simulcast_idx = codec_info->codecSpecific.VP8.simulcastIdx;
+    } else if (codec_info->codecType == kVideoCodecGeneric) {
+      simulcast_idx = codec_info->codecSpecific.generic.simulcast_idx;
+    }
+  }
+
   if (simulcast_idx >= config_.rtp.ssrcs.size()) {
     LOG(LS_ERROR) << "Encoded image outside simulcast range (" << simulcast_idx
                   << " >= " << config_.rtp.ssrcs.size() << ").";
@@ -469,17 +479,16 @@ void SendStatisticsProxy::OnSendEncodedImage(
     }
   }
 
-  if (encoded_image.qp_ != -1 && rtp_video_header) {
-    if (rtp_video_header->codec == kRtpVideoVp8) {
+  if (encoded_image.qp_ != -1 && codec_info) {
+    if (codec_info->codecType == kVideoCodecVP8) {
       int spatial_idx = (config_.rtp.ssrcs.size() == 1)
                             ? -1
                             : static_cast<int>(simulcast_idx);
       uma_container_->qp_counters_[spatial_idx].vp8.Add(encoded_image.qp_);
-    } else if (rtp_video_header->codec == kRtpVideoVp9) {
-      int spatial_idx =
-          (rtp_video_header->codecHeader.VP9.num_spatial_layers == 1)
-              ? -1
-              : rtp_video_header->codecHeader.VP9.spatial_idx;
+    } else if (codec_info->codecType == kVideoCodecVP9) {
+      int spatial_idx = (codec_info->codecSpecific.VP9.num_spatial_layers == 1)
+                            ? -1
+                            : codec_info->codecSpecific.VP9.spatial_idx;
       uma_container_->qp_counters_[spatial_idx].vp9.Add(encoded_image.qp_);
     }
   }
@@ -49,7 +49,7 @@ class SendStatisticsProxy : public CpuOveruseMetricsObserver,
   VideoSendStream::Stats GetStats();
 
   virtual void OnSendEncodedImage(const EncodedImage& encoded_image,
-                                  const RTPVideoHeader* rtp_video_header);
+                                  const CodecSpecificInfo* codec_info);
   // Used to update incoming frame rate.
   void OnIncomingFrame(int width, int height);
 
@ -334,16 +334,16 @@ TEST_F(SendStatisticsProxyTest, SwitchContentTypeUpdatesHistograms) {
|
|||||||
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
|
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
|
||||||
test::ClearHistograms();
|
test::ClearHistograms();
|
||||||
EncodedImage encoded_image;
|
EncodedImage encoded_image;
|
||||||
RTPVideoHeader rtp_video_header;
|
CodecSpecificInfo codec_info;
|
||||||
rtp_video_header.codec = kRtpVideoVp8;
|
codec_info.codecType = kVideoCodecVP8;
|
||||||
|
|
||||||
for (int i = 0; i < kMinRequiredSamples; ++i) {
|
for (int i = 0; i < kMinRequiredSamples; ++i) {
|
||||||
rtp_video_header.simulcastIdx = 0;
|
codec_info.codecSpecific.VP8.simulcastIdx = 0;
|
||||||
encoded_image.qp_ = kQpIdx0;
|
encoded_image.qp_ = kQpIdx0;
|
||||||
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
|
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
|
||||||
rtp_video_header.simulcastIdx = 1;
|
codec_info.codecSpecific.VP8.simulcastIdx = 1;
|
||||||
encoded_image.qp_ = kQpIdx1;
|
encoded_image.qp_ = kQpIdx1;
|
||||||
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
|
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
|
||||||
}
|
}
|
||||||
statistics_proxy_.reset();
|
statistics_proxy_.reset();
|
||||||
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
|
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
|
||||||
@ -362,13 +362,13 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
|
|||||||
|
|
||||||
test::ClearHistograms();
|
test::ClearHistograms();
|
||||||
EncodedImage encoded_image;
|
EncodedImage encoded_image;
|
||||||
RTPVideoHeader rtp_video_header;
|
CodecSpecificInfo codec_info;
|
||||||
rtp_video_header.codec = kRtpVideoVp8;
|
codec_info.codecType = kVideoCodecVP8;
|
||||||
|
|
||||||
for (int i = 0; i < kMinRequiredSamples; ++i) {
|
for (int i = 0; i < kMinRequiredSamples; ++i) {
|
||||||
rtp_video_header.simulcastIdx = 0;
|
codec_info.codecSpecific.VP8.simulcastIdx = 0;
|
||||||
encoded_image.qp_ = kQpIdx0;
|
encoded_image.qp_ = kQpIdx0;
|
||||||
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
|
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
|
||||||
}
|
}
|
||||||
statistics_proxy_.reset();
|
statistics_proxy_.reset();
|
||||||
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8"));
|
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8"));
|
||||||
@ -378,18 +378,17 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
|
|||||||
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
|
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
|
||||||
test::ClearHistograms();
|
test::ClearHistograms();
|
||||||
EncodedImage encoded_image;
|
EncodedImage encoded_image;
|
||||||
RTPVideoHeader rtp_video_header;
|
CodecSpecificInfo codec_info;
|
||||||
rtp_video_header.simulcastIdx = 0;
|
codec_info.codecType = kVideoCodecVP9;
|
||||||
rtp_video_header.codec = kRtpVideoVp9;
|
codec_info.codecSpecific.VP9.num_spatial_layers = 2;
|
||||||
rtp_video_header.codecHeader.VP9.num_spatial_layers = 2;
|
|
||||||
|
|
||||||
for (int i = 0; i < kMinRequiredSamples; ++i) {
|
for (int i = 0; i < kMinRequiredSamples; ++i) {
|
||||||
encoded_image.qp_ = kQpIdx0;
|
encoded_image.qp_ = kQpIdx0;
|
||||||
rtp_video_header.codecHeader.VP9.spatial_idx = 0;
|
codec_info.codecSpecific.VP9.spatial_idx = 0;
|
||||||
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
|
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
|
||||||
encoded_image.qp_ = kQpIdx1;
|
encoded_image.qp_ = kQpIdx1;
|
||||||
rtp_video_header.codecHeader.VP9.spatial_idx = 1;
|
codec_info.codecSpecific.VP9.spatial_idx = 1;
|
||||||
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
|
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
|
||||||
}
|
}
|
||||||
statistics_proxy_.reset();
|
statistics_proxy_.reset();
|
||||||
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
|
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
|
||||||
@@ -408,15 +407,14 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9OneSpatialLayer) {
 
   test::ClearHistograms();
   EncodedImage encoded_image;
-  RTPVideoHeader rtp_video_header;
-  rtp_video_header.simulcastIdx = 0;
-  rtp_video_header.codec = kRtpVideoVp9;
-  rtp_video_header.codecHeader.VP9.num_spatial_layers = 1;
+  CodecSpecificInfo codec_info;
+  codec_info.codecType = kVideoCodecVP9;
+  codec_info.codecSpecific.VP9.num_spatial_layers = 1;
 
   for (int i = 0; i < kMinRequiredSamples; ++i) {
     encoded_image.qp_ = kQpIdx0;
-    rtp_video_header.codecHeader.VP9.spatial_idx = 0;
-    statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+    codec_info.codecSpecific.VP9.spatial_idx = 0;
+    statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
   }
   statistics_proxy_.reset();
   EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp9"));
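Both VP9 tests now describe the encoded frame to the stats proxy through CodecSpecificInfo rather than an RTPVideoHeader. A minimal sketch of that calling convention follows; the helper name, include path, and wrapping namespace are illustrative assumptions, not part of this change:

#include <cstdint>

#include "webrtc/modules/video_coding/include/video_codec_interface.h"  // Assumed header for CodecSpecificInfo.

namespace webrtc {

// Illustrative helper (not in this CL): describe a VP9 frame for
// SendStatisticsProxy::OnSendEncodedImage(), which now takes a
// CodecSpecificInfo* instead of an RTPVideoHeader*.
CodecSpecificInfo DescribeVp9Frame(uint8_t num_spatial_layers,
                                   uint8_t spatial_idx) {
  CodecSpecificInfo info;  // Default-constructed, as in the tests above.
  info.codecType = kVideoCodecVP9;
  info.codecSpecific.VP9.num_spatial_layers = num_spatial_layers;
  info.codecSpecific.VP9.spatial_idx = spatial_idx;
  return info;
}

}  // namespace webrtc

// Usage, mirroring VerifyQpHistogramStats_Vp9:
//   CodecSpecificInfo info = DescribeVp9Frame(2, 0);
//   statistics_proxy_->OnSendEncodedImage(encoded_image, &info);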
@@ -458,12 +456,13 @@ TEST_F(SendStatisticsProxyTest, EncodedResolutionTimesOut) {
   encoded_image._encodedWidth = kEncodedWidth;
   encoded_image._encodedHeight = kEncodedHeight;
 
-  RTPVideoHeader rtp_video_header;
+  CodecSpecificInfo codec_info;
+  codec_info.codecType = kVideoCodecVP8;
+  codec_info.codecSpecific.VP8.simulcastIdx = 0;
 
-  rtp_video_header.simulcastIdx = 0;
-  statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
-  rtp_video_header.simulcastIdx = 1;
-  statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+  statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+  codec_info.codecSpecific.VP8.simulcastIdx = 1;
+  statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
 
   VideoSendStream::Stats stats = statistics_proxy_->GetStats();
   EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
@@ -485,8 +484,8 @@ TEST_F(SendStatisticsProxyTest, EncodedResolutionTimesOut) {
 
   // Report stats for second SSRC to make sure it's not outdated along with the
   // first SSRC.
-  rtp_video_header.simulcastIdx = 1;
-  statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+  codec_info.codecSpecific.VP8.simulcastIdx = 1;
+  statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
 
   // Forward 1 ms, reach timeout, substream 0 should have no resolution
   // reported, but substream 1 should.
@@ -505,12 +504,13 @@ TEST_F(SendStatisticsProxyTest, ClearsResolutionFromInactiveSsrcs) {
   encoded_image._encodedWidth = kEncodedWidth;
   encoded_image._encodedHeight = kEncodedHeight;
 
-  RTPVideoHeader rtp_video_header;
+  CodecSpecificInfo codec_info;
+  codec_info.codecType = kVideoCodecVP8;
+  codec_info.codecSpecific.VP8.simulcastIdx = 0;
 
-  rtp_video_header.simulcastIdx = 0;
-  statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
-  rtp_video_header.simulcastIdx = 1;
-  statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
+  statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
+  codec_info.codecSpecific.VP8.simulcastIdx = 1;
+  statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
 
   statistics_proxy_->OnInactiveSsrc(config_.rtp.ssrcs[1]);
   VideoSendStream::Stats stats = statistics_proxy_->GetStats();
@@ -405,25 +405,10 @@ int32_t VideoReceiveStream::Encoded(
   if (kEnableFrameRecording) {
     if (!ivf_writer_.get()) {
       RTC_DCHECK(codec_specific_info);
-      RtpVideoCodecTypes rtp_codec_type;
-      switch (codec_specific_info->codecType) {
-        case kVideoCodecVP8:
-          rtp_codec_type = kRtpVideoVp8;
-          break;
-        case kVideoCodecVP9:
-          rtp_codec_type = kRtpVideoVp9;
-          break;
-        case kVideoCodecH264:
-          rtp_codec_type = kRtpVideoH264;
-          break;
-        default:
-          rtp_codec_type = kRtpVideoNone;
-          RTC_NOTREACHED() << "Unsupported codec "
-                           << codec_specific_info->codecType;
-      }
       std::ostringstream oss;
       oss << "receive_bitstream_ssrc_" << config_.rtp.remote_ssrc << ".ivf";
-      ivf_writer_ = IvfFileWriter::Open(oss.str(), rtp_codec_type);
+      ivf_writer_ =
+          IvfFileWriter::Open(oss.str(), codec_specific_info->codecType);
     }
     if (ivf_writer_.get()) {
       bool ok = ivf_writer_->WriteFrame(encoded_image);
@@ -229,14 +229,16 @@ VideoSendStream::VideoSendStream(
                        this,
                        config.post_encode_callback,
                        &stats_proxy_),
-      vie_encoder_(num_cpu_cores,
-                   config_.rtp.ssrcs,
-                   module_process_thread_,
-                   &stats_proxy_,
-                   config.pre_encode_callback,
-                   &overuse_detector_,
-                   congestion_controller_->pacer(),
-                   &payload_router_),
+      vie_encoder_(
+          num_cpu_cores,
+          config_.rtp.ssrcs,
+          module_process_thread_,
+          &stats_proxy_,
+          config.pre_encode_callback,
+          &overuse_detector_,
+          congestion_controller_->pacer(),
+          &payload_router_,
+          config.post_encode_callback ? &encoded_frame_proxy_ : nullptr),
       vcm_(vie_encoder_.vcm()),
       bandwidth_observer_(congestion_controller_->GetBitrateController()
                               ->CreateRtcpBandwidthObserver()),
@@ -250,7 +252,7 @@ VideoSendStream::VideoSendStream(
                        congestion_controller_->packet_router(),
                        &stats_proxy_,
                        config_.rtp.ssrcs.size())),
-      payload_router_(rtp_rtcp_modules_),
+      payload_router_(rtp_rtcp_modules_, config.encoder_settings.payload_type),
       input_(&encoder_wakeup_event_,
              config_.local_renderer,
              &stats_proxy_,
@@ -319,9 +321,6 @@ VideoSendStream::VideoSendStream(
 
   ReconfigureVideoEncoder(encoder_config);
 
-  if (config_.post_encode_callback)
-    vie_encoder_.RegisterPostEncodeImageCallback(&encoded_frame_proxy_);
-
   if (config_.suspend_below_min_bitrate) {
     vcm_->SuspendBelowMinBitrate();
     bitrate_allocator_->EnforceMinBitrate(false);
@@ -1807,7 +1807,6 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
                       const CodecSpecificInfo* codecSpecificInfo,
                       const std::vector<FrameType>* frame_types) override {
       CodecSpecificInfo specifics;
-      memset(&specifics, 0, sizeof(specifics));
       specifics.codecType = kVideoCodecGeneric;
 
       uint8_t buffer[16] = {0};
@@ -85,7 +85,8 @@ ViEEncoder::ViEEncoder(uint32_t number_of_cores,
                        rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
                        OveruseFrameDetector* overuse_detector,
                        PacedSender* pacer,
-                       PayloadRouter* payload_router)
+                       PayloadRouter* payload_router,
+                       EncodedImageCallback* post_encode_callback)
     : number_of_cores_(number_of_cores),
       ssrcs_(ssrcs),
       vp_(VideoProcessing::Create()),
@@ -98,6 +99,7 @@ ViEEncoder::ViEEncoder(uint32_t number_of_cores,
       overuse_detector_(overuse_detector),
       pacer_(pacer),
       send_payload_router_(payload_router),
+      post_encode_callback_(post_encode_callback),
      time_of_last_frame_activity_ms_(0),
       encoder_config_(),
       min_transmit_bitrate_bps_(0),
@@ -121,6 +123,10 @@ bool ViEEncoder::Init() {
   // Enable/disable content analysis: off by default for now.
   vp_->EnableContentAnalysis(false);
 
+  vcm_->RegisterPostEncodeImageCallback(this);
+
+  // TODO(perkj): Remove |RegisterTransportCallback| as soon as we don't use
+  // VCMPacketizationCallback::OnEncoderImplementationName.
   if (vcm_->RegisterTransportCallback(this) != 0) {
     return false;
   }
@@ -403,10 +409,14 @@ void ViEEncoder::OnSetRates(uint32_t bitrate_bps, int framerate) {
     stats_proxy_->OnSetRates(bitrate_bps, framerate);
 }
 
-int32_t ViEEncoder::SendData(const uint8_t payload_type,
-                             const EncodedImage& encoded_image,
-                             const RTPFragmentationHeader* fragmentation_header,
-                             const RTPVideoHeader* rtp_video_hdr) {
+void ViEEncoder::OnEncoderImplementationName(const char* implementation_name) {
+  if (stats_proxy_)
+    stats_proxy_->OnEncoderImplementationName(implementation_name);
+}
+
+int32_t ViEEncoder::Encoded(const EncodedImage& encoded_image,
+                            const CodecSpecificInfo* codec_specific_info,
+                            const RTPFragmentationHeader* fragmentation) {
   RTC_DCHECK(send_payload_router_);
 
   {
@@ -414,17 +424,22 @@ int32_t ViEEncoder::SendData(const uint8_t payload_type,
     time_of_last_frame_activity_ms_ = TickTime::MillisecondTimestamp();
   }
 
-  if (stats_proxy_)
-    stats_proxy_->OnSendEncodedImage(encoded_image, rtp_video_hdr);
+  if (post_encode_callback_) {
+    post_encode_callback_->Encoded(encoded_image, codec_specific_info,
+                                   fragmentation);
+  }
 
-  bool success = send_payload_router_->RoutePayload(
-      encoded_image._frameType, payload_type, encoded_image._timeStamp,
-      encoded_image.capture_time_ms_, encoded_image._buffer,
-      encoded_image._length, fragmentation_header, rtp_video_hdr);
+  if (stats_proxy_) {
+    stats_proxy_->OnSendEncodedImage(encoded_image, codec_specific_info);
+  }
+  int success = send_payload_router_->Encoded(
+      encoded_image, codec_specific_info, fragmentation);
   overuse_detector_->FrameSent(encoded_image._timeStamp);
 
   if (kEnableFrameRecording) {
-    int layer = rtp_video_hdr->simulcastIdx;
+    int layer = codec_specific_info->codecType == kVideoCodecVP8
+                    ? codec_specific_info->codecSpecific.VP8.simulcastIdx
+                    : 0;
     IvfFileWriter* file_writer;
     {
       rtc::CritScope lock(&data_cs_);
@@ -435,7 +450,7 @@ int32_t ViEEncoder::SendData(const uint8_t payload_type,
         oss << "_" << ssrc;
         oss << "_layer" << layer << ".ivf";
         file_writers_[layer] =
-            IvfFileWriter::Open(oss.str(), rtp_video_hdr->codec);
+            IvfFileWriter::Open(oss.str(), codec_specific_info->codecType);
       }
       file_writer = file_writers_[layer].get();
     }
@@ -445,13 +460,7 @@ int32_t ViEEncoder::SendData(const uint8_t payload_type,
     }
   }
 
-  return success ? 0 : -1;
-}
-
-void ViEEncoder::OnEncoderImplementationName(
-    const char* implementation_name) {
-  if (stats_proxy_)
-    stats_proxy_->OnEncoderImplementationName(implementation_name);
+  return success;
 }
 
 int32_t ViEEncoder::SendStatistics(const uint32_t bit_rate,
@@ -531,11 +540,6 @@ void ViEEncoder::OnBitrateUpdated(uint32_t bitrate_bps,
     stats_proxy_->OnSuspendChange(video_is_suspended);
 }
 
-void ViEEncoder::RegisterPostEncodeImageCallback(
-      EncodedImageCallback* post_encode_callback) {
-  vcm_->RegisterPostEncodeImageCallback(post_encode_callback);
-}
-
 QMVideoSettingsCallback::QMVideoSettingsCallback(VideoProcessing* vpm)
     : vp_(vpm) {
 }
@@ -18,6 +18,7 @@
 #include "webrtc/base/scoped_ref_ptr.h"
 #include "webrtc/base/thread_annotations.h"
 #include "webrtc/common_types.h"
+#include "webrtc/video_encoder.h"
 #include "webrtc/media/base/videosinkinterface.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"
@@ -41,6 +42,7 @@ class VideoCodingModule;
 class VideoEncoder;
 
 class ViEEncoder : public VideoEncoderRateObserver,
+                   public EncodedImageCallback,
                    public VCMPacketizationCallback,
                    public VCMSendStatisticsCallback {
  public:
@@ -54,7 +56,8 @@ class ViEEncoder : public VideoEncoderRateObserver,
              rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
              OveruseFrameDetector* overuse_detector,
              PacedSender* pacer,
-             PayloadRouter* payload_router);
+             PayloadRouter* payload_router,
+             EncodedImageCallback* post_encode_callback);
   ~ViEEncoder();
 
   bool Init();
@@ -92,12 +95,13 @@ class ViEEncoder : public VideoEncoderRateObserver,
   void OnSetRates(uint32_t bitrate_bps, int framerate) override;
 
   // Implements VCMPacketizationCallback.
-  int32_t SendData(uint8_t payload_type,
-                   const EncodedImage& encoded_image,
-                   const RTPFragmentationHeader* fragmentation_header,
-                   const RTPVideoHeader* rtp_video_hdr) override;
   void OnEncoderImplementationName(const char* implementation_name) override;
 
+  // Implements EncodedImageCallback.
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override;
+
   // Implements VideoSendStatisticsCallback.
   int32_t SendStatistics(const uint32_t bit_rate,
                          const uint32_t frame_rate) override;
@@ -107,10 +111,6 @@ class ViEEncoder : public VideoEncoderRateObserver,
   virtual void OnReceivedSLI(uint32_t ssrc, uint8_t picture_id);
   virtual void OnReceivedRPSI(uint32_t ssrc, uint64_t picture_id);
 
-  // New-style callbacks, used by VideoSendStream.
-  void RegisterPostEncodeImageCallback(
-      EncodedImageCallback* post_encode_callback);
-
   int GetPaddingNeededBps() const;
 
   void OnBitrateUpdated(uint32_t bitrate_bps,
@@ -139,6 +139,7 @@ class ViEEncoder : public VideoEncoderRateObserver,
   OveruseFrameDetector* const overuse_detector_;
   PacedSender* const pacer_;
   PayloadRouter* const send_payload_router_;
+  EncodedImageCallback* const post_encode_callback_;
 
   // The time we last received an input frame or encoded frame. This is used to
   // track when video is stopped long enough that we also want to stop sending
@@ -31,6 +31,7 @@ class EncodedImageCallback {
   virtual ~EncodedImageCallback() {}
 
   // Callback function which is called when an image has been encoded.
+  // TODO(perkj): Change this to return void.
   virtual int32_t Encoded(const EncodedImage& encoded_image,
                           const CodecSpecificInfo* codec_specific_info,
                           const RTPFragmentationHeader* fragmentation) = 0;
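A minimal sketch of implementing this interface, for example as the post-encode callback that ViEEncoder now takes at construction; the class name, logging, and include path are illustrative assumptions, and the int32_t return follows the current signature noted in the TODO above:

#include <cstdio>

#include "webrtc/video_encoder.h"  // Assumed location of EncodedImageCallback.

namespace webrtc {

// Hypothetical observer, not part of this change: logs every frame delivered
// through the EncodedImageCallback interface declared above.
class EncodedFrameLogger : public EncodedImageCallback {
 public:
  int32_t Encoded(const EncodedImage& encoded_image,
                  const CodecSpecificInfo* codec_specific_info,
                  const RTPFragmentationHeader* fragmentation) override {
    ++num_frames_;
    std::printf("frame %d: %zu bytes, rtp timestamp %u\n", num_frames_,
                encoded_image._length, encoded_image._timeStamp);
    return 0;  // 0 signals success; see the TODO above about returning void.
  }

 private:
  int num_frames_ = 0;
};

}  // namespace webrtc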