Reland of Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead. (patchset #1 id:1 of https://codereview.webrtc.org/1903193002/ )

Reason for revert:
A fix is being prepared downstream so this can now go in.

Original issue's description:
> Revert of Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead. (patchset #5 id:80001 of https://codereview.webrtc.org/1897233002/ )
>
> Reason for revert:
> API changes broke downstream.
>
> Original issue's description:
> > Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead.
> > EncodedImageCallback is used by all encoder implementations and seems to be what we should try to use in the transport.
> > EncodedImageCallback can of course be cleaned up in the future.
> >
> > This moves creation of RTPVideoHeader from the GenericEncoder to the PayLoadRouter.
> >
> > BUG=webrtc:5687
> >
> > Committed: https://crrev.com/f5d55aaecdc39e9cc66eb6e87614f04afe28f6eb
> > Cr-Commit-Position: refs/heads/master@{#12436}
>
> TBR=stefan@webrtc.org,pbos@webrtc.org,perkj@webrtc.org
> # Skipping CQ checks because original CL landed less than 1 day ago.
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=webrtc:5687
>
> Committed: https://crrev.com/a261e6136655af33f283eda8e60a6dd93dd746a4
> Cr-Commit-Position: refs/heads/master@{#12441}

TBR=stefan@webrtc.org,pbos@webrtc.org,perkj@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:5687

Review URL: https://codereview.webrtc.org/1905583002

Cr-Commit-Position: refs/heads/master@{#12442}
This commit is contained in:
kjellander
2016-04-20 05:05:54 -07:00
committed by Commit bot
parent a261e61366
commit 02b3d275a0
23 changed files with 333 additions and 333 deletions

View File

@ -238,7 +238,6 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
memcpy(&send_codec_, &new_send_codec, sizeof(send_codec_));
if (!reset_required) {
encoded_frame_callback_->SetPayloadType(send_codec_.plType);
return true;
}
@ -249,7 +248,6 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
ptr_encoder_.reset(
new VCMGenericEncoder(external_encoder_, encoder_rate_observer_,
encoded_frame_callback_, internal_source_));
encoded_frame_callback_->SetPayloadType(send_codec_.plType);
encoded_frame_callback_->SetInternalSource(internal_source_);
if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
max_payload_size_) < 0) {

View File

@ -21,76 +21,6 @@
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
namespace {
// Map codec-specific information from |info| into the RTP video header
// |rtp| used for packetization. Only the fields relevant to
// |info->codecType| are written; for unhandled codec types |rtp| is
// left unmodified.
void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
RTC_DCHECK(info);
switch (info->codecType) {
case kVideoCodecVP8: {
rtp->codec = kRtpVideoVp8;
rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
// Copy the per-frame VP8 payload-descriptor fields.
rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
return;
}
case kVideoCodecVP9: {
rtp->codec = kRtpVideoVp9;
rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
rtp->codecHeader.VP9.inter_pic_predicted =
info->codecSpecific.VP9.inter_pic_predicted;
rtp->codecHeader.VP9.flexible_mode =
info->codecSpecific.VP9.flexible_mode;
rtp->codecHeader.VP9.ss_data_available =
info->codecSpecific.VP9.ss_data_available;
rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
rtp->codecHeader.VP9.temporal_up_switch =
info->codecSpecific.VP9.temporal_up_switch;
rtp->codecHeader.VP9.inter_layer_predicted =
info->codecSpecific.VP9.inter_layer_predicted;
rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
rtp->codecHeader.VP9.num_spatial_layers =
info->codecSpecific.VP9.num_spatial_layers;
// Scalability structure (SS) data is only copied for frames on which
// the encoder made it available.
if (info->codecSpecific.VP9.ss_data_available) {
rtp->codecHeader.VP9.spatial_layer_resolution_present =
info->codecSpecific.VP9.spatial_layer_resolution_present;
if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
++i) {
rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
}
}
rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
}
// Copy the reference-picture diffs (note the p_diff -> pid_diff rename
// between the two structs).
rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
return;
}
case kVideoCodecH264:
// Only the codec type is set; no further header fields are copied here.
rtp->codec = kRtpVideoH264;
return;
case kVideoCodecGeneric:
rtp->codec = kRtpVideoGeneric;
rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
return;
default:
// Unknown codec type: leave |rtp| untouched.
return;
}
}
} // namespace
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
VideoEncoderRateObserver* rate_observer,
@ -216,7 +146,6 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
EncodedImageCallback* post_encode_callback)
: send_callback_(),
media_opt_(nullptr),
payload_type_(0),
internal_source_(false),
post_encode_callback_(post_encode_callback) {}
@ -234,19 +163,8 @@ int32_t VCMEncodedFrameCallback::Encoded(
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image._timeStamp);
post_encode_callback_->Encoded(encoded_image, nullptr, nullptr);
if (send_callback_ == nullptr)
return VCM_UNINITIALIZED;
RTPVideoHeader rtp_video_header;
memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
if (codec_specific)
CopyCodecSpecific(codec_specific, &rtp_video_header);
rtp_video_header.rotation = encoded_image.rotation_;
int32_t ret_val = send_callback_->SendData(
payload_type_, encoded_image, fragmentation_header, &rtp_video_header);
int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific,
fragmentation_header);
if (ret_val < 0)
return ret_val;

View File

@ -44,7 +44,6 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
const RTPFragmentationHeader* fragmentation_header) override;
int32_t SetTransportCallback(VCMPacketizationCallback* transport);
void SetMediaOpt(media_optimization::MediaOptimization* media_opt);
void SetPayloadType(uint8_t payload_type) { payload_type_ = payload_type; }
void SetInternalSource(bool internal_source) {
internal_source_ = internal_source;
}
@ -54,7 +53,6 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
private:
VCMPacketizationCallback* send_callback_;
media_optimization::MediaOptimization* media_opt_;
uint8_t payload_type_;
bool internal_source_;
EncodedImageCallback* post_encode_callback_;

View File

@ -57,13 +57,11 @@ struct VCMFrameCount {
};
// Callback class used for sending data ready to be packetized
// Deprecated.
// TODO(perkj): Remove once OnEncoderImplementationName is not used.
class VCMPacketizationCallback {
public:
virtual int32_t SendData(uint8_t payloadType,
const EncodedImage& encoded_image,
const RTPFragmentationHeader* fragmentationHeader,
const RTPVideoHeader* rtpVideoHdr) = 0;
// TODO(perkj): Refactor this. It does not belong in VCMPacketizationCallback.
virtual void OnEncoderImplementationName(const char* implementation_name) {}
protected:

View File

@ -18,7 +18,7 @@ namespace webrtc {
IvfFileWriter::IvfFileWriter(const std::string& file_name,
std::unique_ptr<FileWrapper> file,
RtpVideoCodecTypes codec_type)
VideoCodecType codec_type)
: codec_type_(codec_type),
num_frames_(0),
width_(0),
@ -34,9 +34,8 @@ IvfFileWriter::~IvfFileWriter() {
const size_t kIvfHeaderSize = 32;
std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(
const std::string& file_name,
RtpVideoCodecTypes codec_type) {
std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(const std::string& file_name,
VideoCodecType codec_type) {
std::unique_ptr<IvfFileWriter> file_writer;
std::unique_ptr<FileWrapper> file(FileWrapper::Create());
if (file->OpenFile(file_name.c_str(), false) != 0)
@ -65,19 +64,19 @@ bool IvfFileWriter::WriteHeader() {
ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[6], 32); // Header size.
switch (codec_type_) {
case kRtpVideoVp8:
case kVideoCodecVP8:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '8';
ivf_header[11] = '0';
break;
case kRtpVideoVp9:
case kVideoCodecVP9:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '9';
ivf_header[11] = '0';
break;
case kRtpVideoH264:
case kVideoCodecH264:
ivf_header[8] = 'H';
ivf_header[9] = '2';
ivf_header[10] = '6';

View File

@ -27,18 +27,18 @@ class IvfFileWriter {
~IvfFileWriter();
static std::unique_ptr<IvfFileWriter> Open(const std::string& file_name,
RtpVideoCodecTypes codec_type);
VideoCodecType codec_type);
bool WriteFrame(const EncodedImage& encoded_image);
bool Close();
private:
IvfFileWriter(const std::string& path_name,
std::unique_ptr<FileWrapper> file,
RtpVideoCodecTypes codec_type);
VideoCodecType codec_type);
bool WriteHeader();
bool InitFromFirstFrame(const EncodedImage& encoded_image);
const RtpVideoCodecTypes codec_type_;
const VideoCodecType codec_type_;
size_t num_frames_;
uint16_t width_;
uint16_t height_;

View File

@ -103,7 +103,7 @@ class IvfFileWriterTest : public ::testing::Test {
}
}
void RunBasicFileStructureTest(RtpVideoCodecTypes codec_type,
void RunBasicFileStructureTest(VideoCodecType codec_type,
const uint8_t fourcc[4],
bool use_capture_tims_ms) {
file_writer_ = IvfFileWriter::Open(file_name_, codec_type);
@ -135,7 +135,7 @@ class IvfFileWriterTest : public ::testing::Test {
};
TEST_F(IvfFileWriterTest, RemovesUnusedFile) {
file_writer_ = IvfFileWriter::Open(file_name_, kRtpVideoVp8);
file_writer_ = IvfFileWriter::Open(file_name_, kVideoCodecVP8);
ASSERT_TRUE(file_writer_.get() != nullptr);
EXPECT_TRUE(FileExists());
EXPECT_TRUE(file_writer_->Close());
@ -145,32 +145,32 @@ TEST_F(IvfFileWriterTest, RemovesUnusedFile) {
TEST_F(IvfFileWriterTest, WritesBasicVP8FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
RunBasicFileStructureTest(kRtpVideoVp8, fourcc, false);
RunBasicFileStructureTest(kVideoCodecVP8, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicVP8FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
RunBasicFileStructureTest(kRtpVideoVp8, fourcc, true);
RunBasicFileStructureTest(kVideoCodecVP8, fourcc, true);
}
TEST_F(IvfFileWriterTest, WritesBasicVP9FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
RunBasicFileStructureTest(kRtpVideoVp9, fourcc, false);
RunBasicFileStructureTest(kVideoCodecVP9, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
RunBasicFileStructureTest(kRtpVideoVp9, fourcc, true);
RunBasicFileStructureTest(kVideoCodecVP9, fourcc, true);
}
TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
RunBasicFileStructureTest(kRtpVideoH264, fourcc, false);
RunBasicFileStructureTest(kVideoCodecH264, fourcc, false);
}
TEST_F(IvfFileWriterTest, WritesBasicH264FileMsTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
RunBasicFileStructureTest(kRtpVideoH264, fourcc, true);
RunBasicFileStructureTest(kVideoCodecH264, fourcc, true);
}
} // namespace webrtc

View File

@ -54,7 +54,6 @@ class EncodedImageCallbackWrapper : public EncodedImageCallback {
callback_ = callback;
}
// TODO(andresp): Change to void as return value is ignored.
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {

View File

@ -79,6 +79,8 @@ class VideoSender {
uint8_t lossRate,
int64_t rtt);
// Deprecated. Use |post_encode_callback| instead.
// TODO(perkj): Remove once |OnEncoderImplementationName| is not used.
int32_t RegisterTransportCallback(VCMPacketizationCallback* transport);
int32_t RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
int32_t RegisterProtectionCallback(VCMProtectionCallback* protection);

View File

@ -86,19 +86,19 @@ class EmptyFrameGenerator : public FrameGenerator {
std::unique_ptr<VideoFrame> frame_;
};
class PacketizationCallback : public VCMPacketizationCallback {
class EncodedImageCallbackImpl : public EncodedImageCallback {
public:
explicit PacketizationCallback(Clock* clock)
explicit EncodedImageCallbackImpl(Clock* clock)
: clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
virtual ~PacketizationCallback() {}
virtual ~EncodedImageCallbackImpl() {}
int32_t SendData(uint8_t payload_type,
const EncodedImage& encoded_image,
const RTPFragmentationHeader* fragmentation_header,
const RTPVideoHeader* rtp_video_header) override {
assert(rtp_video_header);
frame_data_.push_back(FrameData(encoded_image._length, *rtp_video_header));
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
assert(codec_specific_info);
frame_data_.push_back(
FrameData(encoded_image._length, *codec_specific_info));
return 0;
}
@ -130,11 +130,12 @@ class PacketizationCallback : public VCMPacketizationCallback {
struct FrameData {
FrameData() {}
FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
: payload_size(payload_size), rtp_video_header(rtp_video_header) {}
FrameData(size_t payload_size, const CodecSpecificInfo& codec_specific_info)
: payload_size(payload_size),
codec_specific_info(codec_specific_info) {}
size_t payload_size;
RTPVideoHeader rtp_video_header;
CodecSpecificInfo codec_specific_info;
};
int64_t interval_ms() {
@ -146,9 +147,9 @@ class PacketizationCallback : public VCMPacketizationCallback {
int CountFramesWithinTemporalLayer(int temporal_layer) {
int frames = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
const uint8_t temporal_idx =
frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
frames++;
}
@ -158,9 +159,9 @@ class PacketizationCallback : public VCMPacketizationCallback {
size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
size_t payload_size = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
const uint8_t temporal_idx =
frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
payload_size += frame_data_[i].payload_size;
}
@ -176,12 +177,11 @@ class TestVideoSender : public ::testing::Test {
protected:
// Note: simulated clock starts at 1 seconds, since parts of webrtc use 0 as
// a special case (e.g. frame rate in media optimization).
TestVideoSender() : clock_(1000), packetization_callback_(&clock_) {}
TestVideoSender() : clock_(1000), encoded_frame_callback_(&clock_) {}
void SetUp() override {
sender_.reset(
new VideoSender(&clock_, &post_encode_callback_, nullptr, nullptr));
EXPECT_EQ(0, sender_->RegisterTransportCallback(&packetization_callback_));
new VideoSender(&clock_, &encoded_frame_callback_, nullptr, nullptr));
}
void AddFrame() {
@ -190,8 +190,7 @@ class TestVideoSender : public ::testing::Test {
}
SimulatedClock clock_;
PacketizationCallback packetization_callback_;
MockEncodedImageCallback post_encode_callback_;
EncodedImageCallbackImpl encoded_frame_callback_;
// Used by subclassing tests, need to outlive sender_.
std::unique_ptr<VideoEncoder> encoder_;
std::unique_ptr<VideoSender> sender_;
@ -415,8 +414,6 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
void InsertFrames(float framerate, float seconds) {
for (int i = 0; i < seconds * framerate; ++i) {
clock_.AdvanceTimeMilliseconds(1000.0f / framerate);
EXPECT_CALL(post_encode_callback_, Encoded(_, NULL, NULL))
.WillOnce(Return(0));
AddFrame();
// SetChannelParameters needs to be called frequently to propagate
// framerate from the media optimization into the encoder.
@ -435,10 +432,10 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
// It appears that this 5 seconds simulation is needed to allow
// bitrate and framerate to stabilize.
InsertFrames(framerate, short_simulation_interval);
packetization_callback_.Reset();
encoded_frame_callback_.Reset();
InsertFrames(framerate, long_simulation_interval);
return packetization_callback_.CalculateVp8StreamInfo();
return encoded_frame_callback_.CalculateVp8StreamInfo();
}
protected:

View File

@ -35,6 +35,7 @@ class MockVieEncoder : public ViEEncoder {
nullptr,
nullptr,
pacer,
nullptr,
nullptr) {}
~MockVieEncoder() {}

View File

@ -13,11 +13,85 @@
#include "webrtc/base/checks.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
namespace webrtc {
PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules)
: active_(false), num_sending_modules_(1), rtp_modules_(rtp_modules) {
namespace {
// Map codec-specific information from |info| into the RTP video header
// |rtp| consumed by the RTP modules. Only fields relevant to
// |info->codecType| are written; unhandled codec types leave |rtp|
// unmodified.
void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
RTC_DCHECK(info);
switch (info->codecType) {
case kVideoCodecVP8: {
rtp->codec = kRtpVideoVp8;
rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
// Per-frame VP8 payload-descriptor fields.
rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
// simulcastIdx selects the sending RTP module in PayloadRouter.
rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
return;
}
case kVideoCodecVP9: {
rtp->codec = kRtpVideoVp9;
rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
rtp->codecHeader.VP9.inter_pic_predicted =
info->codecSpecific.VP9.inter_pic_predicted;
rtp->codecHeader.VP9.flexible_mode =
info->codecSpecific.VP9.flexible_mode;
rtp->codecHeader.VP9.ss_data_available =
info->codecSpecific.VP9.ss_data_available;
rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
rtp->codecHeader.VP9.temporal_up_switch =
info->codecSpecific.VP9.temporal_up_switch;
rtp->codecHeader.VP9.inter_layer_predicted =
info->codecSpecific.VP9.inter_layer_predicted;
rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
rtp->codecHeader.VP9.num_spatial_layers =
info->codecSpecific.VP9.num_spatial_layers;
// Scalability structure (SS) data is only copied for frames on which
// the encoder made it available.
if (info->codecSpecific.VP9.ss_data_available) {
rtp->codecHeader.VP9.spatial_layer_resolution_present =
info->codecSpecific.VP9.spatial_layer_resolution_present;
if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
++i) {
rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
}
}
rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
}
// Copy the reference-picture diffs (note the p_diff -> pid_diff rename
// between the two structs).
rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
return;
}
case kVideoCodecH264:
// Only the codec type is set; no further header fields are copied here.
rtp->codec = kRtpVideoH264;
return;
case kVideoCodecGeneric:
rtp->codec = kRtpVideoGeneric;
rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
return;
default:
// Unknown codec type: leave |rtp| untouched.
return;
}
}
} // namespace
PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
int payload_type)
: active_(false),
num_sending_modules_(1),
rtp_modules_(rtp_modules),
payload_type_(payload_type) {
UpdateModuleSendingState();
}
@ -60,31 +134,33 @@ void PayloadRouter::UpdateModuleSendingState() {
}
}
bool PayloadRouter::RoutePayload(FrameType frame_type,
int8_t payload_type,
uint32_t time_stamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_length,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_hdr) {
int32_t PayloadRouter::Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
rtc::CritScope lock(&crit_);
RTC_DCHECK(!rtp_modules_.empty());
if (!active_ || num_sending_modules_ == 0)
return false;
return -1;
int stream_idx = 0;
if (rtp_video_hdr) {
RTC_DCHECK_LT(rtp_video_hdr->simulcastIdx, rtp_modules_.size());
RTPVideoHeader rtp_video_header;
memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
if (codec_specific_info)
CopyCodecSpecific(codec_specific_info, &rtp_video_header);
rtp_video_header.rotation = encoded_image.rotation_;
RTC_DCHECK_LT(rtp_video_header.simulcastIdx, rtp_modules_.size());
// The simulcast index might actually be larger than the number of modules
// in case the encoder was processing a frame during a codec reconfig.
if (rtp_video_hdr->simulcastIdx >= num_sending_modules_)
return false;
stream_idx = rtp_video_hdr->simulcastIdx;
}
if (rtp_video_header.simulcastIdx >= num_sending_modules_)
return -1;
stream_idx = rtp_video_header.simulcastIdx;
return rtp_modules_[stream_idx]->SendOutgoingData(
frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
payload_length, fragmentation, rtp_video_hdr) == 0 ? true : false;
encoded_image._frameType, payload_type_, encoded_image._timeStamp,
encoded_image.capture_time_ms_, encoded_image._buffer,
encoded_image._length, fragmentation, &rtp_video_header);
}
void PayloadRouter::SetTargetSendBitrates(

View File

@ -17,6 +17,7 @@
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/video_encoder.h"
#include "webrtc/system_wrappers/include/atomic32.h"
namespace webrtc {
@ -27,10 +28,11 @@ struct RTPVideoHeader;
// PayloadRouter routes outgoing data to the correct sending RTP module, based
// on the simulcast layer in RTPVideoHeader.
class PayloadRouter {
class PayloadRouter : public EncodedImageCallback {
public:
// Rtp modules are assumed to be sorted in simulcast index order.
explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules);
explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
int payload_type);
~PayloadRouter();
static size_t DefaultMaxPayloadLength();
@ -41,16 +43,11 @@ class PayloadRouter {
void set_active(bool active);
bool active();
// Input parameters according to the signature of RtpRtcp::SendOutgoingData.
// Returns true if the packet was routed / sent, false otherwise.
bool RoutePayload(FrameType frame_type,
int8_t payload_type,
uint32_t time_stamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_hdr);
// Implements EncodedImageCallback.
// Returns 0 if the packet was routed / sent, -1 otherwise.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
// Configures current target bitrate per module. 'stream_bitrates' is assumed
// to be in the same order as 'SetSendingRtpModules'.
@ -69,6 +66,7 @@ class PayloadRouter {
// Rtp modules are assumed to be sorted in simulcast index order. Not owned.
const std::vector<RtpRtcp*> rtp_modules_;
const int payload_type_;
RTC_DISALLOW_COPY_AND_ASSIGN(PayloadRouter);
};

View File

@ -14,6 +14,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
#include "webrtc/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/video/payload_router.h"
using ::testing::_;
@ -27,46 +28,56 @@ TEST(PayloadRouterTest, SendOnOneModule) {
MockRtpRtcp rtp;
std::vector<RtpRtcp*> modules(1, &rtp);
PayloadRouter payload_router(modules);
uint8_t payload = 'a';
int8_t payload_type = 96;
EncodedImage encoded_image;
encoded_image._timeStamp = 1;
encoded_image.capture_time_ms_ = 2;
encoded_image._frameType = kVideoFrameKey;
encoded_image._buffer = &payload;
encoded_image._length = 1;
PayloadRouter payload_router(modules, payload_type);
payload_router.SetSendingRtpModules(modules.size());
uint8_t payload = 'a';
FrameType frame_type = kVideoFrameKey;
int8_t payload_type = 96;
EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
nullptr, nullptr))
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
.Times(0);
EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
&payload, 1, nullptr, nullptr));
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.set_active(true);
EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
nullptr, nullptr))
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
.Times(1);
EXPECT_TRUE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
&payload, 1, nullptr, nullptr));
EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.set_active(false);
EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
nullptr, nullptr))
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
.Times(0);
EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
&payload, 1, nullptr, nullptr));
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.set_active(true);
EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
nullptr, nullptr))
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
.Times(1);
EXPECT_TRUE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
&payload, 1, nullptr, nullptr));
EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
payload_router.SetSendingRtpModules(0);
EXPECT_CALL(rtp, SendOutgoingData(frame_type, payload_type, 0, 0, _, 1,
nullptr, nullptr))
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
.Times(0);
EXPECT_FALSE(payload_router.RoutePayload(frame_type, payload_type, 0, 0,
&payload, 1, nullptr, nullptr));
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
}
TEST(PayloadRouterTest, SendSimulcast) {
@ -76,36 +87,46 @@ TEST(PayloadRouterTest, SendSimulcast) {
modules.push_back(&rtp_1);
modules.push_back(&rtp_2);
PayloadRouter payload_router(modules);
int8_t payload_type = 96;
uint8_t payload = 'a';
EncodedImage encoded_image;
encoded_image._timeStamp = 1;
encoded_image.capture_time_ms_ = 2;
encoded_image._frameType = kVideoFrameKey;
encoded_image._buffer = &payload;
encoded_image._length = 1;
PayloadRouter payload_router(modules, payload_type);
payload_router.SetSendingRtpModules(modules.size());
uint8_t payload_1 = 'a';
FrameType frame_type_1 = kVideoFrameKey;
int8_t payload_type_1 = 96;
RTPVideoHeader rtp_hdr_1;
rtp_hdr_1.simulcastIdx = 0;
CodecSpecificInfo codec_info_1;
memset(&codec_info_1, 0, sizeof(CodecSpecificInfo));
codec_info_1.codecType = kVideoCodecVP8;
codec_info_1.codecSpecific.VP8.simulcastIdx = 0;
payload_router.set_active(true);
EXPECT_CALL(rtp_1, SendOutgoingData(frame_type_1, payload_type_1, 0, 0, _, 1,
nullptr, &rtp_hdr_1))
EXPECT_CALL(rtp_1, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
.Times(1);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
EXPECT_TRUE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
&payload_1, 1, nullptr, &rtp_hdr_1));
EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
uint8_t payload_2 = 'b';
FrameType frame_type_2 = kVideoFrameDelta;
int8_t payload_type_2 = 97;
RTPVideoHeader rtp_hdr_2;
rtp_hdr_2.simulcastIdx = 1;
EXPECT_CALL(rtp_2, SendOutgoingData(frame_type_2, payload_type_2, 0, 0, _, 1,
nullptr, &rtp_hdr_2))
CodecSpecificInfo codec_info_2;
memset(&codec_info_2, 0, sizeof(CodecSpecificInfo));
codec_info_2.codecType = kVideoCodecVP8;
codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
EXPECT_CALL(rtp_2, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
.Times(1);
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
EXPECT_TRUE(payload_router.RoutePayload(frame_type_2, payload_type_2, 0, 0,
&payload_2, 1, nullptr, &rtp_hdr_2));
EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
// Inactive.
payload_router.set_active(false);
@ -113,10 +134,8 @@ TEST(PayloadRouterTest, SendSimulcast) {
.Times(0);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
EXPECT_FALSE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
&payload_1, 1, nullptr, &rtp_hdr_1));
EXPECT_FALSE(payload_router.RoutePayload(frame_type_2, payload_type_2, 0, 0,
&payload_2, 1, nullptr, &rtp_hdr_2));
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
// Invalid simulcast index.
payload_router.SetSendingRtpModules(1);
@ -125,9 +144,8 @@ TEST(PayloadRouterTest, SendSimulcast) {
.Times(0);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
rtp_hdr_1.simulcastIdx = 1;
EXPECT_FALSE(payload_router.RoutePayload(frame_type_1, payload_type_1, 0, 0,
&payload_1, 1, nullptr, &rtp_hdr_1));
codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
}
TEST(PayloadRouterTest, MaxPayloadLength) {
@ -139,7 +157,7 @@ TEST(PayloadRouterTest, MaxPayloadLength) {
std::vector<RtpRtcp*> modules;
modules.push_back(&rtp_1);
modules.push_back(&rtp_2);
PayloadRouter payload_router(modules);
PayloadRouter payload_router(modules, 42);
EXPECT_EQ(kDefaultMaxLength, PayloadRouter::DefaultMaxPayloadLength());
payload_router.SetSendingRtpModules(modules.size());
@ -170,7 +188,7 @@ TEST(PayloadRouterTest, SetTargetSendBitrates) {
std::vector<RtpRtcp*> modules;
modules.push_back(&rtp_1);
modules.push_back(&rtp_2);
PayloadRouter payload_router(modules);
PayloadRouter payload_router(modules, 42);
payload_router.SetSendingRtpModules(modules.size());
const uint32_t bitrate_1 = 10000;

View File

@ -17,6 +17,7 @@
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/metrics.h"
namespace webrtc {
@ -426,8 +427,17 @@ void SendStatisticsProxy::OnSetRates(uint32_t bitrate_bps, int framerate) {
void SendStatisticsProxy::OnSendEncodedImage(
const EncodedImage& encoded_image,
const RTPVideoHeader* rtp_video_header) {
size_t simulcast_idx = rtp_video_header ? rtp_video_header->simulcastIdx : 0;
const CodecSpecificInfo* codec_info) {
size_t simulcast_idx = 0;
if (codec_info) {
if (codec_info->codecType == kVideoCodecVP8) {
simulcast_idx = codec_info->codecSpecific.VP8.simulcastIdx;
} else if (codec_info->codecType == kVideoCodecGeneric) {
simulcast_idx = codec_info->codecSpecific.generic.simulcast_idx;
}
}
if (simulcast_idx >= config_.rtp.ssrcs.size()) {
LOG(LS_ERROR) << "Encoded image outside simulcast range (" << simulcast_idx
<< " >= " << config_.rtp.ssrcs.size() << ").";
@ -469,17 +479,16 @@ void SendStatisticsProxy::OnSendEncodedImage(
}
}
if (encoded_image.qp_ != -1 && rtp_video_header) {
if (rtp_video_header->codec == kRtpVideoVp8) {
if (encoded_image.qp_ != -1 && codec_info) {
if (codec_info->codecType == kVideoCodecVP8) {
int spatial_idx = (config_.rtp.ssrcs.size() == 1)
? -1
: static_cast<int>(simulcast_idx);
uma_container_->qp_counters_[spatial_idx].vp8.Add(encoded_image.qp_);
} else if (rtp_video_header->codec == kRtpVideoVp9) {
int spatial_idx =
(rtp_video_header->codecHeader.VP9.num_spatial_layers == 1)
} else if (codec_info->codecType == kVideoCodecVP9) {
int spatial_idx = (codec_info->codecSpecific.VP9.num_spatial_layers == 1)
? -1
: rtp_video_header->codecHeader.VP9.spatial_idx;
: codec_info->codecSpecific.VP9.spatial_idx;
uma_container_->qp_counters_[spatial_idx].vp9.Add(encoded_image.qp_);
}
}

View File

@ -49,7 +49,7 @@ class SendStatisticsProxy : public CpuOveruseMetricsObserver,
VideoSendStream::Stats GetStats();
virtual void OnSendEncodedImage(const EncodedImage& encoded_image,
const RTPVideoHeader* rtp_video_header);
const CodecSpecificInfo* codec_info);
// Used to update incoming frame rate.
void OnIncomingFrame(int width, int height);

View File

@ -334,16 +334,16 @@ TEST_F(SendStatisticsProxyTest, SwitchContentTypeUpdatesHistograms) {
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8) {
test::ClearHistograms();
EncodedImage encoded_image;
RTPVideoHeader rtp_video_header;
rtp_video_header.codec = kRtpVideoVp8;
CodecSpecificInfo codec_info;
codec_info.codecType = kVideoCodecVP8;
for (int i = 0; i < kMinRequiredSamples; ++i) {
rtp_video_header.simulcastIdx = 0;
codec_info.codecSpecific.VP8.simulcastIdx = 0;
encoded_image.qp_ = kQpIdx0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
rtp_video_header.simulcastIdx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
codec_info.codecSpecific.VP8.simulcastIdx = 1;
encoded_image.qp_ = kQpIdx1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8.S0"));
@ -362,13 +362,13 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
test::ClearHistograms();
EncodedImage encoded_image;
RTPVideoHeader rtp_video_header;
rtp_video_header.codec = kRtpVideoVp8;
CodecSpecificInfo codec_info;
codec_info.codecType = kVideoCodecVP8;
for (int i = 0; i < kMinRequiredSamples; ++i) {
rtp_video_header.simulcastIdx = 0;
codec_info.codecSpecific.VP8.simulcastIdx = 0;
encoded_image.qp_ = kQpIdx0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp8"));
@ -378,18 +378,17 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp8OneSsrc) {
TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9) {
test::ClearHistograms();
EncodedImage encoded_image;
RTPVideoHeader rtp_video_header;
rtp_video_header.simulcastIdx = 0;
rtp_video_header.codec = kRtpVideoVp9;
rtp_video_header.codecHeader.VP9.num_spatial_layers = 2;
CodecSpecificInfo codec_info;
codec_info.codecType = kVideoCodecVP9;
codec_info.codecSpecific.VP9.num_spatial_layers = 2;
for (int i = 0; i < kMinRequiredSamples; ++i) {
encoded_image.qp_ = kQpIdx0;
rtp_video_header.codecHeader.VP9.spatial_idx = 0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
codec_info.codecSpecific.VP9.spatial_idx = 0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
encoded_image.qp_ = kQpIdx1;
rtp_video_header.codecHeader.VP9.spatial_idx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
codec_info.codecSpecific.VP9.spatial_idx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp9.S0"));
@ -408,15 +407,14 @@ TEST_F(SendStatisticsProxyTest, VerifyQpHistogramStats_Vp9OneSpatialLayer) {
test::ClearHistograms();
EncodedImage encoded_image;
RTPVideoHeader rtp_video_header;
rtp_video_header.simulcastIdx = 0;
rtp_video_header.codec = kRtpVideoVp9;
rtp_video_header.codecHeader.VP9.num_spatial_layers = 1;
CodecSpecificInfo codec_info;
codec_info.codecType = kVideoCodecVP9;
codec_info.codecSpecific.VP9.num_spatial_layers = 1;
for (int i = 0; i < kMinRequiredSamples; ++i) {
encoded_image.qp_ = kQpIdx0;
rtp_video_header.codecHeader.VP9.spatial_idx = 0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
codec_info.codecSpecific.VP9.spatial_idx = 0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
}
statistics_proxy_.reset();
EXPECT_EQ(1, test::NumHistogramSamples("WebRTC.Video.Encoded.Qp.Vp9"));
@ -458,12 +456,13 @@ TEST_F(SendStatisticsProxyTest, EncodedResolutionTimesOut) {
encoded_image._encodedWidth = kEncodedWidth;
encoded_image._encodedHeight = kEncodedHeight;
RTPVideoHeader rtp_video_header;
CodecSpecificInfo codec_info;
codec_info.codecType = kVideoCodecVP8;
codec_info.codecSpecific.VP8.simulcastIdx = 0;
rtp_video_header.simulcastIdx = 0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
rtp_video_header.simulcastIdx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
codec_info.codecSpecific.VP8.simulcastIdx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
VideoSendStream::Stats stats = statistics_proxy_->GetStats();
EXPECT_EQ(kEncodedWidth, stats.substreams[config_.rtp.ssrcs[0]].width);
@ -485,8 +484,8 @@ TEST_F(SendStatisticsProxyTest, EncodedResolutionTimesOut) {
// Report stats for second SSRC to make sure it's not outdated along with the
// first SSRC.
rtp_video_header.simulcastIdx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
codec_info.codecSpecific.VP8.simulcastIdx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
// Forward 1 ms, reach timeout, substream 0 should have no resolution
// reported, but substream 1 should.
@ -505,12 +504,13 @@ TEST_F(SendStatisticsProxyTest, ClearsResolutionFromInactiveSsrcs) {
encoded_image._encodedWidth = kEncodedWidth;
encoded_image._encodedHeight = kEncodedHeight;
RTPVideoHeader rtp_video_header;
CodecSpecificInfo codec_info;
codec_info.codecType = kVideoCodecVP8;
codec_info.codecSpecific.VP8.simulcastIdx = 0;
rtp_video_header.simulcastIdx = 0;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
rtp_video_header.simulcastIdx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &rtp_video_header);
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
codec_info.codecSpecific.VP8.simulcastIdx = 1;
statistics_proxy_->OnSendEncodedImage(encoded_image, &codec_info);
statistics_proxy_->OnInactiveSsrc(config_.rtp.ssrcs[1]);
VideoSendStream::Stats stats = statistics_proxy_->GetStats();

View File

@ -405,25 +405,10 @@ int32_t VideoReceiveStream::Encoded(
if (kEnableFrameRecording) {
if (!ivf_writer_.get()) {
RTC_DCHECK(codec_specific_info);
RtpVideoCodecTypes rtp_codec_type;
switch (codec_specific_info->codecType) {
case kVideoCodecVP8:
rtp_codec_type = kRtpVideoVp8;
break;
case kVideoCodecVP9:
rtp_codec_type = kRtpVideoVp9;
break;
case kVideoCodecH264:
rtp_codec_type = kRtpVideoH264;
break;
default:
rtp_codec_type = kRtpVideoNone;
RTC_NOTREACHED() << "Unsupported codec "
<< codec_specific_info->codecType;
}
std::ostringstream oss;
oss << "receive_bitstream_ssrc_" << config_.rtp.remote_ssrc << ".ivf";
ivf_writer_ = IvfFileWriter::Open(oss.str(), rtp_codec_type);
ivf_writer_ =
IvfFileWriter::Open(oss.str(), codec_specific_info->codecType);
}
if (ivf_writer_.get()) {
bool ok = ivf_writer_->WriteFrame(encoded_image);

View File

@ -229,14 +229,16 @@ VideoSendStream::VideoSendStream(
this,
config.post_encode_callback,
&stats_proxy_),
vie_encoder_(num_cpu_cores,
vie_encoder_(
num_cpu_cores,
config_.rtp.ssrcs,
module_process_thread_,
&stats_proxy_,
config.pre_encode_callback,
&overuse_detector_,
congestion_controller_->pacer(),
&payload_router_),
&payload_router_,
config.post_encode_callback ? &encoded_frame_proxy_ : nullptr),
vcm_(vie_encoder_.vcm()),
bandwidth_observer_(congestion_controller_->GetBitrateController()
->CreateRtcpBandwidthObserver()),
@ -250,7 +252,7 @@ VideoSendStream::VideoSendStream(
congestion_controller_->packet_router(),
&stats_proxy_,
config_.rtp.ssrcs.size())),
payload_router_(rtp_rtcp_modules_),
payload_router_(rtp_rtcp_modules_, config.encoder_settings.payload_type),
input_(&encoder_wakeup_event_,
config_.local_renderer,
&stats_proxy_,
@ -319,9 +321,6 @@ VideoSendStream::VideoSendStream(
ReconfigureVideoEncoder(encoder_config);
if (config_.post_encode_callback)
vie_encoder_.RegisterPostEncodeImageCallback(&encoded_frame_proxy_);
if (config_.suspend_below_min_bitrate) {
vcm_->SuspendBelowMinBitrate();
bitrate_allocator_->EnforceMinBitrate(false);

View File

@ -1807,7 +1807,6 @@ TEST_F(VideoSendStreamTest, ReportsSentResolution) {
const CodecSpecificInfo* codecSpecificInfo,
const std::vector<FrameType>* frame_types) override {
CodecSpecificInfo specifics;
memset(&specifics, 0, sizeof(specifics));
specifics.codecType = kVideoCodecGeneric;
uint8_t buffer[16] = {0};

View File

@ -85,7 +85,8 @@ ViEEncoder::ViEEncoder(uint32_t number_of_cores,
rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
OveruseFrameDetector* overuse_detector,
PacedSender* pacer,
PayloadRouter* payload_router)
PayloadRouter* payload_router,
EncodedImageCallback* post_encode_callback)
: number_of_cores_(number_of_cores),
ssrcs_(ssrcs),
vp_(VideoProcessing::Create()),
@ -98,6 +99,7 @@ ViEEncoder::ViEEncoder(uint32_t number_of_cores,
overuse_detector_(overuse_detector),
pacer_(pacer),
send_payload_router_(payload_router),
post_encode_callback_(post_encode_callback),
time_of_last_frame_activity_ms_(0),
encoder_config_(),
min_transmit_bitrate_bps_(0),
@ -121,6 +123,10 @@ bool ViEEncoder::Init() {
// Enable/disable content analysis: off by default for now.
vp_->EnableContentAnalysis(false);
vcm_->RegisterPostEncodeImageCallback(this);
// TODO(perkj): Remove |RegisterTransportCallback| as soon as we don't use
// VCMPacketizationCallback::OnEncoderImplementationName.
if (vcm_->RegisterTransportCallback(this) != 0) {
return false;
}
@ -403,10 +409,14 @@ void ViEEncoder::OnSetRates(uint32_t bitrate_bps, int framerate) {
stats_proxy_->OnSetRates(bitrate_bps, framerate);
}
int32_t ViEEncoder::SendData(const uint8_t payload_type,
const EncodedImage& encoded_image,
const RTPFragmentationHeader* fragmentation_header,
const RTPVideoHeader* rtp_video_hdr) {
void ViEEncoder::OnEncoderImplementationName(const char* implementation_name) {
if (stats_proxy_)
stats_proxy_->OnEncoderImplementationName(implementation_name);
}
int32_t ViEEncoder::Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
RTC_DCHECK(send_payload_router_);
{
@ -414,17 +424,22 @@ int32_t ViEEncoder::SendData(const uint8_t payload_type,
time_of_last_frame_activity_ms_ = TickTime::MillisecondTimestamp();
}
if (stats_proxy_)
stats_proxy_->OnSendEncodedImage(encoded_image, rtp_video_hdr);
if (post_encode_callback_) {
post_encode_callback_->Encoded(encoded_image, codec_specific_info,
fragmentation);
}
bool success = send_payload_router_->RoutePayload(
encoded_image._frameType, payload_type, encoded_image._timeStamp,
encoded_image.capture_time_ms_, encoded_image._buffer,
encoded_image._length, fragmentation_header, rtp_video_hdr);
if (stats_proxy_) {
stats_proxy_->OnSendEncodedImage(encoded_image, codec_specific_info);
}
int success = send_payload_router_->Encoded(
encoded_image, codec_specific_info, fragmentation);
overuse_detector_->FrameSent(encoded_image._timeStamp);
if (kEnableFrameRecording) {
int layer = rtp_video_hdr->simulcastIdx;
int layer = codec_specific_info->codecType == kVideoCodecVP8
? codec_specific_info->codecSpecific.VP8.simulcastIdx
: 0;
IvfFileWriter* file_writer;
{
rtc::CritScope lock(&data_cs_);
@ -435,7 +450,7 @@ int32_t ViEEncoder::SendData(const uint8_t payload_type,
oss << "_" << ssrc;
oss << "_layer" << layer << ".ivf";
file_writers_[layer] =
IvfFileWriter::Open(oss.str(), rtp_video_hdr->codec);
IvfFileWriter::Open(oss.str(), codec_specific_info->codecType);
}
file_writer = file_writers_[layer].get();
}
@ -445,13 +460,7 @@ int32_t ViEEncoder::SendData(const uint8_t payload_type,
}
}
return success ? 0 : -1;
}
void ViEEncoder::OnEncoderImplementationName(
const char* implementation_name) {
if (stats_proxy_)
stats_proxy_->OnEncoderImplementationName(implementation_name);
return success;
}
int32_t ViEEncoder::SendStatistics(const uint32_t bit_rate,
@ -531,11 +540,6 @@ void ViEEncoder::OnBitrateUpdated(uint32_t bitrate_bps,
stats_proxy_->OnSuspendChange(video_is_suspended);
}
void ViEEncoder::RegisterPostEncodeImageCallback(
EncodedImageCallback* post_encode_callback) {
vcm_->RegisterPostEncodeImageCallback(post_encode_callback);
}
QMVideoSettingsCallback::QMVideoSettingsCallback(VideoProcessing* vpm)
: vp_(vpm) {
}

View File

@ -18,6 +18,7 @@
#include "webrtc/base/scoped_ref_ptr.h"
#include "webrtc/base/thread_annotations.h"
#include "webrtc/common_types.h"
#include "webrtc/video_encoder.h"
#include "webrtc/media/base/videosinkinterface.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
@ -41,6 +42,7 @@ class VideoCodingModule;
class VideoEncoder;
class ViEEncoder : public VideoEncoderRateObserver,
public EncodedImageCallback,
public VCMPacketizationCallback,
public VCMSendStatisticsCallback {
public:
@ -54,7 +56,8 @@ class ViEEncoder : public VideoEncoderRateObserver,
rtc::VideoSinkInterface<VideoFrame>* pre_encode_callback,
OveruseFrameDetector* overuse_detector,
PacedSender* pacer,
PayloadRouter* payload_router);
PayloadRouter* payload_router,
EncodedImageCallback* post_encode_callback);
~ViEEncoder();
bool Init();
@ -92,12 +95,13 @@ class ViEEncoder : public VideoEncoderRateObserver,
void OnSetRates(uint32_t bitrate_bps, int framerate) override;
// Implements VCMPacketizationCallback.
int32_t SendData(uint8_t payload_type,
const EncodedImage& encoded_image,
const RTPFragmentationHeader* fragmentation_header,
const RTPVideoHeader* rtp_video_hdr) override;
void OnEncoderImplementationName(const char* implementation_name) override;
// Implements EncodedImageCallback.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
// Implements VideoSendStatisticsCallback.
int32_t SendStatistics(const uint32_t bit_rate,
const uint32_t frame_rate) override;
@ -107,10 +111,6 @@ class ViEEncoder : public VideoEncoderRateObserver,
virtual void OnReceivedSLI(uint32_t ssrc, uint8_t picture_id);
virtual void OnReceivedRPSI(uint32_t ssrc, uint64_t picture_id);
// New-style callbacks, used by VideoSendStream.
void RegisterPostEncodeImageCallback(
EncodedImageCallback* post_encode_callback);
int GetPaddingNeededBps() const;
void OnBitrateUpdated(uint32_t bitrate_bps,
@ -139,6 +139,7 @@ class ViEEncoder : public VideoEncoderRateObserver,
OveruseFrameDetector* const overuse_detector_;
PacedSender* const pacer_;
PayloadRouter* const send_payload_router_;
EncodedImageCallback* const post_encode_callback_;
// The time we last received an input frame or encoded frame. This is used to
// track when video is stopped long enough that we also want to stop sending

View File

@ -31,6 +31,7 @@ class EncodedImageCallback {
virtual ~EncodedImageCallback() {}
// Callback function which is called when an image has been encoded.
// TODO(perkj): Change this to return void.
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) = 0;