Revert of Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead. (patchset #5 id:80001 of https://codereview.webrtc.org/1897233002/ )
Reason for revert: API changes broke downstream.

Original issue's description:
> Deprecate VCMPacketizationCallback::SendData and use EncodedImageCallback instead.
> EncodedImageCallback is used by all encoder implementations and seems to be what we should try to use in the transport.
> EncodedImageCallback can of course be cleaned up in the future.
>
> This moves creation of RTPVideoHeader from the GenericEncoder to the PayLoadRouter.
>
> BUG=webrtc::5687
>
> Committed: https://crrev.com/f5d55aaecdc39e9cc66eb6e87614f04afe28f6eb
> Cr-Commit-Position: refs/heads/master@{#12436}

TBR=stefan@webrtc.org,pbos@webrtc.org,perkj@webrtc.org
# Skipping CQ checks because original CL landed less than 1 days ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc::5687

Review URL: https://codereview.webrtc.org/1903193002
Cr-Commit-Position: refs/heads/master@{#12441}
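For orientation, a minimal sketch (not part of this CL) contrasting the two transport-side callback interfaces the revert is about. The class name ExampleSink and the commented header paths are assumptions added for illustration; the two virtual method signatures are copied from the diff below. The reverted CL routed encoded frames only through EncodedImageCallback::Encoded and built the RTPVideoHeader later in the PayLoadRouter; this revert restores the VCMPacketizationCallback::SendData path, where the VCM hands the transport a ready-made RTPVideoHeader.

// Hypothetical sink for illustration, assuming roughly these headers on this branch:
//   #include "webrtc/modules/video_coding/include/video_coding_defines.h"  // VCMPacketizationCallback
//   #include "webrtc/video_encoder.h"  // EncodedImageCallback, CodecSpecificInfo
class ExampleSink : public webrtc::VCMPacketizationCallback,
                    public webrtc::EncodedImageCallback {
 public:
  // Path restored by this revert: the transport receives a ready-made
  // RTPVideoHeader built inside the VCM (see CopyCodecSpecific below).
  int32_t SendData(uint8_t payload_type,
                   const webrtc::EncodedImage& encoded_image,
                   const webrtc::RTPFragmentationHeader* fragmentation_header,
                   const webrtc::RTPVideoHeader* rtp_video_header) override {
    // Packetize encoded_image using *rtp_video_header here.
    return 0;
  }

  // Path used by the reverted CL: the transport gets the raw CodecSpecificInfo
  // and is expected to build the RTPVideoHeader itself.
  int32_t Encoded(const webrtc::EncodedImage& encoded_image,
                  const webrtc::CodecSpecificInfo* codec_specific_info,
                  const webrtc::RTPFragmentationHeader* fragmentation) override {
    // Build an RTPVideoHeader from codec_specific_info, then packetize.
    return 0;
  }
};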
@@ -238,6 +238,7 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
memcpy(&send_codec_, &new_send_codec, sizeof(send_codec_));

if (!reset_required) {
encoded_frame_callback_->SetPayloadType(send_codec_.plType);
return true;
}

@@ -248,6 +249,7 @@ bool VCMCodecDataBase::SetSendCodec(const VideoCodec* send_codec,
ptr_encoder_.reset(
new VCMGenericEncoder(external_encoder_, encoder_rate_observer_,
encoded_frame_callback_, internal_source_));
encoded_frame_callback_->SetPayloadType(send_codec_.plType);
encoded_frame_callback_->SetInternalSource(internal_source_);
if (ptr_encoder_->InitEncode(&send_codec_, number_of_cores_,
max_payload_size_) < 0) {

@@ -21,6 +21,76 @@
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"

namespace webrtc {
namespace {
// Map information from info into rtp. If no relevant information is found
// in info, rtp is set to NULL.
void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
RTC_DCHECK(info);
switch (info->codecType) {
case kVideoCodecVP8: {
rtp->codec = kRtpVideoVp8;
rtp->codecHeader.VP8.InitRTPVideoHeaderVP8();
rtp->codecHeader.VP8.pictureId = info->codecSpecific.VP8.pictureId;
rtp->codecHeader.VP8.nonReference = info->codecSpecific.VP8.nonReference;
rtp->codecHeader.VP8.temporalIdx = info->codecSpecific.VP8.temporalIdx;
rtp->codecHeader.VP8.layerSync = info->codecSpecific.VP8.layerSync;
rtp->codecHeader.VP8.tl0PicIdx = info->codecSpecific.VP8.tl0PicIdx;
rtp->codecHeader.VP8.keyIdx = info->codecSpecific.VP8.keyIdx;
rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
return;
}
case kVideoCodecVP9: {
rtp->codec = kRtpVideoVp9;
rtp->codecHeader.VP9.InitRTPVideoHeaderVP9();
rtp->codecHeader.VP9.inter_pic_predicted =
info->codecSpecific.VP9.inter_pic_predicted;
rtp->codecHeader.VP9.flexible_mode =
info->codecSpecific.VP9.flexible_mode;
rtp->codecHeader.VP9.ss_data_available =
info->codecSpecific.VP9.ss_data_available;
rtp->codecHeader.VP9.picture_id = info->codecSpecific.VP9.picture_id;
rtp->codecHeader.VP9.tl0_pic_idx = info->codecSpecific.VP9.tl0_pic_idx;
rtp->codecHeader.VP9.temporal_idx = info->codecSpecific.VP9.temporal_idx;
rtp->codecHeader.VP9.spatial_idx = info->codecSpecific.VP9.spatial_idx;
rtp->codecHeader.VP9.temporal_up_switch =
info->codecSpecific.VP9.temporal_up_switch;
rtp->codecHeader.VP9.inter_layer_predicted =
info->codecSpecific.VP9.inter_layer_predicted;
rtp->codecHeader.VP9.gof_idx = info->codecSpecific.VP9.gof_idx;
rtp->codecHeader.VP9.num_spatial_layers =
info->codecSpecific.VP9.num_spatial_layers;

if (info->codecSpecific.VP9.ss_data_available) {
rtp->codecHeader.VP9.spatial_layer_resolution_present =
info->codecSpecific.VP9.spatial_layer_resolution_present;
if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
++i) {
rtp->codecHeader.VP9.width[i] = info->codecSpecific.VP9.width[i];
rtp->codecHeader.VP9.height[i] = info->codecSpecific.VP9.height[i];
}
}
rtp->codecHeader.VP9.gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
}

rtp->codecHeader.VP9.num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i)
rtp->codecHeader.VP9.pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
return;
}
case kVideoCodecH264:
rtp->codec = kRtpVideoH264;
return;
case kVideoCodecGeneric:
rtp->codec = kRtpVideoGeneric;
rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
return;
default:
return;
}
}
}  // namespace

VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
VideoEncoderRateObserver* rate_observer,
@@ -146,6 +216,7 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
EncodedImageCallback* post_encode_callback)
: send_callback_(),
media_opt_(nullptr),
payload_type_(0),
internal_source_(false),
post_encode_callback_(post_encode_callback) {}

@@ -163,8 +234,19 @@ int32_t VCMEncodedFrameCallback::Encoded(
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image._timeStamp);
int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific,
fragmentation_header);
post_encode_callback_->Encoded(encoded_image, nullptr, nullptr);

if (send_callback_ == nullptr)
return VCM_UNINITIALIZED;

RTPVideoHeader rtp_video_header;
memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
if (codec_specific)
CopyCodecSpecific(codec_specific, &rtp_video_header);
rtp_video_header.rotation = encoded_image.rotation_;

int32_t ret_val = send_callback_->SendData(
payload_type_, encoded_image, fragmentation_header, &rtp_video_header);
if (ret_val < 0)
return ret_val;

@@ -44,6 +44,7 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
const RTPFragmentationHeader* fragmentation_header) override;
int32_t SetTransportCallback(VCMPacketizationCallback* transport);
void SetMediaOpt(media_optimization::MediaOptimization* media_opt);
void SetPayloadType(uint8_t payload_type) { payload_type_ = payload_type; }
void SetInternalSource(bool internal_source) {
internal_source_ = internal_source;
}
@@ -53,6 +54,7 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
private:
VCMPacketizationCallback* send_callback_;
media_optimization::MediaOptimization* media_opt_;
uint8_t payload_type_;
bool internal_source_;

EncodedImageCallback* post_encode_callback_;

@@ -57,11 +57,13 @@ struct VCMFrameCount {
};

// Callback class used for sending data ready to be packetized
// Deprecated.
// TODO(perkj): Remove once OnEncoderImplementationName is not used.
class VCMPacketizationCallback {
public:
// TODO(perkj): Refactor this. It does not belong in VCMPacketizationCallback.
virtual int32_t SendData(uint8_t payloadType,
const EncodedImage& encoded_image,
const RTPFragmentationHeader* fragmentationHeader,
const RTPVideoHeader* rtpVideoHdr) = 0;

virtual void OnEncoderImplementationName(const char* implementation_name) {}

protected:

@@ -18,7 +18,7 @@ namespace webrtc {

IvfFileWriter::IvfFileWriter(const std::string& file_name,
std::unique_ptr<FileWrapper> file,
VideoCodecType codec_type)
RtpVideoCodecTypes codec_type)
: codec_type_(codec_type),
num_frames_(0),
width_(0),
@@ -34,8 +34,9 @@ IvfFileWriter::~IvfFileWriter() {

const size_t kIvfHeaderSize = 32;

std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(const std::string& file_name,
VideoCodecType codec_type) {
std::unique_ptr<IvfFileWriter> IvfFileWriter::Open(
const std::string& file_name,
RtpVideoCodecTypes codec_type) {
std::unique_ptr<IvfFileWriter> file_writer;
std::unique_ptr<FileWrapper> file(FileWrapper::Create());
if (file->OpenFile(file_name.c_str(), false) != 0)
@@ -64,19 +65,19 @@ bool IvfFileWriter::WriteHeader() {
ByteWriter<uint16_t>::WriteLittleEndian(&ivf_header[6], 32); // Header size.

switch (codec_type_) {
case kVideoCodecVP8:
case kRtpVideoVp8:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '8';
ivf_header[11] = '0';
break;
case kVideoCodecVP9:
case kRtpVideoVp9:
ivf_header[8] = 'V';
ivf_header[9] = 'P';
ivf_header[10] = '9';
ivf_header[11] = '0';
break;
case kVideoCodecH264:
case kRtpVideoH264:
ivf_header[8] = 'H';
ivf_header[9] = '2';
ivf_header[10] = '6';

@@ -27,18 +27,18 @@ class IvfFileWriter {
~IvfFileWriter();

static std::unique_ptr<IvfFileWriter> Open(const std::string& file_name,
VideoCodecType codec_type);
RtpVideoCodecTypes codec_type);
bool WriteFrame(const EncodedImage& encoded_image);
bool Close();

private:
IvfFileWriter(const std::string& path_name,
std::unique_ptr<FileWrapper> file,
VideoCodecType codec_type);
RtpVideoCodecTypes codec_type);
bool WriteHeader();
bool InitFromFirstFrame(const EncodedImage& encoded_image);

const VideoCodecType codec_type_;
const RtpVideoCodecTypes codec_type_;
size_t num_frames_;
uint16_t width_;
uint16_t height_;

@@ -103,7 +103,7 @@ class IvfFileWriterTest : public ::testing::Test {
}
}

void RunBasicFileStructureTest(VideoCodecType codec_type,
void RunBasicFileStructureTest(RtpVideoCodecTypes codec_type,
const uint8_t fourcc[4],
bool use_capture_tims_ms) {
file_writer_ = IvfFileWriter::Open(file_name_, codec_type);
@@ -135,7 +135,7 @@ class IvfFileWriterTest : public ::testing::Test {
};

TEST_F(IvfFileWriterTest, RemovesUnusedFile) {
file_writer_ = IvfFileWriter::Open(file_name_, kVideoCodecVP8);
file_writer_ = IvfFileWriter::Open(file_name_, kRtpVideoVp8);
ASSERT_TRUE(file_writer_.get() != nullptr);
EXPECT_TRUE(FileExists());
EXPECT_TRUE(file_writer_->Close());
@@ -145,32 +145,32 @@ TEST_F(IvfFileWriterTest, RemovesUnusedFile) {

TEST_F(IvfFileWriterTest, WritesBasicVP8FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
RunBasicFileStructureTest(kVideoCodecVP8, fourcc, false);
RunBasicFileStructureTest(kRtpVideoVp8, fourcc, false);
}

TEST_F(IvfFileWriterTest, WritesBasicVP8FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '8', '0'};
RunBasicFileStructureTest(kVideoCodecVP8, fourcc, true);
RunBasicFileStructureTest(kRtpVideoVp8, fourcc, true);
}

TEST_F(IvfFileWriterTest, WritesBasicVP9FileNtpTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
RunBasicFileStructureTest(kVideoCodecVP9, fourcc, false);
RunBasicFileStructureTest(kRtpVideoVp9, fourcc, false);
}

TEST_F(IvfFileWriterTest, WritesBasicVP9FileMsTimestamp) {
const uint8_t fourcc[4] = {'V', 'P', '9', '0'};
RunBasicFileStructureTest(kVideoCodecVP9, fourcc, true);
RunBasicFileStructureTest(kRtpVideoVp9, fourcc, true);
}

TEST_F(IvfFileWriterTest, WritesBasicH264FileNtpTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
RunBasicFileStructureTest(kVideoCodecH264, fourcc, false);
RunBasicFileStructureTest(kRtpVideoH264, fourcc, false);
}

TEST_F(IvfFileWriterTest, WritesBasicH264FileMsTimestamp) {
const uint8_t fourcc[4] = {'H', '2', '6', '4'};
RunBasicFileStructureTest(kVideoCodecH264, fourcc, true);
RunBasicFileStructureTest(kRtpVideoH264, fourcc, true);
}

} // namespace webrtc

@@ -54,6 +54,7 @@ class EncodedImageCallbackWrapper : public EncodedImageCallback {
callback_ = callback;
}

// TODO(andresp): Change to void as return value is ignored.
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {

@@ -79,8 +79,6 @@ class VideoSender {
uint8_t lossRate,
int64_t rtt);

// Deprecated. Use |post_encode_callback| instead.
// TODO(perkj): Remove once |OnEncoderImplementationName| is not used.
int32_t RegisterTransportCallback(VCMPacketizationCallback* transport);
int32_t RegisterSendStatisticsCallback(VCMSendStatisticsCallback* sendStats);
int32_t RegisterProtectionCallback(VCMProtectionCallback* protection);

@@ -86,19 +86,19 @@ class EmptyFrameGenerator : public FrameGenerator {
std::unique_ptr<VideoFrame> frame_;
};

class EncodedImageCallbackImpl : public EncodedImageCallback {
class PacketizationCallback : public VCMPacketizationCallback {
public:
explicit EncodedImageCallbackImpl(Clock* clock)
explicit PacketizationCallback(Clock* clock)
: clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}

virtual ~EncodedImageCallbackImpl() {}
virtual ~PacketizationCallback() {}

int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
assert(codec_specific_info);
frame_data_.push_back(
FrameData(encoded_image._length, *codec_specific_info));
int32_t SendData(uint8_t payload_type,
const EncodedImage& encoded_image,
const RTPFragmentationHeader* fragmentation_header,
const RTPVideoHeader* rtp_video_header) override {
assert(rtp_video_header);
frame_data_.push_back(FrameData(encoded_image._length, *rtp_video_header));
return 0;
}

@@ -130,12 +130,11 @@ class EncodedImageCallbackImpl : public EncodedImageCallback {
struct FrameData {
FrameData() {}

FrameData(size_t payload_size, const CodecSpecificInfo& codec_specific_info)
: payload_size(payload_size),
codec_specific_info(codec_specific_info) {}
FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
: payload_size(payload_size), rtp_video_header(rtp_video_header) {}

size_t payload_size;
CodecSpecificInfo codec_specific_info;
RTPVideoHeader rtp_video_header;
};

int64_t interval_ms() {
@@ -147,9 +146,9 @@ class EncodedImageCallbackImpl : public EncodedImageCallback {
int CountFramesWithinTemporalLayer(int temporal_layer) {
int frames = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
const uint8_t temporal_idx =
frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
frames++;
}
@@ -159,9 +158,9 @@ class EncodedImageCallbackImpl : public EncodedImageCallback {
size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
size_t payload_size = 0;
for (size_t i = 0; i < frame_data_.size(); ++i) {
EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
const uint8_t temporal_idx =
frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
payload_size += frame_data_[i].payload_size;
}
@@ -177,11 +176,12 @@ class TestVideoSender : public ::testing::Test {
protected:
// Note: simulated clock starts at 1 seconds, since parts of webrtc use 0 as
// a special case (e.g. frame rate in media optimization).
TestVideoSender() : clock_(1000), encoded_frame_callback_(&clock_) {}
TestVideoSender() : clock_(1000), packetization_callback_(&clock_) {}

void SetUp() override {
sender_.reset(
new VideoSender(&clock_, &encoded_frame_callback_, nullptr, nullptr));
new VideoSender(&clock_, &post_encode_callback_, nullptr, nullptr));
EXPECT_EQ(0, sender_->RegisterTransportCallback(&packetization_callback_));
}

void AddFrame() {
@@ -190,7 +190,8 @@ class TestVideoSender : public ::testing::Test {
}

SimulatedClock clock_;
EncodedImageCallbackImpl encoded_frame_callback_;
PacketizationCallback packetization_callback_;
MockEncodedImageCallback post_encode_callback_;
// Used by subclassing tests, need to outlive sender_.
std::unique_ptr<VideoEncoder> encoder_;
std::unique_ptr<VideoSender> sender_;
@@ -414,6 +415,8 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
void InsertFrames(float framerate, float seconds) {
for (int i = 0; i < seconds * framerate; ++i) {
clock_.AdvanceTimeMilliseconds(1000.0f / framerate);
EXPECT_CALL(post_encode_callback_, Encoded(_, NULL, NULL))
.WillOnce(Return(0));
AddFrame();
// SetChannelParameters needs to be called frequently to propagate
// framerate from the media optimization into the encoder.
@@ -432,10 +435,10 @@ class TestVideoSenderWithVp8 : public TestVideoSender {
// It appears that this 5 seconds simulation is needed to allow
// bitrate and framerate to stabilize.
InsertFrames(framerate, short_simulation_interval);
encoded_frame_callback_.Reset();
packetization_callback_.Reset();

InsertFrames(framerate, long_simulation_interval);
return encoded_frame_callback_.CalculateVp8StreamInfo();
return packetization_callback_.CalculateVp8StreamInfo();
}

protected: