Remove RTPFragmentationHeader creation and propagation through webrtc

Bug: webrtc:6471
Change-Id: I5cb1e10088aaecb5981888082b87ae9957bbaaef
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/181541
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31955}
Author: Danil Chapovalov
Date: 2020-08-12 17:30:36 +02:00
Committed by: Commit Bot
Parent: c8ac35879c
Commit: 2549f174b5
41 changed files with 121 additions and 221 deletions
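
The change is mechanical across the encoders and test callbacks in the diff below: the RTPFragmentationHeader parameter is dropped from EncodedImageCallback::OnEncodedImage, and call sites stop building the header (or passing nullptr in its place). A minimal sketch of the interface change, inferred from the overrides in this commit rather than quoted from the WebRTC headers:

// Sketch only; forward declarations stand in for the real WebRTC types.
class EncodedImage;
struct CodecSpecificInfo;

class EncodedImageCallback {
 public:
  struct Result;  // Ok/error status; definition elided in this sketch.

  // Before this commit, every encoded image was delivered together with
  // fragmentation metadata (or nullptr where none was produced):
  //   virtual Result OnEncodedImage(
  //       const EncodedImage& encoded_image,
  //       const CodecSpecificInfo* codec_specific_info,
  //       const RTPFragmentationHeader* fragmentation) = 0;

  // After this commit, fragmentation metadata is no longer propagated.
  virtual Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info) = 0;

 protected:
  virtual ~EncodedImageCallback() = default;
};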

View File

@@ -527,7 +527,7 @@ int32_t LibaomAv1Encoder::Encode(
       }
     }
     encoded_image_callback_->OnEncodedImage(encoded_image,
-                                            &codec_specific_info, nullptr);
+                                            &codec_specific_info);
   }
 }

View File

@@ -87,19 +87,15 @@ VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
 }  // namespace
 
 // Helper method used by H264EncoderImpl::Encode.
-// Copies the encoded bytes from |info| to |encoded_image| and updates the
-// fragmentation information of |frag_header|. The |encoded_image->_buffer| may
-// be deleted and reallocated if a bigger buffer is required.
+// Copies the encoded bytes from |info| to |encoded_image|. The
+// |encoded_image->_buffer| may be deleted and reallocated if a bigger buffer is
+// required.
 //
 // After OpenH264 encoding, the encoded bytes are stored in |info| spread out
 // over a number of layers and "NAL units". Each NAL unit is a fragment starting
 // with the four-byte start code {0,0,0,1}. All of this data (including the
-// start codes) is copied to the |encoded_image->_buffer| and the |frag_header|
-// is updated to point to each fragment, with offsets and lengths set as to
-// exclude the start codes.
-static void RtpFragmentize(EncodedImage* encoded_image,
-                           SFrameBSInfo* info,
-                           RTPFragmentationHeader* frag_header) {
+// start codes) is copied to the |encoded_image->_buffer|.
+static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
   // Calculate minimum buffer size required to hold encoded data.
   size_t required_capacity = 0;
   size_t fragments_count = 0;
@@ -119,7 +115,6 @@ static void RtpFragmentize(EncodedImage* encoded_image,
   // Iterate layers and NAL units, note each NAL unit as a fragment and copy
   // the data to |encoded_image->_buffer|.
   const uint8_t start_code[4] = {0, 0, 0, 1};
-  frag_header->VerifyAndAllocateFragmentationHeader(fragments_count);
   size_t frag = 0;
   encoded_image->set_size(0);
   for (int layer = 0; layer < info->iLayerNum; ++layer) {
@@ -134,10 +129,6 @@ static void RtpFragmentize(EncodedImage* encoded_image,
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
-      frag_header->fragmentationOffset[frag] =
-          encoded_image->size() + layer_len + sizeof(start_code);
-      frag_header->fragmentationLength[frag] =
-          layerInfo.pNalLengthInByte[nal] - sizeof(start_code);
       layer_len += layerInfo.pNalLengthInByte[nal];
     }
     // Copy the entire layer's data (including start codes).
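
To make the deleted bookkeeping concrete: suppose a layer carries two NAL units with pNalLengthInByte = {9, 13} (hypothetical values; OpenH264 lengths include the four-byte start code) and the image already holds 100 bytes. The removed loop placed each fragment's offset just past its start code and its length excluding the start code, as this self-contained walk-through shows:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t image_size = 100;                // Bytes already in the image.
  const size_t nal_length_in_byte[] = {9, 13};  // Includes 4-byte start codes.
  const size_t start_code_size = 4;

  size_t layer_len = 0;
  for (size_t nal = 0; nal < 2; ++nal) {
    // Mirrors the removed fragmentationOffset/fragmentationLength math.
    const size_t offset = image_size + layer_len + start_code_size;
    const size_t length = nal_length_in_byte[nal] - start_code_size;
    std::printf("fragment %zu: offset=%zu length=%zu\n", nal, offset, length);
    layer_len += nal_length_in_byte[nal];
  }
  // Prints: fragment 0: offset=104 length=5
  //         fragment 1: offset=113 length=9
  return 0;
}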
@@ -485,8 +476,7 @@ int32_t H264EncoderImpl::Encode(
       // Split encoded image up into fragments. This also updates
       // |encoded_image_|.
-      RTPFragmentationHeader frag_header;
-      RtpFragmentize(&encoded_images_[i], &info, &frag_header);
+      RtpFragmentize(&encoded_images_[i], &info);
 
       // Encoder can skip frames to save bandwidth in which case
       // |encoded_images_[i]._length| == 0.
@@ -518,7 +508,7 @@ int32_t H264EncoderImpl::Encode(
       }
     }
     encoded_image_callback_->OnEncodedImage(encoded_images_[i],
-                                            &codec_specific, &frag_header);
+                                            &codec_specific);
   }
 }
 return WEBRTC_VIDEO_CODEC_OK;
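
Note that the start codes themselves are still copied into |encoded_image->_buffer|; only the side table of offsets and lengths disappears. A consumer that still needs per-NAL boundaries can recover them by scanning for the four-byte start code {0,0,0,1}, for example with a helper along these lines (illustrative sketch, not part of this commit):

#include <cstddef>
#include <cstdint>
#include <vector>

struct NalRange {
  size_t offset;  // First payload byte, just past the start code.
  size_t length;  // Payload length, excluding the start code.
};

// Assumes every NAL unit is prefixed with a four-byte start code, which is
// what RtpFragmentize's RTC_DCHECKs above guarantee for this buffer layout.
std::vector<NalRange> FindNalRanges(const uint8_t* data, size_t size) {
  std::vector<size_t> starts;  // Index of the first byte of each start code.
  for (size_t i = 0; i + 4 <= size; ++i) {
    if (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 0 &&
        data[i + 3] == 1) {
      starts.push_back(i);
    }
  }
  std::vector<NalRange> ranges;
  for (size_t n = 0; n < starts.size(); ++n) {
    const size_t payload = starts[n] + 4;
    const size_t end = (n + 1 < starts.size()) ? starts[n + 1] : size;
    ranges.push_back({payload, end - payload});
  }
  return ranges;
}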

View File

@@ -72,7 +72,7 @@ class H264EncoderImpl : public H264Encoder {
                  EncodedImageCallback* callback) override;
   void SetRates(const RateControlParameters& parameters) override;
 
-  // The result of encoding - an EncodedImage and RTPFragmentationHeader - are
+  // The result of encoding - an EncodedImage and CodecSpecificInfo - are
   // passed to the encode complete callback.
   int32_t Encode(const VideoFrame& frame,
                  const std::vector<VideoFrameType>* frame_types) override;

View File

@@ -57,8 +57,7 @@ class MultiplexEncoderAdapter : public VideoEncoder {
   EncodedImageCallback::Result OnEncodedImage(
       AlphaCodecStream stream_idx,
       const EncodedImage& encodedImage,
-      const CodecSpecificInfo* codecSpecificInfo,
-      const RTPFragmentationHeader* fragmentation);
+      const CodecSpecificInfo* codecSpecificInfo);
 
  private:
   // Wrapper class that redirects OnEncodedImage() calls.

View File

@@ -17,7 +17,6 @@
 #include "common_video/include/video_frame_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
 #include "media/base/video_common.h"
-#include "modules/include/module_common_types.h"
 #include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
 #include "rtc_base/keep_ref_until_done.h"
 #include "rtc_base/logging.h"
@@ -35,12 +34,11 @@ class MultiplexEncoderAdapter::AdapterEncodedImageCallback
   EncodedImageCallback::Result OnEncodedImage(
       const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* fragmentation) override {
+      const CodecSpecificInfo* codec_specific_info) override {
     if (!adapter_)
       return Result(Result::OK);
     return adapter_->OnEncodedImage(stream_idx_, encoded_image,
-                                    codec_specific_info, fragmentation);
+                                    codec_specific_info);
   }
 
  private:
@@ -286,8 +284,7 @@ VideoEncoder::EncoderInfo MultiplexEncoderAdapter::GetEncoderInfo() const {
 EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
     AlphaCodecStream stream_idx,
     const EncodedImage& encodedImage,
-    const CodecSpecificInfo* codecSpecificInfo,
-    const RTPFragmentationHeader* fragmentation) {
+    const CodecSpecificInfo* codecSpecificInfo) {
   // Save the image
   MultiplexImageComponent image_component;
   image_component.component_index = stream_idx;
@@ -324,8 +321,7 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
     CodecSpecificInfo codec_info = *codecSpecificInfo;
     codec_info.codecType = kVideoCodecMultiplex;
-    encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info,
-                                               fragmentation);
+    encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info);
   }
 
   stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr);

View File

@@ -33,10 +33,8 @@ class EncoderCallback : public EncodedImageCallback {
       : output_frames_(output_frames) {}
 
  private:
-  Result OnEncodedImage(
-      const EncodedImage& encoded_image,
-      const CodecSpecificInfo* codec_specific_info,
-      const RTPFragmentationHeader* /*fragmentation*/) override {
+  Result OnEncodedImage(const EncodedImage& encoded_image,
+                        const CodecSpecificInfo* codec_specific_info) override {
     output_frames_.push_back({encoded_image, *codec_specific_info});
     return Result(Result::Error::OK);
   }

View File

@@ -35,8 +35,7 @@ const VideoEncoder::Capabilities kCapabilities(false);
 EncodedImageCallback::Result
 VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage(
     const EncodedImage& frame,
-    const CodecSpecificInfo* codec_specific_info,
-    const RTPFragmentationHeader* fragmentation) {
+    const CodecSpecificInfo* codec_specific_info) {
   MutexLock lock(&test_->encoded_frame_section_);
   test_->encoded_frames_.push_back(frame);
   RTC_DCHECK(codec_specific_info);

View File

@@ -42,8 +42,7 @@ class VideoCodecUnitTest : public ::testing::Test {
         : test_(test) {}
     Result OnEncodedImage(const EncodedImage& frame,
-                          const CodecSpecificInfo* codec_specific_info,
-                          const RTPFragmentationHeader* fragmentation);
+                          const CodecSpecificInfo* codec_specific_info);
 
    private:
     VideoCodecUnitTest* const test_;

View File

@@ -91,8 +91,7 @@ class VideoProcessor {
     Result OnEncodedImage(
         const webrtc::EncodedImage& encoded_image,
-        const webrtc::CodecSpecificInfo* codec_specific_info,
-        const webrtc::RTPFragmentationHeader* fragmentation) override {
+        const webrtc::CodecSpecificInfo* codec_specific_info) override {
       RTC_CHECK(codec_specific_info);
 
       // Post the callback to the right task queue, if needed.

View File

@@ -1214,7 +1214,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image,
                    &qp_128);
       encoded_images_[encoder_idx].qp_ = qp_128;
       encoded_complete_callback_->OnEncodedImage(encoded_images_[encoder_idx],
-                                                 &codec_specific, nullptr);
+                                                 &codec_specific);
       const size_t steady_state_size = SteadyStateSize(
           stream_idx, codec_specific.codecSpecific.VP8.temporalIdx);
       if (qp_128 > variable_framerate_experiment_.steady_state_qp ||

View File

@@ -1525,15 +1525,8 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
     codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;
 
-    // No data partitioning in VP9, so 1 partition only.
-    int part_idx = 0;
-    RTPFragmentationHeader frag_info;
-    frag_info.VerifyAndAllocateFragmentationHeader(1);
-    frag_info.fragmentationOffset[part_idx] = 0;
-    frag_info.fragmentationLength[part_idx] = encoded_image_.size();
-    encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_,
-                                               &frag_info);
+    encoded_complete_callback_->OnEncodedImage(encoded_image_,
+                                               &codec_specific_);
 
     if (codec_.mode == VideoCodecMode::kScreensharing) {
       const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
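
The VP9 hunk also shows why nothing is lost by dropping the header: VP9 has no data partitioning, so the deleted block always described exactly one fragment starting at offset 0 and spanning the whole encoded image, which any receiver can infer from the image itself (the AV1 and VP8 call sites above were already passing nullptr). Reconstructed for illustration, the entire information content of the removed lines was:

// One fragment covering the whole encoded image; nothing here that
// |encoded_image_| does not already imply on its own.
RTPFragmentationHeader frag_info;
frag_info.VerifyAndAllocateFragmentationHeader(1);         // Single fragment.
frag_info.fragmentationOffset[0] = 0;                      // From byte 0...
frag_info.fragmentationLength[0] = encoded_image_.size();  // ...to the end.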