Remove RTPFragmentationHeader creation and propagation through webrtc
Bug: webrtc:6471
Change-Id: I5cb1e10088aaecb5981888082b87ae9957bbaaef
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/181541
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31955}
committed by Commit Bot
parent c8ac35879c
commit 2549f174b5
@@ -87,19 +87,15 @@ VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
 }  // namespace
 
 // Helper method used by H264EncoderImpl::Encode.
-// Copies the encoded bytes from |info| to |encoded_image| and updates the
-// fragmentation information of |frag_header|. The |encoded_image->_buffer| may
-// be deleted and reallocated if a bigger buffer is required.
+// Copies the encoded bytes from |info| to |encoded_image|. The
+// |encoded_image->_buffer| may be deleted and reallocated if a bigger buffer is
+// required.
 //
 // After OpenH264 encoding, the encoded bytes are stored in |info| spread out
 // over a number of layers and "NAL units". Each NAL unit is a fragment starting
 // with the four-byte start code {0,0,0,1}. All of this data (including the
-// start codes) is copied to the |encoded_image->_buffer| and the |frag_header|
-// is updated to point to each fragment, with offsets and lengths set as to
-// exclude the start codes.
-static void RtpFragmentize(EncodedImage* encoded_image,
-                           SFrameBSInfo* info,
-                           RTPFragmentationHeader* frag_header) {
+// start codes) is copied to the |encoded_image->_buffer|.
+static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
   // Calculate minimum buffer size required to hold encoded data.
   size_t required_capacity = 0;
   size_t fragments_count = 0;
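For readers following the change outside the tree: the copy described in the comment above boils down to two passes over the encoder output, one to size the destination buffer and one to append every layer's bytes, start codes included. The sketch below is a self-contained illustration of that pattern only; FakeLayer, FakeFrameInfo and CopyEncodedBytes are hypothetical stand-ins, not OpenH264 or webrtc types.

// Illustration only: models the copy step using plain structs in place of
// OpenH264's SFrameBSInfo/SLayerBSInfo and a std::vector in place of the
// EncodedImage buffer. Names here are hypothetical stand-ins.
#include <cstddef>
#include <cstdint>
#include <vector>

struct FakeLayer {
  std::vector<uint8_t> bitstream;       // Annex B bytes, start codes included.
  std::vector<int> nal_length_in_byte;  // Length of each NAL, start code included.
};

struct FakeFrameInfo {
  std::vector<FakeLayer> layers;
};

// First pass: sum all NAL lengths to size the destination buffer.
// Second pass: copy every layer's bytes (start codes and all) back to back.
std::vector<uint8_t> CopyEncodedBytes(const FakeFrameInfo& info) {
  size_t required_capacity = 0;
  for (const FakeLayer& layer : info.layers) {
    for (int nal_len : layer.nal_length_in_byte)
      required_capacity += static_cast<size_t>(nal_len);
  }
  std::vector<uint8_t> buffer;
  buffer.reserve(required_capacity);
  for (const FakeLayer& layer : info.layers) {
    size_t layer_len = 0;
    for (int nal_len : layer.nal_length_in_byte)
      layer_len += static_cast<size_t>(nal_len);
    buffer.insert(buffer.end(), layer.bitstream.begin(),
                  layer.bitstream.begin() + layer_len);
  }
  return buffer;
}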
@@ -119,7 +115,6 @@ static void RtpFragmentize(EncodedImage* encoded_image,
   // Iterate layers and NAL units, note each NAL unit as a fragment and copy
   // the data to |encoded_image->_buffer|.
   const uint8_t start_code[4] = {0, 0, 0, 1};
-  frag_header->VerifyAndAllocateFragmentationHeader(fragments_count);
   size_t frag = 0;
   encoded_image->set_size(0);
   for (int layer = 0; layer < info->iLayerNum; ++layer) {
@@ -134,10 +129,6 @@ static void RtpFragmentize(EncodedImage* encoded_image,
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
-      frag_header->fragmentationOffset[frag] =
-          encoded_image->size() + layer_len + sizeof(start_code);
-      frag_header->fragmentationLength[frag] =
-          layerInfo.pNalLengthInByte[nal] - sizeof(start_code);
       layer_len += layerInfo.pNalLengthInByte[nal];
     }
     // Copy the entire layer's data (including start codes).
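A note on what the removed bookkeeping provided: fragmentationOffset/fragmentationLength pointed at each NAL unit with the start codes excluded. With that gone, any code that still needs per-NAL boundaries has to recover them from the Annex B buffer itself, for example by scanning for the four-byte start code {0,0,0,1}. The sketch below shows one such scan under that assumption; it is illustrative only and is not the parser webrtc actually uses downstream.

// Illustration only: recover NAL unit boundaries from an Annex B buffer by
// locating the four-byte start codes. Offsets and lengths exclude the start
// codes, mirroring what fragmentationOffset/Length used to describe.
#include <cstddef>
#include <cstdint>
#include <vector>

struct NalRange {
  size_t offset;  // First byte after the start code.
  size_t length;  // Payload length, start code excluded.
};

std::vector<NalRange> FindNalUnits(const uint8_t* data, size_t size) {
  std::vector<NalRange> ranges;
  size_t i = 0;
  while (i + 4 <= size) {
    if (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 0 &&
        data[i + 3] == 1) {
      // A new start code closes the previous NAL unit, if any.
      if (!ranges.empty())
        ranges.back().length = i - ranges.back().offset;
      ranges.push_back({i + 4, 0});
      i += 4;
    } else {
      ++i;
    }
  }
  // The last NAL unit runs to the end of the buffer.
  if (!ranges.empty())
    ranges.back().length = size - ranges.back().offset;
  return ranges;
}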
@@ -485,8 +476,7 @@ int32_t H264EncoderImpl::Encode(
 
     // Split encoded image up into fragments. This also updates
    // |encoded_image_|.
-    RTPFragmentationHeader frag_header;
-    RtpFragmentize(&encoded_images_[i], &info, &frag_header);
+    RtpFragmentize(&encoded_images_[i], &info);
 
     // Encoder can skip frames to save bandwidth in which case
     // |encoded_images_[i]._length| == 0.
@@ -518,7 +508,7 @@ int32_t H264EncoderImpl::Encode(
         }
       }
       encoded_image_callback_->OnEncodedImage(encoded_images_[i],
-                                              &codec_specific, &frag_header);
+                                              &codec_specific);
     }
   }
   return WEBRTC_VIDEO_CODEC_OK;