Remove usage of webrtc::RTPFragmentationHeader from objc wrappers

Bug: webrtc:6471
Change-Id: Ibe4ce280a9f1aea53016f131d1d235337fe71a4f
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/182502
Reviewed-by: Kári Helgason <kthelgason@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32022}
commit 090049c546
parent c2302e8e2e
Author: Danil Chapovalov
Date: 2020-08-25 15:49:53 +02:00
Committed by: Commit Bot

7 changed files with 13 additions and 163 deletions
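The net effect of the change is that H264CMSampleBufferToAnnexBBuffer now only writes the Annex B buffer and no longer produces a fragmentation header. As a hedged illustration (not part of the commit), a caller after this change looks roughly like the sketch below; the wrapper function name is hypothetical, and the declaration is repeated only so the sketch is self-contained:

#include <CoreMedia/CoreMedia.h>

#include "rtc_base/buffer.h"

namespace webrtc {
// Declaration as updated by this commit; normally pulled in from the
// nalu_rewriter header rather than repeated like this.
bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
                                      bool is_keyframe,
                                      rtc::Buffer* annexb_buffer);
}  // namespace webrtc

// Hypothetical caller: convert one VideoToolbox output sample to Annex B.
bool ConvertEncodedSample(CMSampleBufferRef sample_buffer, bool is_keyframe) {
  rtc::Buffer annexb_buffer;
  if (!webrtc::H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
                                                &annexb_buffer)) {
    return false;  // Conversion failed; drop the frame.
  }
  // annexb_buffer now holds the frame; no RTPFragmentationHeader is produced.
  return true;
}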


@@ -805,8 +805,6 @@ if (is_ios || is_mac) {
sources = [
"objc/api/peerconnection/RTCEncodedImage+Private.h",
"objc/api/peerconnection/RTCEncodedImage+Private.mm",
"objc/api/peerconnection/RTCRtpFragmentationHeader+Private.h",
"objc/api/peerconnection/RTCRtpFragmentationHeader+Private.mm",
"objc/api/peerconnection/RTCVideoCodecInfo+Private.h",
"objc/api/peerconnection/RTCVideoCodecInfo+Private.mm",
"objc/api/peerconnection/RTCVideoEncoderSettings+Private.h",


@@ -1,27 +0,0 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#import "base/RTCRtpFragmentationHeader.h"
#include "modules/include/module_common_types.h"
NS_ASSUME_NONNULL_BEGIN
/* Interfaces for converting to/from internal C++ formats. */
@interface RTC_OBJC_TYPE (RTCRtpFragmentationHeader)
(Private)
- (instancetype)initWithNativeFragmentationHeader
: (const webrtc::RTPFragmentationHeader *__nullable)fragmentationHeader;
- (std::unique_ptr<webrtc::RTPFragmentationHeader>)createNativeFragmentationHeader;
@end
NS_ASSUME_NONNULL_END


@@ -1,55 +0,0 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#import "RTCRtpFragmentationHeader+Private.h"
#include "modules/include/module_common_types.h"
@implementation RTC_OBJC_TYPE (RTCRtpFragmentationHeader)
(Private)
- (instancetype)initWithNativeFragmentationHeader
: (const webrtc::RTPFragmentationHeader *)fragmentationHeader {
if (self = [super init]) {
if (fragmentationHeader) {
int count = fragmentationHeader->fragmentationVectorSize;
NSMutableArray *offsets = [NSMutableArray array];
NSMutableArray *lengths = [NSMutableArray array];
NSMutableArray *timeDiffs = [NSMutableArray array];
NSMutableArray *plTypes = [NSMutableArray array];
for (int i = 0; i < count; ++i) {
[offsets addObject:@(fragmentationHeader->fragmentationOffset[i])];
[lengths addObject:@(fragmentationHeader->fragmentationLength[i])];
[timeDiffs addObject:@(0)];
[plTypes addObject:@(0)];
}
self.fragmentationOffset = [offsets copy];
self.fragmentationLength = [lengths copy];
self.fragmentationTimeDiff = [timeDiffs copy];
self.fragmentationPlType = [plTypes copy];
}
}
return self;
}
- (std::unique_ptr<webrtc::RTPFragmentationHeader>)createNativeFragmentationHeader {
auto fragmentationHeader =
std::unique_ptr<webrtc::RTPFragmentationHeader>(new webrtc::RTPFragmentationHeader);
fragmentationHeader->VerifyAndAllocateFragmentationHeader(self.fragmentationOffset.count);
for (NSUInteger i = 0; i < self.fragmentationOffset.count; ++i) {
fragmentationHeader->fragmentationOffset[i] = (size_t)self.fragmentationOffset[i].unsignedIntValue;
fragmentationHeader->fragmentationLength[i] = (size_t)self.fragmentationLength[i].unsignedIntValue;
}
return fragmentationHeader;
}
@end


@@ -19,7 +19,6 @@
#endif
#import "RTCCodecSpecificInfoH264.h"
#import "RTCH264ProfileLevelId.h"
#import "api/peerconnection/RTCRtpFragmentationHeader+Private.h"
#import "api/peerconnection/RTCVideoCodecInfo+Private.h"
#import "base/RTCCodecSpecificInfo.h"
#import "base/RTCI420Buffer.h"
@@ -32,7 +31,6 @@
#include "common_video/h264/h264_bitstream_parser.h"
#include "common_video/h264/profile_level_id.h"
#include "common_video/include/bitrate_adjuster.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/buffer.h"
#include "rtc_base/logging.h"
@@ -783,16 +781,8 @@ NSUInteger GetMaxSampleRate(const webrtc::H264::ProfileLevelId &profile_level_id
}
__block std::unique_ptr<rtc::Buffer> buffer = std::make_unique<rtc::Buffer>();
RTC_OBJC_TYPE(RTCRtpFragmentationHeader) * header;
{
std::unique_ptr<webrtc::RTPFragmentationHeader> header_cpp;
bool result =
H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get(), &header_cpp);
header = [[RTC_OBJC_TYPE(RTCRtpFragmentationHeader) alloc]
initWithNativeFragmentationHeader:header_cpp.get()];
if (!result) {
return;
}
if (!webrtc::H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get())) {
return;
}
RTC_OBJC_TYPE(RTCEncodedImage) *frame = [[RTC_OBJC_TYPE(RTCEncodedImage) alloc] init];
@@ -818,6 +808,8 @@ NSUInteger GetMaxSampleRate(const webrtc::H264::ProfileLevelId &profile_level_id
_h264BitstreamParser.GetLastSliceQp(&qp);
frame.qp = @(qp);
RTC_OBJC_TYPE(RTCRtpFragmentationHeader) *header =
[[RTC_OBJC_TYPE(RTCRtpFragmentationHeader) alloc] init];
BOOL res = _callback(frame, codecSpecificInfo, header);
if (!res) {
RTC_LOG(LS_ERROR) << "Encode callback failed";


@@ -29,14 +29,10 @@ using H264::ParseNaluType;
const char kAnnexBHeaderBytes[4] = {0, 0, 0, 1};
const size_t kAvccHeaderByteSize = sizeof(uint32_t);
bool H264CMSampleBufferToAnnexBBuffer(
CMSampleBufferRef avcc_sample_buffer,
bool is_keyframe,
rtc::Buffer* annexb_buffer,
std::unique_ptr<RTPFragmentationHeader>* out_header) {
bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
bool is_keyframe,
rtc::Buffer* annexb_buffer) {
RTC_DCHECK(avcc_sample_buffer);
RTC_DCHECK(out_header);
out_header->reset(nullptr);
// Get format description from the sample buffer.
CMVideoFormatDescriptionRef description =
@@ -61,10 +57,6 @@ bool H264CMSampleBufferToAnnexBBuffer(
// Truncate any previous data in the buffer without changing its capacity.
annexb_buffer->SetSize(0);
size_t nalu_offset = 0;
std::vector<size_t> frag_offsets;
std::vector<size_t> frag_lengths;
// Place all parameter sets at the front of buffer.
if (is_keyframe) {
size_t param_set_size = 0;
@@ -80,10 +72,6 @@ bool H264CMSampleBufferToAnnexBBuffer(
annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
annexb_buffer->AppendData(reinterpret_cast<const char*>(param_set),
param_set_size);
// Update fragmentation.
frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes));
frag_lengths.push_back(param_set_size);
nalu_offset += sizeof(kAnnexBHeaderBytes) + param_set_size;
}
}
@@ -132,10 +120,6 @@ bool H264CMSampleBufferToAnnexBBuffer(
// Update buffer.
annexb_buffer->AppendData(kAnnexBHeaderBytes, sizeof(kAnnexBHeaderBytes));
annexb_buffer->AppendData(data_ptr + nalu_header_size, packet_size);
// Update fragmentation.
frag_offsets.push_back(nalu_offset + sizeof(kAnnexBHeaderBytes));
frag_lengths.push_back(packet_size);
nalu_offset += sizeof(kAnnexBHeaderBytes) + packet_size;
size_t bytes_written = packet_size + sizeof(kAnnexBHeaderBytes);
bytes_remaining -= bytes_written;
@@ -143,14 +127,6 @@ bool H264CMSampleBufferToAnnexBBuffer(
}
RTC_DCHECK_EQ(bytes_remaining, (size_t)0);
std::unique_ptr<RTPFragmentationHeader> header(new RTPFragmentationHeader());
header->VerifyAndAllocateFragmentationHeader(frag_offsets.size());
RTC_DCHECK_EQ(frag_lengths.size(), frag_offsets.size());
for (size_t i = 0; i < frag_offsets.size(); ++i) {
header->fragmentationOffset[i] = frag_offsets[i];
header->fragmentationLength[i] = frag_lengths[i];
}
*out_header = std::move(header);
CFRelease(contiguous_buffer);
return true;
}


@@ -18,7 +18,6 @@
#include <vector>
#include "common_video/h264/h264_common.h"
#include "modules/include/module_common_types.h"
#include "rtc_base/buffer.h"
using webrtc::H264::NaluIndex;
@@ -27,13 +26,10 @@ namespace webrtc {
// Converts a sample buffer emitted from the VideoToolbox encoder into a buffer
// suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer
// needs to be in Annex B format. Data is written directly to |annexb_buffer|
// and a new RTPFragmentationHeader is returned in |out_header|.
bool H264CMSampleBufferToAnnexBBuffer(
CMSampleBufferRef avcc_sample_buffer,
bool is_keyframe,
rtc::Buffer* annexb_buffer,
std::unique_ptr<RTPFragmentationHeader>* out_header);
// needs to be in Annex B format. Data is written directly to |annexb_buffer|.
bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
bool is_keyframe,
rtc::Buffer* annexb_buffer);
// Converts a buffer received from RTP into a sample buffer suitable for the
// VideoToolbox decoder. The RTP buffer is in annex b format whereas the sample
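For context (not part of this commit): the dropped |out_header| output only carried per-NALU offsets and lengths, which a consumer can re-derive from the Annex B buffer itself using helpers from common_video/h264/h264_common.h, which this header already includes. A minimal sketch, assuming the FindNaluIndices helper from that header; the wrapper function name is hypothetical:

#include <vector>

#include "common_video/h264/h264_common.h"
#include "rtc_base/buffer.h"

// Illustrative helper: recover the per-NALU boundaries that the removed
// RTPFragmentationHeader used to carry, by scanning for Annex B start codes.
std::vector<webrtc::H264::NaluIndex> NaluBoundaries(const rtc::Buffer& annexb_buffer) {
  return webrtc::H264::FindNaluIndices(annexb_buffer.data(), annexb_buffer.size());
}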


@@ -276,14 +276,12 @@ static const uint8_t SPS_PPS_BUFFER[] = {
// clang-format on
rtc::Buffer annexb_buffer(arraysize(cmsample_data));
std::unique_ptr<webrtc::RTPFragmentationHeader> out_header_ptr;
CMSampleBufferRef sample_buffer =
[self createCMSampleBufferRef:(void*)cmsample_data cmsampleSize:arraysize(cmsample_data)];
Boolean result = webrtc::H264CMSampleBufferToAnnexBBuffer(sample_buffer,
/* is_keyframe */ false,
&annexb_buffer,
&out_header_ptr);
&annexb_buffer);
XCTAssertTrue(result);
@@ -293,16 +291,6 @@ static const uint8_t SPS_PPS_BUFFER[] = {
memcmp(expected_annex_b_data, annexb_buffer.data(), arraysize(expected_annex_b_data));
XCTAssertEqual(0, data_comparison_result);
webrtc::RTPFragmentationHeader* out_header = out_header_ptr.get();
XCTAssertEqual(2, (int)out_header->Size());
XCTAssertEqual(4, (int)out_header->Offset(0));
XCTAssertEqual(4, (int)out_header->Length(0));
XCTAssertEqual(12, (int)out_header->Offset(1));
XCTAssertEqual(2, (int)out_header->Length(1));
}
- (void)testH264CMSampleBufferToAnnexBBufferWithKeyframe {
@@ -321,14 +309,12 @@ static const uint8_t SPS_PPS_BUFFER[] = {
// clang-format on
rtc::Buffer annexb_buffer(arraysize(cmsample_data));
std::unique_ptr<webrtc::RTPFragmentationHeader> out_header_ptr;
CMSampleBufferRef sample_buffer =
[self createCMSampleBufferRef:(void*)cmsample_data cmsampleSize:arraysize(cmsample_data)];
Boolean result = webrtc::H264CMSampleBufferToAnnexBBuffer(sample_buffer,
/* is_keyframe */ true,
&annexb_buffer,
&out_header_ptr);
&annexb_buffer);
XCTAssertTrue(result);
@@ -341,22 +327,6 @@ static const uint8_t SPS_PPS_BUFFER[] = {
memcmp(expected_annex_b_data,
annexb_buffer.data() + arraysize(SPS_PPS_BUFFER),
arraysize(expected_annex_b_data)));
webrtc::RTPFragmentationHeader* out_header = out_header_ptr.get();
XCTAssertEqual(4, (int)out_header->Size());
XCTAssertEqual(4, (int)out_header->Offset(0));
XCTAssertEqual(14, (int)out_header->Length(0));
XCTAssertEqual(22, (int)out_header->Offset(1));
XCTAssertEqual(4, (int)out_header->Length(1));
XCTAssertEqual(30, (int)out_header->Offset(2));
XCTAssertEqual(4, (int)out_header->Length(2));
XCTAssertEqual(38, (int)out_header->Offset(3));
XCTAssertEqual(2, (int)out_header->Length(3));
}
- (CMVideoFormatDescriptionRef)createDescription {