ObjC: Implement HW codecs in ObjC instead of C++

The current ObjC HW encoder is implemented as a C++
webrtc::VideoEncoder, which we then wrap twice:
webrtc::VideoEncoder -> RTCVideoEncoder -> webrtc::VideoEncoder.
This was originally done to minimize the code diff when landing the
injectable encoder.

This CL removes the first wrapping and implements the ObjC HW encoder
as a RTCVideoEncoder directly. Similarly, the decoder is implemented
as a RTCVideoDecoder directly.
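
Roughly, the new shape is the following (an illustrative sketch using
the classes added in this CL, not part of the diff):

  @interface RTCVideoEncoderH264 : NSObject <RTCVideoEncoder>
  - (instancetype)initWithCodecInfo:(RTCVideoCodecInfo *)codecInfo;
  @end

The injectable-codec machinery then wraps this RTCVideoEncoder once into
a webrtc::VideoEncoder, instead of twice as before.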

Based on andersc@ CL: https://codereview.webrtc.org/2978623002/.

BUG=webrtc:7924

Review-Url: https://codereview.webrtc.org/2987413002
Cr-Commit-Position: refs/heads/master@{#19255}
Author: magjed
Date: 2017-08-07 06:55:28 -07:00
Committed by: Commit Bot
Parent: bea36fdee8
Commit: 73c0eb5014
23 changed files with 1227 additions and 1480 deletions

webrtc/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoDecoderH264.mm
@@ -0,0 +1,252 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#import "WebRTC/RTCVideoCodecH264.h"
#import <VideoToolbox/VideoToolbox.h>
#include "webrtc/modules/video_coding/include/video_error_codes.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/rtc_base/timeutils.h"
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.h"
#import "WebRTC/RTCVideoFrame.h"
#import "WebRTC/RTCVideoFrameBuffer.h"
#import "helpers.h"
#if defined(WEBRTC_IOS)
#import "Common/RTCUIApplicationStatusObserver.h"
#endif
// Struct that we pass to the decoder per frame to decode. We receive it again
// in the decoder callback.
struct RTCFrameDecodeParams {
RTCFrameDecodeParams(RTCVideoDecoderCallback cb, int64_t ts) : callback(cb), timestamp(ts) {}
RTCVideoDecoderCallback callback;
int64_t timestamp;
};
// This is the callback function that VideoToolbox calls when decode is
// complete.
void decompressionOutputCallback(void *decoder,
void *params,
OSStatus status,
VTDecodeInfoFlags infoFlags,
CVImageBufferRef imageBuffer,
CMTime timestamp,
CMTime duration) {
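// Take back ownership of the decode params allocated in decode:; wrapping
// the raw pointer in a unique_ptr frees it on every exit path.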
std::unique_ptr<RTCFrameDecodeParams> decodeParams(
reinterpret_cast<RTCFrameDecodeParams *>(params));
if (status != noErr) {
LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
return;
}
// TODO(tkchin): Handle CVO properly.
RTCCVPixelBuffer *frameBuffer = [[RTCCVPixelBuffer alloc] initWithPixelBuffer:imageBuffer];
RTCVideoFrame *decodedFrame =
[[RTCVideoFrame alloc] initWithBuffer:frameBuffer
rotation:RTCVideoRotation_0
timeStampNs:CMTimeGetSeconds(timestamp) * rtc::kNumNanosecsPerSec];
decodedFrame.timeStamp = decodeParams->timestamp;
decodeParams->callback(decodedFrame);
}
// Decoder.
@implementation RTCVideoDecoderH264 {
CMVideoFormatDescriptionRef _videoFormat;
VTDecompressionSessionRef _decompressionSession;
RTCVideoDecoderCallback _callback;
}
- (void)dealloc {
[self destroyDecompressionSession];
[self setVideoFormat:nullptr];
}
- (NSInteger)startDecodeWithSettings:(RTCVideoEncoderSettings *)settings
numberOfCores:(int)numberOfCores {
return WEBRTC_VIDEO_CODEC_OK;
}
- (NSInteger)decode:(RTCEncodedImage *)inputImage
missingFrames:(BOOL)missingFrames
fragmentationHeader:(RTCRtpFragmentationHeader *)fragmentationHeader
codecSpecificInfo:(__nullable id<RTCCodecSpecificInfo>)info
renderTimeMs:(int64_t)renderTimeMs {
RTC_DCHECK(inputImage.buffer);
#if defined(WEBRTC_IOS)
if (![[RTCUIApplicationStatusObserver sharedInstance] isApplicationActive]) {
// Ignore all decode requests when app isn't active. In this state, the
// hardware decoder has been invalidated by the OS.
// Reset video format so that we won't process frames until the next
// keyframe.
[self setVideoFormat:nullptr];
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
#endif
CMVideoFormatDescriptionRef inputFormat = nullptr;
if (webrtc::H264AnnexBBufferHasVideoFormatDescription((uint8_t *)inputImage.buffer.bytes,
inputImage.buffer.length)) {
inputFormat = webrtc::CreateVideoFormatDescription((uint8_t *)inputImage.buffer.bytes,
inputImage.buffer.length);
if (inputFormat) {
// Check if the video format has changed, and reinitialize decoder if
// needed.
if (!CMFormatDescriptionEqual(inputFormat, _videoFormat)) {
[self setVideoFormat:inputFormat];
[self resetDecompressionSession];
}
CFRelease(inputFormat);
}
}
if (!_videoFormat) {
// We received a frame but we don't have format information so we can't
// decode it.
// This can happen after backgrounding. We need to wait for the next
// sps/pps before we can resume so we request a keyframe by returning an
// error.
LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
return WEBRTC_VIDEO_CODEC_ERROR;
}
CMSampleBufferRef sampleBuffer = nullptr;
if (!webrtc::H264AnnexBBufferToCMSampleBuffer((uint8_t *)inputImage.buffer.bytes,
inputImage.buffer.length,
_videoFormat,
&sampleBuffer)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
RTC_DCHECK(sampleBuffer);
VTDecodeFrameFlags decodeFlags = kVTDecodeFrame_EnableAsynchronousDecompression;
std::unique_ptr<RTCFrameDecodeParams> frameDecodeParams;
frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
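// The release() below transfers ownership of the decode params to
// VideoToolbox; they are reclaimed in decompressionOutputCallback.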
OSStatus status = VTDecompressionSessionDecodeFrame(
_decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
#if defined(WEBRTC_IOS)
// Re-initialize the decoder if we have an invalid session while the app is
// active and retry the decode request.
if (status == kVTInvalidSessionErr && [self resetDecompressionSession] == WEBRTC_VIDEO_CODEC_OK) {
frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
status = VTDecompressionSessionDecodeFrame(
_decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
}
#endif
CFRelease(sampleBuffer);
if (status != noErr) {
LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
}
- (void)setCallback:(RTCVideoDecoderCallback)callback {
_callback = callback;
}
- (NSInteger)releaseDecoder {
// Need to invalidate the session so that callbacks no longer occur and it
// is safe to null out the callback.
[self destroyDecompressionSession];
[self setVideoFormat:nullptr];
_callback = nullptr;
return WEBRTC_VIDEO_CODEC_OK;
}
#pragma mark - Private
- (int)resetDecompressionSession {
[self destroyDecompressionSession];
// Need to wait for the first SPS to initialize decoder.
if (!_videoFormat) {
return WEBRTC_VIDEO_CODEC_OK;
}
// Set keys for OpenGL and IOSurface compatibility, which makes the decoder
// create pixel buffers with GPU-backed memory. The intent here is to pass
// the pixel buffers directly so we avoid a texture upload later during
// rendering. This currently is moot because we are converting back to an
// I420 frame after decode, but eventually we will be able to plumb
// CVPixelBuffers directly to the renderer.
// TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that
// we can pass CVPixelBuffers as native handles in decoder output.
static size_t const attributesSize = 3;
CFTypeRef keys[attributesSize] = {
#if defined(WEBRTC_IOS)
kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
kCVPixelBufferOpenGLCompatibilityKey,
#endif
kCVPixelBufferIOSurfacePropertiesKey,
kCVPixelBufferPixelFormatTypeKey
};
CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, pixelFormat};
CFDictionaryRef attributes = CreateCFTypeDictionary(keys, values, attributesSize);
if (ioSurfaceValue) {
CFRelease(ioSurfaceValue);
ioSurfaceValue = nullptr;
}
if (pixelFormat) {
CFRelease(pixelFormat);
pixelFormat = nullptr;
}
VTDecompressionOutputCallbackRecord record = {
decompressionOutputCallback, nullptr,
};
OSStatus status = VTDecompressionSessionCreate(
nullptr, _videoFormat, nullptr, attributes, &record, &_decompressionSession);
CFRelease(attributes);
if (status != noErr) {
[self destroyDecompressionSession];
return WEBRTC_VIDEO_CODEC_ERROR;
}
[self configureDecompressionSession];
return WEBRTC_VIDEO_CODEC_OK;
}
- (void)configureDecompressionSession {
RTC_DCHECK(_decompressionSession);
#if defined(WEBRTC_IOS)
VTSessionSetProperty(_decompressionSession, kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
#endif
}
- (void)destroyDecompressionSession {
if (_decompressionSession) {
VTDecompressionSessionInvalidate(_decompressionSession);
CFRelease(_decompressionSession);
_decompressionSession = nullptr;
}
}
- (void)setVideoFormat:(CMVideoFormatDescriptionRef)videoFormat {
if (_videoFormat == videoFormat) {
return;
}
if (_videoFormat) {
CFRelease(_videoFormat);
}
_videoFormat = videoFormat;
if (_videoFormat) {
CFRetain(_videoFormat);
}
}
- (NSString *)implementationName {
return @"VideoToolbox";
}
@end

webrtc/sdk/objc/Framework/Classes/VideoToolbox/RTCVideoEncoderH264.mm
@@ -0,0 +1,680 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#import "WebRTC/RTCVideoCodecH264.h"
#import <VideoToolbox/VideoToolbox.h>
#include <vector>
#if defined(WEBRTC_IOS)
#import "Common/RTCUIApplicationStatusObserver.h"
#import "WebRTC/UIDevice+RTCDevice.h"
#endif
#import "PeerConnection/RTCVideoCodec+Private.h"
#import "WebRTC/RTCVideoCodec.h"
#import "WebRTC/RTCVideoFrame.h"
#import "WebRTC/RTCVideoFrameBuffer.h"
#import "helpers.h"
#include "libyuv/convert_from.h"
#include "webrtc/common_video/h264/h264_bitstream_parser.h"
#include "webrtc/common_video/h264/profile_level_id.h"
#include "webrtc/common_video/include/bitrate_adjuster.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/video_coding/include/video_error_codes.h"
#include "webrtc/rtc_base/buffer.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/rtc_base/timeutils.h"
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.h"
#include "webrtc/system_wrappers/include/clock.h"
@interface RTCVideoEncoderH264 ()
- (void)frameWasEncoded:(OSStatus)status
flags:(VTEncodeInfoFlags)infoFlags
sampleBuffer:(CMSampleBufferRef)sampleBuffer
codecSpecificInfo:(id<RTCCodecSpecificInfo>)codecSpecificInfo
width:(int32_t)width
height:(int32_t)height
renderTimeMs:(int64_t)renderTimeMs
timestamp:(uint32_t)timestamp
rotation:(RTCVideoRotation)rotation;
@end
// The ratio between kVTCompressionPropertyKey_DataRateLimits and
// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
// than the average bit rate to avoid undershooting the target.
const float kLimitToAverageBitRateFactor = 1.5f;
// These thresholds deviate from the default h264 QP thresholds, as they
// have been found to work better on devices that support VideoToolbox.
const int kLowH264QpThreshold = 28;
const int kHighH264QpThreshold = 39;
// Struct that we pass to the encoder per frame to encode. We receive it again
// in the encoder callback.
struct RTCFrameEncodeParams {
RTCFrameEncodeParams(RTCVideoEncoderH264 *e,
RTCCodecSpecificInfoH264 *csi,
int32_t w,
int32_t h,
int64_t rtms,
uint32_t ts,
RTCVideoRotation r)
: encoder(e), width(w), height(h), render_time_ms(rtms), timestamp(ts), rotation(r) {
if (csi) {
codecSpecificInfo = csi;
} else {
codecSpecificInfo = [[RTCCodecSpecificInfoH264 alloc] init];
}
}
RTCVideoEncoderH264 *encoder;
RTCCodecSpecificInfoH264 *codecSpecificInfo;
int32_t width;
int32_t height;
int64_t render_time_ms;
uint32_t timestamp;
RTCVideoRotation rotation;
};
// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
// encoder. This performs the copy and format conversion.
// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
bool CopyVideoFrameToPixelBuffer(id<RTCI420Buffer> frameBuffer, CVPixelBufferRef pixelBuffer) {
RTC_DCHECK(pixelBuffer);
RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixelBuffer),
kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0), frameBuffer.height);
RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0), frameBuffer.width);
CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
if (cvRet != kCVReturnSuccess) {
LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
return false;
}
uint8_t *dstY = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
int dstStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
uint8_t *dstUV = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1));
int dstStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
// Convert I420 to NV12.
int ret = libyuv::I420ToNV12(frameBuffer.dataY,
frameBuffer.strideY,
frameBuffer.dataU,
frameBuffer.strideU,
frameBuffer.dataV,
frameBuffer.strideV,
dstY,
dstStrideY,
dstUV,
dstStrideUV,
frameBuffer.width,
frameBuffer.height);
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
if (ret) {
LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
return false;
}
return true;
}
CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) {
if (!pixel_buffer_pool) {
LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
return nullptr;
}
CVPixelBufferRef pixel_buffer;
CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool, &pixel_buffer);
if (ret != kCVReturnSuccess) {
LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
// We probably want to drop frames here, since failure probably means
// that the pool is empty.
return nullptr;
}
return pixel_buffer;
}
// This is the callback function that VideoToolbox calls when encode is
// complete. From inspection this happens on its own queue.
void compressionOutputCallback(void *encoder,
void *params,
OSStatus status,
VTEncodeInfoFlags infoFlags,
CMSampleBufferRef sampleBuffer) {
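// Take back ownership of the encode params allocated in encode:.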
std::unique_ptr<RTCFrameEncodeParams> encodeParams(
reinterpret_cast<RTCFrameEncodeParams *>(params));
[encodeParams->encoder frameWasEncoded:status
flags:infoFlags
sampleBuffer:sampleBuffer
codecSpecificInfo:encodeParams->codecSpecificInfo
width:encodeParams->width
height:encodeParams->height
renderTimeMs:encodeParams->render_time_ms
timestamp:encodeParams->timestamp
rotation:encodeParams->rotation];
}
// Extract VideoToolbox profile out of the cricket::VideoCodec. If there is no
// specific VideoToolbox profile for the specified level, AutoLevel will be
// returned. The user must initialize the encoder with a resolution and
// framerate conforming to the selected H264 level regardless.
CFStringRef ExtractProfile(const cricket::VideoCodec &codec) {
const rtc::Optional<webrtc::H264::ProfileLevelId> profile_level_id =
webrtc::H264::ParseSdpProfileLevelId(codec.params);
RTC_DCHECK(profile_level_id);
switch (profile_level_id->profile) {
case webrtc::H264::kProfileConstrainedBaseline:
case webrtc::H264::kProfileBaseline:
switch (profile_level_id->level) {
case webrtc::H264::kLevel3:
return kVTProfileLevel_H264_Baseline_3_0;
case webrtc::H264::kLevel3_1:
return kVTProfileLevel_H264_Baseline_3_1;
case webrtc::H264::kLevel3_2:
return kVTProfileLevel_H264_Baseline_3_2;
case webrtc::H264::kLevel4:
return kVTProfileLevel_H264_Baseline_4_0;
case webrtc::H264::kLevel4_1:
return kVTProfileLevel_H264_Baseline_4_1;
case webrtc::H264::kLevel4_2:
return kVTProfileLevel_H264_Baseline_4_2;
case webrtc::H264::kLevel5:
return kVTProfileLevel_H264_Baseline_5_0;
case webrtc::H264::kLevel5_1:
return kVTProfileLevel_H264_Baseline_5_1;
case webrtc::H264::kLevel5_2:
return kVTProfileLevel_H264_Baseline_5_2;
case webrtc::H264::kLevel1:
case webrtc::H264::kLevel1_b:
case webrtc::H264::kLevel1_1:
case webrtc::H264::kLevel1_2:
case webrtc::H264::kLevel1_3:
case webrtc::H264::kLevel2:
case webrtc::H264::kLevel2_1:
case webrtc::H264::kLevel2_2:
return kVTProfileLevel_H264_Baseline_AutoLevel;
}
case webrtc::H264::kProfileMain:
switch (profile_level_id->level) {
case webrtc::H264::kLevel3:
return kVTProfileLevel_H264_Main_3_0;
case webrtc::H264::kLevel3_1:
return kVTProfileLevel_H264_Main_3_1;
case webrtc::H264::kLevel3_2:
return kVTProfileLevel_H264_Main_3_2;
case webrtc::H264::kLevel4:
return kVTProfileLevel_H264_Main_4_0;
case webrtc::H264::kLevel4_1:
return kVTProfileLevel_H264_Main_4_1;
case webrtc::H264::kLevel4_2:
return kVTProfileLevel_H264_Main_4_2;
case webrtc::H264::kLevel5:
return kVTProfileLevel_H264_Main_5_0;
case webrtc::H264::kLevel5_1:
return kVTProfileLevel_H264_Main_5_1;
case webrtc::H264::kLevel5_2:
return kVTProfileLevel_H264_Main_5_2;
case webrtc::H264::kLevel1:
case webrtc::H264::kLevel1_b:
case webrtc::H264::kLevel1_1:
case webrtc::H264::kLevel1_2:
case webrtc::H264::kLevel1_3:
case webrtc::H264::kLevel2:
case webrtc::H264::kLevel2_1:
case webrtc::H264::kLevel2_2:
return kVTProfileLevel_H264_Main_AutoLevel;
}
case webrtc::H264::kProfileConstrainedHigh:
case webrtc::H264::kProfileHigh:
switch (profile_level_id->level) {
case webrtc::H264::kLevel3:
return kVTProfileLevel_H264_High_3_0;
case webrtc::H264::kLevel3_1:
return kVTProfileLevel_H264_High_3_1;
case webrtc::H264::kLevel3_2:
return kVTProfileLevel_H264_High_3_2;
case webrtc::H264::kLevel4:
return kVTProfileLevel_H264_High_4_0;
case webrtc::H264::kLevel4_1:
return kVTProfileLevel_H264_High_4_1;
case webrtc::H264::kLevel4_2:
return kVTProfileLevel_H264_High_4_2;
case webrtc::H264::kLevel5:
return kVTProfileLevel_H264_High_5_0;
case webrtc::H264::kLevel5_1:
return kVTProfileLevel_H264_High_5_1;
case webrtc::H264::kLevel5_2:
return kVTProfileLevel_H264_High_5_2;
case webrtc::H264::kLevel1:
case webrtc::H264::kLevel1_b:
case webrtc::H264::kLevel1_1:
case webrtc::H264::kLevel1_2:
case webrtc::H264::kLevel1_3:
case webrtc::H264::kLevel2:
case webrtc::H264::kLevel2_1:
case webrtc::H264::kLevel2_2:
return kVTProfileLevel_H264_High_AutoLevel;
}
}
}
@implementation RTCVideoEncoderH264 {
RTCVideoCodecInfo *_codecInfo;
webrtc::BitrateAdjuster *_bitrateAdjuster;
uint32_t _targetBitrateBps;
uint32_t _encoderBitrateBps;
RTCH264PacketizationMode _packetizationMode;
CFStringRef _profile;
RTCVideoEncoderCallback _callback;
int32_t _width;
int32_t _height;
VTCompressionSessionRef _compressionSession;
RTCVideoCodecMode _mode;
webrtc::H264BitstreamParser _h264BitstreamParser;
std::vector<uint8_t> _nv12ScaleBuffer;
}
// .5 is set as a minimum to prevent overcompensating for large temporary
// overshoots. We don't want to degrade video quality too badly.
// .95 is set to prevent oscillations. When a lower bitrate is set on the
// encoder than previously set, its output seems to have a brief period of
// drastically reduced bitrate, so we want to avoid that. In steady state
// conditions, 0.95 seems to give us better overall bitrate over long periods
// of time.
- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo *)codecInfo {
if (self = [super init]) {
_codecInfo = codecInfo;
_bitrateAdjuster = new webrtc::BitrateAdjuster(webrtc::Clock::GetRealTimeClock(), .5, .95);
_packetizationMode = RTCH264PacketizationModeNonInterleaved;
_profile = ExtractProfile([codecInfo nativeVideoCodec]);
LOG(LS_INFO) << "Using profile " << CFStringToString(_profile);
RTC_CHECK([codecInfo.name isEqualToString:@"H264"]);
}
return self;
}
- (void)dealloc {
[self destroyCompressionSession];
}
- (NSInteger)startEncodeWithSettings:(RTCVideoEncoderSettings *)settings
numberOfCores:(int)numberOfCores {
RTC_DCHECK(settings);
RTC_DCHECK([settings.name isEqualToString:@"H264"]);
_width = settings.width;
_height = settings.height;
_mode = settings.mode;
// We can only set average bitrate on the HW encoder.
_targetBitrateBps = settings.startBitrate;
_bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
// TODO(tkchin): Try setting payload size via
// kVTCompressionPropertyKey_MaxH264SliceBytes.
return [self resetCompressionSession];
}
- (NSInteger)encode:(RTCVideoFrame *)frame
codecSpecificInfo:(id<RTCCodecSpecificInfo>)codecSpecificInfo
frameTypes:(NSArray<NSNumber *> *)frameTypes {
RTC_DCHECK_EQ(frame.width, _width);
RTC_DCHECK_EQ(frame.height, _height);
if (!_callback || !_compressionSession) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
#if defined(WEBRTC_IOS)
if (![[RTCUIApplicationStatusObserver sharedInstance] isApplicationActive]) {
// Ignore all encode requests when app isn't active. In this state, the
// hardware encoder has been invalidated by the OS.
return WEBRTC_VIDEO_CODEC_OK;
}
#endif
BOOL isKeyframeRequired = NO;
// Get a pixel buffer from the pool and copy frame data over.
CVPixelBufferPoolRef pixelBufferPool =
VTCompressionSessionGetPixelBufferPool(_compressionSession);
#if defined(WEBRTC_IOS)
if (!pixelBufferPool) {
// Kind of a hack. On backgrounding, the compression session seems to get
// invalidated, which causes this pool call to fail when the application
// is foregrounded and frames are being sent for encoding again.
// Resetting the session when this happens fixes the issue.
// In addition we request a keyframe so video can recover quickly.
[self resetCompressionSession];
pixelBufferPool = VTCompressionSessionGetPixelBufferPool(_compressionSession);
isKeyframeRequired = YES;
LOG(LS_INFO) << "Resetting compression session due to invalid pool.";
}
#endif
CVPixelBufferRef pixelBuffer = nullptr;
if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
// Native frame buffer
RTCCVPixelBuffer *rtcPixelBuffer = (RTCCVPixelBuffer *)frame.buffer;
if (![rtcPixelBuffer requiresCropping]) {
// This pixel buffer might have a higher resolution than what the
// compression session is configured to. The compression session can
// handle that and will output encoded frames in the configured
// resolution regardless of the input pixel buffer resolution.
pixelBuffer = rtcPixelBuffer.pixelBuffer;
CVBufferRetain(pixelBuffer);
} else {
// Cropping required, we need to crop and scale to a new pixel buffer.
pixelBuffer = CreatePixelBuffer(pixelBufferPool);
if (!pixelBuffer) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
int dstWidth = CVPixelBufferGetWidth(pixelBuffer);
int dstHeight = CVPixelBufferGetHeight(pixelBuffer);
if ([rtcPixelBuffer requiresScalingToWidth:dstWidth height:dstHeight]) {
int size =
[rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:dstWidth height:dstHeight];
_nv12ScaleBuffer.resize(size);
} else {
_nv12ScaleBuffer.clear();
}
_nv12ScaleBuffer.shrink_to_fit();
if (![rtcPixelBuffer cropAndScaleTo:pixelBuffer withTempBuffer:_nv12ScaleBuffer.data()]) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
}
if (!pixelBuffer) {
// We did not have a native frame buffer
pixelBuffer = CreatePixelBuffer(pixelBufferPool);
if (!pixelBuffer) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
RTC_DCHECK(pixelBuffer);
if (!CopyVideoFrameToPixelBuffer([frame.buffer toI420], pixelBuffer)) {
LOG(LS_ERROR) << "Failed to copy frame data.";
CVBufferRelease(pixelBuffer);
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// Check if we need a keyframe.
if (!isKeyframeRequired && frameTypes) {
for (NSNumber *frameType in frameTypes) {
if ((RTCFrameType)frameType.intValue == RTCFrameTypeVideoFrameKey) {
isKeyframeRequired = YES;
break;
}
}
}
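// The timescale of 1000 means the presentation timestamp below is
// expressed in milliseconds.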
CMTime presentationTimeStamp = CMTimeMake(frame.timeStampNs / rtc::kNumNanosecsPerMillisec, 1000);
CFDictionaryRef frameProperties = nullptr;
if (isKeyframeRequired) {
CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
CFTypeRef values[] = {kCFBooleanTrue};
frameProperties = CreateCFTypeDictionary(keys, values, 1);
}
std::unique_ptr<RTCFrameEncodeParams> encodeParams;
encodeParams.reset(new RTCFrameEncodeParams(self,
codecSpecificInfo,
_width,
_height,
frame.timeStampNs / rtc::kNumNanosecsPerMillisec,
frame.timeStamp,
frame.rotation));
encodeParams->codecSpecificInfo.packetizationMode = _packetizationMode;
// Update the bitrate if needed.
[self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()];
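// encodeParams.release() transfers ownership to VideoToolbox; the params
// are reclaimed in compressionOutputCallback.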
OSStatus status = VTCompressionSessionEncodeFrame(_compressionSession,
pixelBuffer,
presentationTimeStamp,
kCMTimeInvalid,
frameProperties,
encodeParams.release(),
nullptr);
if (frameProperties) {
CFRelease(frameProperties);
}
if (pixelBuffer) {
CVBufferRelease(pixelBuffer);
}
if (status != noErr) {
LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
}
- (void)setCallback:(RTCVideoEncoderCallback)callback {
_callback = callback;
}
- (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate {
_targetBitrateBps = 1000 * bitrateKbit;
_bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
[self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()];
return WEBRTC_VIDEO_CODEC_OK;
}
#pragma mark - Private
- (NSInteger)releaseEncoder {
// Need to destroy so that the session is invalidated and won't use the
// callback anymore. Do not remove callback until the session is invalidated
// since async encoder callbacks can occur until invalidation.
[self destroyCompressionSession];
_callback = nullptr;
return WEBRTC_VIDEO_CODEC_OK;
}
- (int)resetCompressionSession {
[self destroyCompressionSession];
// Set source image buffer attributes. These attributes will be present on
// buffers retrieved from the encoder's pixel buffer pool.
const size_t attributesSize = 3;
CFTypeRef keys[attributesSize] = {
#if defined(WEBRTC_IOS)
kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
kCVPixelBufferOpenGLCompatibilityKey,
#endif
kCVPixelBufferIOSurfacePropertiesKey,
kCVPixelBufferPixelFormatTypeKey
};
CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, pixelFormat};
CFDictionaryRef sourceAttributes = CreateCFTypeDictionary(keys, values, attributesSize);
if (ioSurfaceValue) {
CFRelease(ioSurfaceValue);
ioSurfaceValue = nullptr;
}
if (pixelFormat) {
CFRelease(pixelFormat);
pixelFormat = nullptr;
}
OSStatus status = VTCompressionSessionCreate(nullptr, // use default allocator
_width,
_height,
kCMVideoCodecType_H264,
nullptr, // use default encoder
sourceAttributes,
nullptr, // use default compressed data allocator
compressionOutputCallback,
nullptr,
&_compressionSession);
if (sourceAttributes) {
CFRelease(sourceAttributes);
sourceAttributes = nullptr;
}
if (status != noErr) {
LOG(LS_ERROR) << "Failed to create compression session: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
[self configureCompressionSession];
return WEBRTC_VIDEO_CODEC_OK;
}
- (void)configureCompressionSession {
RTC_DCHECK(_compressionSession);
SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_RealTime, true);
SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_ProfileLevel, _profile);
SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AllowFrameReordering, false);
[self setEncoderBitrateBps:_targetBitrateBps];
// TODO(tkchin): Look at entropy mode and colorspace matrices.
// TODO(tkchin): Investigate to see if there's any way to make this work.
// May need it to interop with Android. Currently this call just fails.
// On inspecting encoder output on iOS8, this value is set to 6.
// internal::SetVTSessionProperty(compression_session_,
// kVTCompressionPropertyKey_MaxFrameDelayCount,
// 1);
// Set a relatively large value for keyframe emission (7200 frames or 4 minutes).
SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
SetVTSessionProperty(
_compressionSession, kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
}
- (void)destroyCompressionSession {
if (_compressionSession) {
VTCompressionSessionInvalidate(_compressionSession);
CFRelease(_compressionSession);
_compressionSession = nullptr;
}
}
- (NSString *)implementationName {
return @"VideoToolbox";
}
- (void)setBitrateBps:(uint32_t)bitrateBps {
if (_encoderBitrateBps != bitrateBps) {
[self setEncoderBitrateBps:bitrateBps];
}
}
- (void)setEncoderBitrateBps:(uint32_t)bitrateBps {
if (_compressionSession) {
SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitrateBps);
// TODO(tkchin): Add a helper method to set array value.
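// kVTCompressionPropertyKey_DataRateLimits takes bytes per second, hence
// the division by 8.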
int64_t dataLimitBytesPerSecondValue =
static_cast<int64_t>(bitrateBps * kLimitToAverageBitRateFactor / 8);
CFNumberRef bytesPerSecond =
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &dataLimitBytesPerSecondValue);
int64_t oneSecondValue = 1;
CFNumberRef oneSecond =
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &oneSecondValue);
const void *nums[2] = {bytesPerSecond, oneSecond};
CFArrayRef dataRateLimits = CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
OSStatus status = VTSessionSetProperty(
_compressionSession, kVTCompressionPropertyKey_DataRateLimits, dataRateLimits);
if (bytesPerSecond) {
CFRelease(bytesPerSecond);
}
if (oneSecond) {
CFRelease(oneSecond);
}
if (dataRateLimits) {
CFRelease(dataRateLimits);
}
if (status != noErr) {
LOG(LS_ERROR) << "Failed to set data rate limit";
}
_encoderBitrateBps = bitrateBps;
}
}
- (void)frameWasEncoded:(OSStatus)status
flags:(VTEncodeInfoFlags)infoFlags
sampleBuffer:(CMSampleBufferRef)sampleBuffer
codecSpecificInfo:(id<RTCCodecSpecificInfo>)codecSpecificInfo
width:(int32_t)width
height:(int32_t)height
renderTimeMs:(int64_t)renderTimeMs
timestamp:(uint32_t)timestamp
rotation:(RTCVideoRotation)rotation {
if (status != noErr) {
LOG(LS_ERROR) << "H264 encode failed.";
return;
}
if (infoFlags & kVTEncodeInfo_FrameDropped) {
LOG(LS_INFO) << "H264 encode dropped frame.";
return;
}
BOOL isKeyframe = NO;
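// A sample is a keyframe (sync sample) if it lacks the
// kCMSampleAttachmentKey_NotSync attachment.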
CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, 0);
if (attachments != nullptr && CFArrayGetCount(attachments)) {
CFDictionaryRef attachment =
static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
isKeyframe = !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
}
if (isKeyframe) {
LOG(LS_INFO) << "Generated keyframe";
}
// Convert the sample buffer into a buffer suitable for RTP packetization.
// TODO(tkchin): Allocate buffers through a pool.
std::unique_ptr<rtc::Buffer> buffer(new rtc::Buffer());
RTCRtpFragmentationHeader *header;
{
webrtc::RTPFragmentationHeader *header_cpp;
bool result =
H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get(), &header_cpp);
// Check the conversion result before wrapping |header_cpp|, which is only
// valid on success.
if (!result) {
return;
}
header = [[RTCRtpFragmentationHeader alloc] initWithNativeFragmentationHeader:header_cpp];
}
RTCEncodedImage *frame = [[RTCEncodedImage alloc] init];
frame.buffer = [NSData dataWithBytesNoCopy:buffer->data() length:buffer->size() freeWhenDone:NO];
frame.encodedWidth = width;
frame.encodedHeight = height;
frame.completeFrame = YES;
frame.frameType = isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta;
frame.captureTimeMs = renderTimeMs;
frame.timeStamp = timestamp;
frame.rotation = rotation;
frame.contentType = (_mode == RTCVideoCodecModeScreensharing) ? RTCVideoContentTypeScreenshare :
RTCVideoContentTypeUnspecified;
frame.isTimingFrame = NO;
int qp;
_h264BitstreamParser.ParseBitstream(buffer->data(), buffer->size());
_h264BitstreamParser.GetLastSliceQp(&qp);
frame.qp = @(qp);
BOOL res = _callback(frame, codecSpecificInfo, header);
if (!res) {
LOG(LS_ERROR) << "Encode callback failed";
return;
}
_bitrateAdjuster->Update(frame.buffer.length);
}
- (RTCVideoEncoderQpThresholds *)scalingSettings {
return [[RTCVideoEncoderQpThresholds alloc] initWithThresholdsLow:kLowH264QpThreshold
high:kHighH264QpThreshold];
}
@end

webrtc/sdk/objc/Framework/Classes/VideoToolbox/decoder.h
@@ -1,59 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_DECODER_H_
#define WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_DECODER_H_
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
#include <VideoToolbox/VideoToolbox.h>
// This file provides an H264 decoder implementation using the VideoToolbox
// APIs. Since documentation is almost non-existent, this is largely based on
// the information in the VideoToolbox header files, a talk from WWDC 2014 and
// experimentation.
namespace webrtc {
class H264VideoToolboxDecoder : public H264Decoder {
public:
H264VideoToolboxDecoder();
~H264VideoToolboxDecoder() override;
int InitDecode(const VideoCodec* video_codec, int number_of_cores) override;
int Decode(const EncodedImage& input_image,
bool missing_frames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codec_specific_info,
int64_t render_time_ms) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
int Release() override;
const char* ImplementationName() const override;
private:
int ResetDecompressionSession();
void ConfigureDecompressionSession();
void DestroyDecompressionSession();
void SetVideoFormat(CMVideoFormatDescriptionRef video_format);
DecodedImageCallback* callback_;
CMVideoFormatDescriptionRef video_format_;
VTDecompressionSessionRef decompression_session_;
}; // H264VideoToolboxDecoder
} // namespace webrtc
#endif // WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_DECODER_H_

webrtc/sdk/objc/Framework/Classes/VideoToolbox/decoder.mm
@@ -1,277 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/decoder.h"
#include <memory>
#include "libyuv/convert.h"
#include "webrtc/api/video/video_frame.h"
#include "webrtc/common_video/include/video_frame.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/sdk/objc/Framework/Classes/Video/objc_frame_buffer.h"
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.h"
#import "WebRTC/RTCVideoFrameBuffer.h"
#if defined(WEBRTC_IOS)
#import "Common/RTCUIApplicationStatusObserver.h"
#endif
namespace webrtc {
namespace {
static const int64_t kMsPerSec = 1000;
// Convenience function for creating a dictionary.
inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
CFTypeRef* values,
size_t size) {
return CFDictionaryCreate(nullptr, keys, values, size,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
}
// Struct that we pass to the decoder per frame to decode. We receive it again
// in the decoder callback.
struct FrameDecodeParams {
FrameDecodeParams(DecodedImageCallback* cb, int64_t ts)
: callback(cb), timestamp(ts) {}
DecodedImageCallback* callback;
int64_t timestamp;
};
// This is the callback function that VideoToolbox calls when decode is
// complete.
void VTDecompressionOutputCallback(void* decoder,
void* params,
OSStatus status,
VTDecodeInfoFlags info_flags,
CVImageBufferRef image_buffer,
CMTime timestamp,
CMTime duration) {
std::unique_ptr<FrameDecodeParams> decode_params(
reinterpret_cast<FrameDecodeParams*>(params));
if (status != noErr) {
LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
return;
}
// TODO(tkchin): Handle CVO properly.
rtc::scoped_refptr<VideoFrameBuffer> buffer = new rtc::RefCountedObject<ObjCFrameBuffer>(
[[RTCCVPixelBuffer alloc] initWithPixelBuffer:image_buffer]);
VideoFrame decoded_frame(buffer, decode_params->timestamp,
CMTimeGetSeconds(timestamp) * kMsPerSec,
kVideoRotation_0);
decode_params->callback->Decoded(decoded_frame);
}
} // namespace
H264VideoToolboxDecoder::H264VideoToolboxDecoder()
: callback_(nullptr), video_format_(nullptr), decompression_session_(nullptr) {}
H264VideoToolboxDecoder::~H264VideoToolboxDecoder() {
DestroyDecompressionSession();
SetVideoFormat(nullptr);
}
int H264VideoToolboxDecoder::InitDecode(const VideoCodec* video_codec,
int number_of_cores) {
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxDecoder::Decode(
const EncodedImage& input_image,
bool missing_frames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codec_specific_info,
int64_t render_time_ms) {
RTC_DCHECK(input_image._buffer);
#if defined(WEBRTC_IOS)
if (![[RTCUIApplicationStatusObserver sharedInstance] isApplicationActive]) {
// Ignore all decode requests when app isn't active. In this state, the
// hardware decoder has been invalidated by the OS.
// Reset video format so that we won't process frames until the next
// keyframe.
SetVideoFormat(nullptr);
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
#endif
CMVideoFormatDescriptionRef input_format = nullptr;
if (H264AnnexBBufferHasVideoFormatDescription(input_image._buffer,
input_image._length)) {
input_format = CreateVideoFormatDescription(input_image._buffer,
input_image._length);
if (input_format) {
// Check if the video format has changed, and reinitialize decoder if
// needed.
if (!CMFormatDescriptionEqual(input_format, video_format_)) {
SetVideoFormat(input_format);
ResetDecompressionSession();
}
CFRelease(input_format);
}
}
if (!video_format_) {
// We received a frame but we don't have format information so we can't
// decode it.
// This can happen after backgrounding. We need to wait for the next
// sps/pps before we can resume so we request a keyframe by returning an
// error.
LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
return WEBRTC_VIDEO_CODEC_ERROR;
}
CMSampleBufferRef sample_buffer = nullptr;
if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
input_image._length, video_format_,
&sample_buffer)) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
RTC_DCHECK(sample_buffer);
VTDecodeFrameFlags decode_flags =
kVTDecodeFrame_EnableAsynchronousDecompression;
std::unique_ptr<FrameDecodeParams> frame_decode_params;
frame_decode_params.reset(
new FrameDecodeParams(callback_, input_image._timeStamp));
OSStatus status = VTDecompressionSessionDecodeFrame(
decompression_session_, sample_buffer, decode_flags,
frame_decode_params.release(), nullptr);
#if defined(WEBRTC_IOS)
// Re-initialize the decoder if we have an invalid session while the app is
// active and retry the decode request.
if (status == kVTInvalidSessionErr &&
ResetDecompressionSession() == WEBRTC_VIDEO_CODEC_OK) {
frame_decode_params.reset(
new FrameDecodeParams(callback_, input_image._timeStamp));
status = VTDecompressionSessionDecodeFrame(
decompression_session_, sample_buffer, decode_flags,
frame_decode_params.release(), nullptr);
}
#endif
CFRelease(sample_buffer);
if (status != noErr) {
LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxDecoder::RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) {
RTC_DCHECK(!callback_);
callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxDecoder::Release() {
// Need to invalidate the session so that callbacks no longer occur and it
// is safe to null out the callback.
DestroyDecompressionSession();
SetVideoFormat(nullptr);
callback_ = nullptr;
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxDecoder::ResetDecompressionSession() {
DestroyDecompressionSession();
// Need to wait for the first SPS to initialize decoder.
if (!video_format_) {
return WEBRTC_VIDEO_CODEC_OK;
}
// Set keys for OpenGL and IOSurface compatibility, which makes the decoder
// create pixel buffers with GPU-backed memory. The intent here is to pass
// the pixel buffers directly so we avoid a texture upload later during
// rendering. This currently is moot because we are converting back to an
// I420 frame after decode, but eventually we will be able to plumb
// CVPixelBuffers directly to the renderer.
// TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that
// we can pass CVPixelBuffers as native handles in decoder output.
static size_t const attributes_size = 3;
CFTypeRef keys[attributes_size] = {
#if defined(WEBRTC_IOS)
kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
kCVPixelBufferOpenGLCompatibilityKey,
#endif
kCVPixelBufferIOSurfacePropertiesKey,
kCVPixelBufferPixelFormatTypeKey
};
CFDictionaryRef io_surface_value = CreateCFDictionary(nullptr, nullptr, 0);
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
pixel_format};
CFDictionaryRef attributes =
CreateCFDictionary(keys, values, attributes_size);
if (io_surface_value) {
CFRelease(io_surface_value);
io_surface_value = nullptr;
}
if (pixel_format) {
CFRelease(pixel_format);
pixel_format = nullptr;
}
VTDecompressionOutputCallbackRecord record = {
VTDecompressionOutputCallback, this,
};
OSStatus status =
VTDecompressionSessionCreate(nullptr, video_format_, nullptr, attributes,
&record, &decompression_session_);
CFRelease(attributes);
if (status != noErr) {
DestroyDecompressionSession();
return WEBRTC_VIDEO_CODEC_ERROR;
}
ConfigureDecompressionSession();
return WEBRTC_VIDEO_CODEC_OK;
}
void H264VideoToolboxDecoder::ConfigureDecompressionSession() {
RTC_DCHECK(decompression_session_);
#if defined(WEBRTC_IOS)
VTSessionSetProperty(decompression_session_,
kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
#endif
}
void H264VideoToolboxDecoder::DestroyDecompressionSession() {
if (decompression_session_) {
VTDecompressionSessionInvalidate(decompression_session_);
CFRelease(decompression_session_);
decompression_session_ = nullptr;
}
}
void H264VideoToolboxDecoder::SetVideoFormat(
CMVideoFormatDescriptionRef video_format) {
if (video_format_ == video_format) {
return;
}
if (video_format_) {
CFRelease(video_format_);
}
video_format_ = video_format;
if (video_format_) {
CFRetain(video_format_);
}
}
const char* H264VideoToolboxDecoder::ImplementationName() const {
return "VideoToolbox";
}
} // namespace webrtc

webrtc/sdk/objc/Framework/Classes/VideoToolbox/encoder.h
@@ -1,97 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_ENCODER_H_
#define WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_ENCODER_H_
#include "webrtc/api/video/video_rotation.h"
#include "webrtc/common_video/h264/h264_bitstream_parser.h"
#include "webrtc/common_video/include/bitrate_adjuster.h"
#include "webrtc/media/base/codec.h"
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
#include "webrtc/modules/video_coding/utility/quality_scaler.h"
#include "webrtc/rtc_base/criticalsection.h"
#include <VideoToolbox/VideoToolbox.h>
#include <vector>
// This file provides an H264 encoder implementation using the VideoToolbox
// APIs. Since documentation is almost non-existent, this is largely based on
// the information in the VideoToolbox header files, a talk from WWDC 2014 and
// experimentation.
namespace webrtc {
class H264VideoToolboxEncoder : public H264Encoder {
public:
explicit H264VideoToolboxEncoder(const cricket::VideoCodec& codec);
~H264VideoToolboxEncoder() override;
int InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
size_t max_payload_size) override;
int Encode(const VideoFrame& input_image,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) override;
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate) override;
int Release() override;
const char* ImplementationName() const override;
bool SupportsNativeHandle() const override;
void OnEncodedFrame(OSStatus status,
VTEncodeInfoFlags info_flags,
CMSampleBufferRef sample_buffer,
CodecSpecificInfo codec_specific_info,
int32_t width,
int32_t height,
int64_t render_time_ms,
uint32_t timestamp,
VideoRotation rotation);
ScalingSettings GetScalingSettings() const override;
private:
int ResetCompressionSession();
void ConfigureCompressionSession();
void DestroyCompressionSession();
rtc::scoped_refptr<VideoFrameBuffer> GetScaledBufferOnEncode(
const rtc::scoped_refptr<VideoFrameBuffer>& frame);
void SetBitrateBps(uint32_t bitrate_bps);
void SetEncoderBitrateBps(uint32_t bitrate_bps);
EncodedImageCallback* callback_;
VTCompressionSessionRef compression_session_;
BitrateAdjuster bitrate_adjuster_;
H264PacketizationMode packetization_mode_;
uint32_t target_bitrate_bps_;
uint32_t encoder_bitrate_bps_;
int32_t width_;
int32_t height_;
VideoCodecMode mode_;
const CFStringRef profile_;
H264BitstreamParser h264_bitstream_parser_;
std::vector<uint8_t> nv12_scale_buffer_;
}; // H264VideoToolboxEncoder
} // namespace webrtc
#endif // WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_ENCODER_H_

webrtc/sdk/objc/Framework/Classes/VideoToolbox/encoder.mm
@@ -1,767 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/encoder.h"
#include <memory>
#include <string>
#include <vector>
#if defined(WEBRTC_IOS)
#import "Common/RTCUIApplicationStatusObserver.h"
#import "WebRTC/UIDevice+RTCDevice.h"
#endif
#import "WebRTC/RTCVideoFrameBuffer.h"
#include "libyuv/convert_from.h"
#include "webrtc/common_video/h264/profile_level_id.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/sdk/objc/Framework/Classes/Video/objc_frame_buffer.h"
#include "webrtc/sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace internal {
// The ratio between kVTCompressionPropertyKey_DataRateLimits and
// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
// than the average bit rate to avoid undershooting the target.
const float kLimitToAverageBitRateFactor = 1.5f;
// These thresholds deviate from the default h264 QP thresholds, as they
// have been found to work better on devices that support VideoToolbox.
const int kLowH264QpThreshold = 28;
const int kHighH264QpThreshold = 39;
// Convenience function for creating a dictionary.
inline CFDictionaryRef CreateCFDictionary(CFTypeRef* keys,
CFTypeRef* values,
size_t size) {
return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
}
// Copies characters from a CFStringRef into a std::string.
std::string CFStringToString(const CFStringRef cf_string) {
RTC_DCHECK(cf_string);
std::string std_string;
// Get the size needed for UTF8 plus terminating character.
size_t buffer_size =
CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string),
kCFStringEncodingUTF8) +
1;
std::unique_ptr<char[]> buffer(new char[buffer_size]);
if (CFStringGetCString(cf_string, buffer.get(), buffer_size,
kCFStringEncodingUTF8)) {
// Copy over the characters.
std_string.assign(buffer.get());
}
return std_string;
}
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
int32_t value) {
CFNumberRef cfNum =
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value);
OSStatus status = VTSessionSetProperty(session, key, cfNum);
CFRelease(cfNum);
if (status != noErr) {
std::string key_string = CFStringToString(key);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << value << ": " << status;
}
}
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
uint32_t value) {
int64_t value_64 = value;
CFNumberRef cfNum =
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &value_64);
OSStatus status = VTSessionSetProperty(session, key, cfNum);
CFRelease(cfNum);
if (status != noErr) {
std::string key_string = CFStringToString(key);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << value << ": " << status;
}
}
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) {
CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse;
OSStatus status = VTSessionSetProperty(session, key, cf_bool);
if (status != noErr) {
std::string key_string = CFStringToString(key);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << value << ": " << status;
}
}
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
CFStringRef value) {
OSStatus status = VTSessionSetProperty(session, key, value);
if (status != noErr) {
std::string key_string = CFStringToString(key);
std::string val_string = CFStringToString(value);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << val_string << ": " << status;
}
}
// Struct that we pass to the encoder per frame to encode. We receive it again
// in the encoder callback.
struct FrameEncodeParams {
FrameEncodeParams(webrtc::H264VideoToolboxEncoder* e,
const webrtc::CodecSpecificInfo* csi,
int32_t w,
int32_t h,
int64_t rtms,
uint32_t ts,
webrtc::VideoRotation r)
: encoder(e),
width(w),
height(h),
render_time_ms(rtms),
timestamp(ts),
rotation(r) {
if (csi) {
codec_specific_info = *csi;
} else {
codec_specific_info.codecType = webrtc::kVideoCodecH264;
}
}
webrtc::H264VideoToolboxEncoder* encoder;
webrtc::CodecSpecificInfo codec_specific_info;
int32_t width;
int32_t height;
int64_t render_time_ms;
uint32_t timestamp;
webrtc::VideoRotation rotation;
};
// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
// encoder. This performs the copy and format conversion.
// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
bool CopyVideoFrameToPixelBuffer(const rtc::scoped_refptr<webrtc::I420BufferInterface>& frame,
CVPixelBufferRef pixel_buffer) {
RTC_DCHECK(pixel_buffer);
RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixel_buffer),
kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixel_buffer, 0),
static_cast<size_t>(frame->height()));
RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixel_buffer, 0),
static_cast<size_t>(frame->width()));
CVReturn cvRet = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
if (cvRet != kCVReturnSuccess) {
LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
return false;
}
uint8_t* dst_y = reinterpret_cast<uint8_t*>(
CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 0));
int dst_stride_y = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 0);
uint8_t* dst_uv = reinterpret_cast<uint8_t*>(
CVPixelBufferGetBaseAddressOfPlane(pixel_buffer, 1));
int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
// Convert I420 to NV12.
int ret = libyuv::I420ToNV12(
frame->DataY(), frame->StrideY(),
frame->DataU(), frame->StrideU(),
frame->DataV(), frame->StrideV(),
dst_y, dst_stride_y, dst_uv, dst_stride_uv,
frame->width(), frame->height());
CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
if (ret) {
LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
return false;
}
return true;
}
CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) {
if (!pixel_buffer_pool) {
LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
return nullptr;
}
CVPixelBufferRef pixel_buffer;
CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool,
&pixel_buffer);
if (ret != kCVReturnSuccess) {
LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
// We probably want to drop frames here, since failure probably means
// that the pool is empty.
return nullptr;
}
return pixel_buffer;
}
// This is the callback function that VideoToolbox calls when encode is
// complete. From inspection this happens on its own queue.
void VTCompressionOutputCallback(void* encoder,
void* params,
OSStatus status,
VTEncodeInfoFlags info_flags,
CMSampleBufferRef sample_buffer) {
std::unique_ptr<FrameEncodeParams> encode_params(
reinterpret_cast<FrameEncodeParams*>(params));
encode_params->encoder->OnEncodedFrame(
status, info_flags, sample_buffer, encode_params->codec_specific_info,
encode_params->width, encode_params->height,
encode_params->render_time_ms, encode_params->timestamp,
encode_params->rotation);
}
// Extract VideoToolbox profile out of the cricket::VideoCodec. If there is no
// specific VideoToolbox profile for the specified level, AutoLevel will be
// returned. The user must initialize the encoder with a resolution and
// framerate conforming to the selected H264 level regardless.
CFStringRef ExtractProfile(const cricket::VideoCodec& codec) {
const rtc::Optional<webrtc::H264::ProfileLevelId> profile_level_id =
webrtc::H264::ParseSdpProfileLevelId(codec.params);
RTC_DCHECK(profile_level_id);
switch (profile_level_id->profile) {
case webrtc::H264::kProfileConstrainedBaseline:
case webrtc::H264::kProfileBaseline:
switch (profile_level_id->level) {
case webrtc::H264::kLevel3:
return kVTProfileLevel_H264_Baseline_3_0;
case webrtc::H264::kLevel3_1:
return kVTProfileLevel_H264_Baseline_3_1;
case webrtc::H264::kLevel3_2:
return kVTProfileLevel_H264_Baseline_3_2;
case webrtc::H264::kLevel4:
return kVTProfileLevel_H264_Baseline_4_0;
case webrtc::H264::kLevel4_1:
return kVTProfileLevel_H264_Baseline_4_1;
case webrtc::H264::kLevel4_2:
return kVTProfileLevel_H264_Baseline_4_2;
case webrtc::H264::kLevel5:
return kVTProfileLevel_H264_Baseline_5_0;
case webrtc::H264::kLevel5_1:
return kVTProfileLevel_H264_Baseline_5_1;
case webrtc::H264::kLevel5_2:
return kVTProfileLevel_H264_Baseline_5_2;
case webrtc::H264::kLevel1:
case webrtc::H264::kLevel1_b:
case webrtc::H264::kLevel1_1:
case webrtc::H264::kLevel1_2:
case webrtc::H264::kLevel1_3:
case webrtc::H264::kLevel2:
case webrtc::H264::kLevel2_1:
case webrtc::H264::kLevel2_2:
return kVTProfileLevel_H264_Baseline_AutoLevel;
}
case webrtc::H264::kProfileMain:
switch (profile_level_id->level) {
case webrtc::H264::kLevel3:
return kVTProfileLevel_H264_Main_3_0;
case webrtc::H264::kLevel3_1:
return kVTProfileLevel_H264_Main_3_1;
case webrtc::H264::kLevel3_2:
return kVTProfileLevel_H264_Main_3_2;
case webrtc::H264::kLevel4:
return kVTProfileLevel_H264_Main_4_0;
case webrtc::H264::kLevel4_1:
return kVTProfileLevel_H264_Main_4_1;
case webrtc::H264::kLevel4_2:
return kVTProfileLevel_H264_Main_4_2;
case webrtc::H264::kLevel5:
return kVTProfileLevel_H264_Main_5_0;
case webrtc::H264::kLevel5_1:
return kVTProfileLevel_H264_Main_5_1;
case webrtc::H264::kLevel5_2:
return kVTProfileLevel_H264_Main_5_2;
case webrtc::H264::kLevel1:
case webrtc::H264::kLevel1_b:
case webrtc::H264::kLevel1_1:
case webrtc::H264::kLevel1_2:
case webrtc::H264::kLevel1_3:
case webrtc::H264::kLevel2:
case webrtc::H264::kLevel2_1:
case webrtc::H264::kLevel2_2:
return kVTProfileLevel_H264_Main_AutoLevel;
}
case webrtc::H264::kProfileConstrainedHigh:
case webrtc::H264::kProfileHigh:
switch (profile_level_id->level) {
case webrtc::H264::kLevel3:
return kVTProfileLevel_H264_High_3_0;
case webrtc::H264::kLevel3_1:
return kVTProfileLevel_H264_High_3_1;
case webrtc::H264::kLevel3_2:
return kVTProfileLevel_H264_High_3_2;
case webrtc::H264::kLevel4:
return kVTProfileLevel_H264_High_4_0;
case webrtc::H264::kLevel4_1:
return kVTProfileLevel_H264_High_4_1;
case webrtc::H264::kLevel4_2:
return kVTProfileLevel_H264_High_4_2;
case webrtc::H264::kLevel5:
return kVTProfileLevel_H264_High_5_0;
case webrtc::H264::kLevel5_1:
return kVTProfileLevel_H264_High_5_1;
case webrtc::H264::kLevel5_2:
return kVTProfileLevel_H264_High_5_2;
case webrtc::H264::kLevel1:
case webrtc::H264::kLevel1_b:
case webrtc::H264::kLevel1_1:
case webrtc::H264::kLevel1_2:
case webrtc::H264::kLevel1_3:
case webrtc::H264::kLevel2:
case webrtc::H264::kLevel2_1:
case webrtc::H264::kLevel2_2:
return kVTProfileLevel_H264_High_AutoLevel;
}
}
}
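// Usage sketch (illustrative only, not part of this CL): mapping a
// hypothetical SDP fmtp value through ExtractProfile. "42e01f" denotes
// constrained baseline, level 3.1.
//   cricket::VideoCodec codec(cricket::kH264CodecName);
//   codec.SetParam(cricket::kH264FmtpProfileLevelId, "42e01f");
//   CFStringRef profile = ExtractProfile(codec);
//   // profile == kVTProfileLevel_H264_Baseline_3_1, since constrained
//   // baseline is encoded with the VideoToolbox Baseline profile.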
} // namespace internal
namespace webrtc {
// .5 is set as a minimum to prevent overcompensating for large temporary
// overshoots. We don't want to degrade video quality too badly.
// .95 is set to prevent oscillations. When a lower bitrate is set on the
// encoder than previously set, its output seems to have a brief period of
// drastically reduced bitrate, so we want to avoid that. In steady-state
// conditions, 0.95 seems to give us better overall bitrate over long periods
// of time.
H264VideoToolboxEncoder::H264VideoToolboxEncoder(const cricket::VideoCodec& codec)
: callback_(nullptr),
compression_session_(nullptr),
bitrate_adjuster_(Clock::GetRealTimeClock(), .5, .95),
packetization_mode_(H264PacketizationMode::NonInterleaved),
profile_(internal::ExtractProfile(codec)) {
LOG(LS_INFO) << "Using profile " << internal::CFStringToString(profile_);
RTC_CHECK(cricket::CodecNamesEq(codec.name, cricket::kH264CodecName));
}
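// Sketch of the clamping described above (illustrative, assuming
// webrtc::BitrateAdjuster clamps its output to [min_pct, max_pct] of the
// target): with a 500 kbps target, the HW encoder is only ever asked for
// between 250 kbps and 475 kbps.
//   bitrate_adjuster_.SetTargetBitrateBps(500000);
//   uint32_t adjusted = bitrate_adjuster_.GetAdjustedBitrateBps();
//   // 250000 <= adjusted && adjusted <= 475000, depending on the measured
//   // overshoot of recent encoder output.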
H264VideoToolboxEncoder::~H264VideoToolboxEncoder() {
DestroyCompressionSession();
}
int H264VideoToolboxEncoder::InitEncode(const VideoCodec* codec_settings,
int number_of_cores,
size_t max_payload_size) {
RTC_DCHECK(codec_settings);
RTC_DCHECK_EQ(codec_settings->codecType, kVideoCodecH264);
width_ = codec_settings->width;
height_ = codec_settings->height;
mode_ = codec_settings->mode;
// We can only set average bitrate on the HW encoder.
target_bitrate_bps_ = codec_settings->startBitrate;
bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_);
// TODO(tkchin): Try setting payload size via
// kVTCompressionPropertyKey_MaxH264SliceBytes.
return ResetCompressionSession();
}
int H264VideoToolboxEncoder::Encode(
const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
// |frame| size should always match codec settings.
RTC_DCHECK_EQ(frame.width(), width_);
RTC_DCHECK_EQ(frame.height(), height_);
if (!callback_ || !compression_session_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
#if defined(WEBRTC_IOS)
if (![[RTCUIApplicationStatusObserver sharedInstance] isApplicationActive]) {
// Ignore all encode requests when app isn't active. In this state, the
// hardware encoder has been invalidated by the OS.
return WEBRTC_VIDEO_CODEC_OK;
}
#endif
bool is_keyframe_required = false;
// Get a pixel buffer from the pool and copy frame data over.
CVPixelBufferPoolRef pixel_buffer_pool =
VTCompressionSessionGetPixelBufferPool(compression_session_);
#if defined(WEBRTC_IOS)
if (!pixel_buffer_pool) {
// Kind of a hack. On backgrounding, the compression session seems to get
// invalidated, which causes this pool call to fail when the application
// is foregrounded and frames are being sent for encoding again.
// Resetting the session when this happens fixes the issue.
// In addition we request a keyframe so video can recover quickly.
ResetCompressionSession();
pixel_buffer_pool =
VTCompressionSessionGetPixelBufferPool(compression_session_);
is_keyframe_required = true;
LOG(LS_INFO) << "Resetting compression session due to invalid pool.";
}
#endif
CVPixelBufferRef pixel_buffer = nullptr;
if (frame.video_frame_buffer()->type() == VideoFrameBuffer::Type::kNative) {
// Native frame.
rtc::scoped_refptr<ObjCFrameBuffer> objc_frame_buffer(
static_cast<ObjCFrameBuffer*>(frame.video_frame_buffer().get()));
id<RTCVideoFrameBuffer> wrapped_frame_buffer =
(id<RTCVideoFrameBuffer>)objc_frame_buffer->wrapped_frame_buffer();
if ([wrapped_frame_buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
RTCCVPixelBuffer* rtc_pixel_buffer = (RTCCVPixelBuffer*)wrapped_frame_buffer;
if (![rtc_pixel_buffer requiresCropping]) {
// This pixel buffer might have a higher resolution than the one the
// compression session is configured for. The compression session can
// handle that and will output encoded frames in the configured
// resolution regardless of the input pixel buffer resolution.
pixel_buffer = rtc_pixel_buffer.pixelBuffer;
CVBufferRetain(pixel_buffer);
} else {
// Cropping required, we need to crop and scale to a new pixel buffer.
pixel_buffer = internal::CreatePixelBuffer(pixel_buffer_pool);
if (!pixel_buffer) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
int dst_width = CVPixelBufferGetWidth(pixel_buffer);
int dst_height = CVPixelBufferGetHeight(pixel_buffer);
if ([rtc_pixel_buffer requiresScalingToWidth:dst_width height:dst_height]) {
int size =
[rtc_pixel_buffer bufferSizeForCroppingAndScalingToWidth:dst_width height:dst_height];
nv12_scale_buffer_.resize(size);
} else {
nv12_scale_buffer_.clear();
}
nv12_scale_buffer_.shrink_to_fit();
if (![rtc_pixel_buffer cropAndScaleTo:pixel_buffer
withTempBuffer:nv12_scale_buffer_.data()]) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
}
}
if (!pixel_buffer) {
// We did not have a native frame, or the wrapped ObjC buffer was not an
// RTCCVPixelBuffer, so copy the frame data into a pixel buffer from the pool.
pixel_buffer = internal::CreatePixelBuffer(pixel_buffer_pool);
if (!pixel_buffer) {
return WEBRTC_VIDEO_CODEC_ERROR;
}
RTC_DCHECK(pixel_buffer);
if (!internal::CopyVideoFrameToPixelBuffer(frame.video_frame_buffer()->ToI420(),
pixel_buffer)) {
LOG(LS_ERROR) << "Failed to copy frame data.";
CVBufferRelease(pixel_buffer);
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// Check if we need a keyframe.
if (!is_keyframe_required && frame_types) {
for (auto frame_type : *frame_types) {
if (frame_type == kVideoFrameKey) {
is_keyframe_required = true;
break;
}
}
}
CMTime presentation_time_stamp =
CMTimeMake(frame.render_time_ms(), 1000);
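// CMTimeMake(value, 1000) interprets |value| on a millisecond timescale, so
// e.g. render_time_ms == 66700 yields a 66.7 second presentation timestamp.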
CFDictionaryRef frame_properties = nullptr;
if (is_keyframe_required) {
CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
CFTypeRef values[] = {kCFBooleanTrue};
frame_properties = internal::CreateCFDictionary(keys, values, 1);
}
std::unique_ptr<internal::FrameEncodeParams> encode_params;
encode_params.reset(new internal::FrameEncodeParams(
this, codec_specific_info, width_, height_, frame.render_time_ms(),
frame.timestamp(), frame.rotation()));
encode_params->codec_specific_info.codecSpecific.H264.packetization_mode =
packetization_mode_;
// Update the bitrate if needed.
SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());
OSStatus status = VTCompressionSessionEncodeFrame(
compression_session_, pixel_buffer, presentation_time_stamp,
kCMTimeInvalid, frame_properties, encode_params.release(), nullptr);
if (frame_properties) {
CFRelease(frame_properties);
}
if (pixel_buffer) {
CVBufferRelease(pixel_buffer);
}
if (status != noErr) {
LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxEncoder::RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) {
callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxEncoder::SetChannelParameters(uint32_t packet_loss,
int64_t rtt) {
// The encoder doesn't know anything about packet loss or RTT, so just return.
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxEncoder::SetRates(uint32_t new_bitrate_kbit,
uint32_t frame_rate) {
target_bitrate_bps_ = 1000 * new_bitrate_kbit;
bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_bps_);
SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxEncoder::Release() {
// Need to destroy so that the session is invalidated and won't use the
// callback anymore. Do not remove callback until the session is invalidated
// since async encoder callbacks can occur until invalidation.
DestroyCompressionSession();
callback_ = nullptr;
return WEBRTC_VIDEO_CODEC_OK;
}
int H264VideoToolboxEncoder::ResetCompressionSession() {
DestroyCompressionSession();
// Set source image buffer attributes. These attributes will be present on
// buffers retrieved from the encoder's pixel buffer pool.
const size_t attributes_size = 3;
CFTypeRef keys[attributes_size] = {
#if defined(WEBRTC_IOS)
kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
kCVPixelBufferOpenGLCompatibilityKey,
#endif
kCVPixelBufferIOSurfacePropertiesKey,
kCVPixelBufferPixelFormatTypeKey
};
CFDictionaryRef io_surface_value =
internal::CreateCFDictionary(nullptr, nullptr, 0);
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
CFNumberRef pixel_format =
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
CFTypeRef values[attributes_size] = {kCFBooleanTrue, io_surface_value,
pixel_format};
CFDictionaryRef source_attributes =
internal::CreateCFDictionary(keys, values, attributes_size);
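// The attributes above request GPU-compatible, IOSurface-backed NV12
// ('420f', bi-planar full-range) buffers from the encoder's pixel buffer
// pool.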
if (io_surface_value) {
CFRelease(io_surface_value);
io_surface_value = nullptr;
}
if (pixel_format) {
CFRelease(pixel_format);
pixel_format = nullptr;
}
OSStatus status = VTCompressionSessionCreate(
nullptr, // use default allocator
width_, height_, kCMVideoCodecType_H264,
nullptr, // use default encoder
source_attributes,
nullptr, // use default compressed data allocator
internal::VTCompressionOutputCallback, this, &compression_session_);
if (source_attributes) {
CFRelease(source_attributes);
source_attributes = nullptr;
}
if (status != noErr) {
LOG(LS_ERROR) << "Failed to create compression session: " << status;
return WEBRTC_VIDEO_CODEC_ERROR;
}
ConfigureCompressionSession();
return WEBRTC_VIDEO_CODEC_OK;
}
void H264VideoToolboxEncoder::ConfigureCompressionSession() {
RTC_DCHECK(compression_session_);
internal::SetVTSessionProperty(compression_session_,
kVTCompressionPropertyKey_RealTime, true);
internal::SetVTSessionProperty(compression_session_,
kVTCompressionPropertyKey_ProfileLevel,
profile_);
internal::SetVTSessionProperty(compression_session_,
kVTCompressionPropertyKey_AllowFrameReordering,
false);
SetEncoderBitrateBps(target_bitrate_bps_);
// TODO(tkchin): Look at entropy mode and colorspace matrices.
// TODO(tkchin): Investigate to see if there's any way to make this work.
// May need it to interop with Android. Currently this call just fails.
// On inspecting encoder output on iOS8, this value is set to 6.
// internal::SetVTSessionProperty(compression_session_,
// kVTCompressionPropertyKey_MaxFrameDelayCount,
// 1);
// Set a relatively large value for keyframe emission: 7200 frames, which at
// 30 fps matches the 240 second (4 minute) duration cap set below.
internal::SetVTSessionProperty(
compression_session_,
kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
internal::SetVTSessionProperty(
compression_session_,
kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
}
void H264VideoToolboxEncoder::DestroyCompressionSession() {
if (compression_session_) {
VTCompressionSessionInvalidate(compression_session_);
CFRelease(compression_session_);
compression_session_ = nullptr;
}
}
const char* H264VideoToolboxEncoder::ImplementationName() const {
return "VideoToolbox";
}
bool H264VideoToolboxEncoder::SupportsNativeHandle() const {
return true;
}
void H264VideoToolboxEncoder::SetBitrateBps(uint32_t bitrate_bps) {
if (encoder_bitrate_bps_ != bitrate_bps) {
SetEncoderBitrateBps(bitrate_bps);
}
}
void H264VideoToolboxEncoder::SetEncoderBitrateBps(uint32_t bitrate_bps) {
if (compression_session_) {
internal::SetVTSessionProperty(compression_session_,
kVTCompressionPropertyKey_AverageBitRate,
bitrate_bps);
// TODO(tkchin): Add a helper method to set array value.
int64_t data_limit_bytes_per_second_value = static_cast<int64_t>(
bitrate_bps * internal::kLimitToAverageBitRateFactor / 8);
CFNumberRef bytes_per_second =
CFNumberCreate(kCFAllocatorDefault,
kCFNumberSInt64Type,
&data_limit_bytes_per_second_value);
int64_t one_second_value = 1;
CFNumberRef one_second =
CFNumberCreate(kCFAllocatorDefault,
kCFNumberSInt64Type,
&one_second_value);
const void* nums[2] = { bytes_per_second, one_second };
CFArrayRef data_rate_limits =
CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
OSStatus status =
VTSessionSetProperty(compression_session_,
kVTCompressionPropertyKey_DataRateLimits,
data_rate_limits);
if (bytes_per_second) {
CFRelease(bytes_per_second);
}
if (one_second) {
CFRelease(one_second);
}
if (data_rate_limits) {
CFRelease(data_rate_limits);
}
if (status != noErr) {
LOG(LS_ERROR) << "Failed to set data rate limit";
}
encoder_bitrate_bps_ = bitrate_bps;
}
}
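// Worked example (illustrative, assuming kLimitToAverageBitRateFactor is
// 1.5): a 1 Mbps average bitrate yields a data rate limit of
// 1000000 * 1.5 / 8 = 187500 bytes per 1-second window.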
void H264VideoToolboxEncoder::OnEncodedFrame(
OSStatus status,
VTEncodeInfoFlags info_flags,
CMSampleBufferRef sample_buffer,
CodecSpecificInfo codec_specific_info,
int32_t width,
int32_t height,
int64_t render_time_ms,
uint32_t timestamp,
VideoRotation rotation) {
if (status != noErr) {
LOG(LS_ERROR) << "H264 encode failed.";
return;
}
if (info_flags & kVTEncodeInfo_FrameDropped) {
LOG(LS_INFO) << "H264 encode dropped frame.";
return;
}
bool is_keyframe = false;
CFArrayRef attachments =
CMSampleBufferGetSampleAttachmentsArray(sample_buffer, 0);
if (attachments != nullptr && CFArrayGetCount(attachments)) {
CFDictionaryRef attachment =
static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
is_keyframe =
!CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
}
if (is_keyframe) {
LOG(LS_INFO) << "Generated keyframe";
}
// Convert the sample buffer into a buffer suitable for RTP packetization.
// TODO(tkchin): Allocate buffers through a pool.
std::unique_ptr<rtc::Buffer> buffer(new rtc::Buffer());
std::unique_ptr<webrtc::RTPFragmentationHeader> header;
{
webrtc::RTPFragmentationHeader* header_raw;
bool result = H264CMSampleBufferToAnnexBBuffer(sample_buffer, is_keyframe,
buffer.get(), &header_raw);
header.reset(header_raw);
if (!result) {
return;
}
}
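// H264CMSampleBufferToAnnexBBuffer rewrites the AVCC sample buffer
// (length-prefixed NALUs) into Annex B format (0x00000001 start codes),
// prepending SPS/PPS for keyframes, and fills the fragmentation header with
// the offset and length of each NALU for the RTP packetizer.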
webrtc::EncodedImage frame(buffer->data(), buffer->size(), buffer->size());
frame._encodedWidth = width;
frame._encodedHeight = height;
frame._completeFrame = true;
frame._frameType =
is_keyframe ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta;
frame.capture_time_ms_ = render_time_ms;
frame._timeStamp = timestamp;
frame.rotation_ = rotation;
frame.content_type_ =
(mode_ == kScreensharing) ? VideoContentType::SCREENSHARE : VideoContentType::UNSPECIFIED;
frame.timing_.is_timing_frame = false;
h264_bitstream_parser_.ParseBitstream(buffer->data(), buffer->size());
h264_bitstream_parser_.GetLastSliceQp(&frame.qp_);
EncodedImageCallback::Result res =
callback_->OnEncodedImage(frame, &codec_specific_info, header.get());
if (res.error != EncodedImageCallback::Result::OK) {
LOG(LS_ERROR) << "Encode callback failed: " << res.error;
return;
}
bitrate_adjuster_.Update(frame._length);
}
// TODO(magjed): This function is not used by RTCVideoEncoderH264, but this whole file will be
// removed soon and inlined as ObjC.
VideoEncoder::ScalingSettings H264VideoToolboxEncoder::GetScalingSettings()
const {
return VideoEncoder::ScalingSettings(true, internal::kLowH264QpThreshold,
internal::kHighH264QpThreshold);
}
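// ScalingSettings(true, low, high) enables QP-based quality scaling: the
// quality scaler steps resolution down when the encoder's reported QP stays
// above the high threshold, and back up when it stays below the low one.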
} // namespace webrtc


@@ -0,0 +1,90 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#include "helpers.h"
#include <string>
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/logging.h"
// Copies characters from a CFStringRef into a std::string.
std::string CFStringToString(const CFStringRef cf_string) {
RTC_DCHECK(cf_string);
std::string std_string;
// Get the size needed for UTF8 plus terminating character.
size_t buffer_size =
CFStringGetMaximumSizeForEncoding(CFStringGetLength(cf_string),
kCFStringEncodingUTF8) +
1;
std::unique_ptr<char[]> buffer(new char[buffer_size]);
if (CFStringGetCString(cf_string, buffer.get(), buffer_size,
kCFStringEncodingUTF8)) {
// Copy over the characters.
std_string.assign(buffer.get());
}
return std_string;
}
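// Usage sketch (illustrative):
//   std::string profile = CFStringToString(kVTProfileLevel_H264_Baseline_3_1);
//   // profile is typically "H264_Baseline_3_1".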
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
int32_t value) {
CFNumberRef cfNum =
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &value);
OSStatus status = VTSessionSetProperty(session, key, cfNum);
CFRelease(cfNum);
if (status != noErr) {
std::string key_string = CFStringToString(key);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << value << ": " << status;
}
}
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
uint32_t value) {
int64_t value_64 = value;
CFNumberRef cfNum =
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &value_64);
OSStatus status = VTSessionSetProperty(session, key, cfNum);
CFRelease(cfNum);
if (status != noErr) {
std::string key_string = CFStringToString(key);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << value << ": " << status;
}
}
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value) {
CFBooleanRef cf_bool = (value) ? kCFBooleanTrue : kCFBooleanFalse;
OSStatus status = VTSessionSetProperty(session, key, cf_bool);
if (status != noErr) {
std::string key_string = CFStringToString(key);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << value << ": " << status;
}
}
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
CFStringRef value) {
OSStatus status = VTSessionSetProperty(session, key, value);
if (status != noErr) {
std::string key_string = CFStringToString(key);
std::string val_string = CFStringToString(value);
LOG(LS_ERROR) << "VTSessionSetProperty failed to set: " << key_string
<< " to " << val_string << ": " << status;
}
}
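// Usage sketch (illustrative): the overloads pick the CFNumber, CFBoolean or
// CFString conversion based on the argument type.
//   SetVTSessionProperty(session, kVTCompressionPropertyKey_RealTime, true);
//   SetVTSessionProperty(session, kVTCompressionPropertyKey_AverageBitRate,
//                        uint32_t{500000});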


@@ -0,0 +1,47 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
#define WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_
#include <CoreFoundation/CoreFoundation.h>
#include <VideoToolbox/VideoToolbox.h>
#include <string>
// Convenience function for creating a dictionary.
inline CFDictionaryRef CreateCFTypeDictionary(CFTypeRef* keys,
CFTypeRef* values,
size_t size) {
return CFDictionaryCreate(kCFAllocatorDefault, keys, values, size,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks);
}
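// Usage sketch (illustrative; |pixel_format| is a hypothetical CFNumberRef
// created by the caller):
//   CFTypeRef keys[] = {kCVPixelBufferPixelFormatTypeKey};
//   CFTypeRef values[] = {pixel_format};
//   CFDictionaryRef dict = CreateCFTypeDictionary(keys, values, 1);
//   // ... use dict ...
//   CFRelease(dict);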
// Copies characters from a CFStringRef into a std::string.
std::string CFStringToString(const CFStringRef cf_string);
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session, CFStringRef key, int32_t value);
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
uint32_t value);
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session, CFStringRef key, bool value);
// Convenience function for setting a VT property.
void SetVTSessionProperty(VTSessionRef session,
CFStringRef key,
CFStringRef value);
#endif // WEBRTC_SDK_OBJC_FRAMEWORK_CLASSES_VIDEOTOOLBOX_HELPERS_H_


@@ -33,7 +33,6 @@ class ObjCVideoDecoder : public VideoDecoder {
public:
ObjCVideoDecoder(id<RTCVideoDecoder> decoder)
: decoder_(decoder), implementation_name_([decoder implementationName].stdString) {}
-  ~ObjCVideoDecoder() { [decoder_ destroy]; }
int32_t InitDecode(const VideoCodec *codec_settings, int32_t number_of_cores) {
RTCVideoEncoderSettings *settings =


@@ -54,7 +54,6 @@ class ObjCVideoEncoder : public VideoEncoder {
public:
ObjCVideoEncoder(id<RTCVideoEncoder> encoder)
: encoder_(encoder), implementation_name_([encoder implementationName].stdString) {}
-  ~ObjCVideoEncoder() { [encoder_ destroy]; }
int32_t InitEncode(const VideoCodec *codec_settings,
int32_t number_of_cores,
@@ -65,9 +64,9 @@ class ObjCVideoEncoder : public VideoEncoder {
}
int32_t RegisterEncodeCompleteCallback(EncodedImageCallback *callback) {
-    [encoder_ setCallback:^(RTCEncodedImage *frame,
-                            id<RTCCodecSpecificInfo> info,
-                            RTCRtpFragmentationHeader *header) {
+    [encoder_ setCallback:^BOOL(RTCEncodedImage *_Nonnull frame,
+                                id<RTCCodecSpecificInfo> _Nonnull info,
+                                RTCRtpFragmentationHeader *_Nonnull header) {
EncodedImage encodedImage = [frame nativeEncodedImage];
// Handle types that can be converted into one of CodecSpecificInfo's hard-coded cases.
@@ -78,7 +77,9 @@ class ObjCVideoEncoder : public VideoEncoder {
std::unique_ptr<RTPFragmentationHeader> fragmentationHeader =
[header createNativeFragmentationHeader];
-    callback->OnEncodedImage(encodedImage, &codecSpecificInfo, fragmentationHeader.release());
+    EncodedImageCallback::Result res =
+        callback->OnEncodedImage(encodedImage, &codecSpecificInfo, fragmentationHeader.release());
+    return res.error == EncodedImageCallback::Result::OK;
}];
return WEBRTC_VIDEO_CODEC_OK;
@@ -113,11 +114,7 @@ class ObjCVideoEncoder : public VideoEncoder {
int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) { return WEBRTC_VIDEO_CODEC_OK; }
int32_t SetRates(uint32_t bitrate, uint32_t framerate) {
-    if ([encoder_ setBitrate:bitrate framerate:framerate]) {
-      return WEBRTC_VIDEO_CODEC_OK;
-    } else {
-      return WEBRTC_VIDEO_CODEC_ERROR;
-    }
+    return [encoder_ setBitrate:bitrate framerate:framerate];
}
bool SupportsNativeHandle() const { return true; }