diff --git a/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm b/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm
index 52edefe053..667553002c 100644
--- a/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm
+++ b/sdk/objc/components/video_codec/RTCVideoDecoderH264.mm
@@ -202,20 +202,31 @@ void decompressionOutputCallback(void *decoderRef,
   // CVPixelBuffers directly to the renderer.
   // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that that
   // we can pass CVPixelBuffers as native handles in decoder output.
+#if TARGET_OS_SIMULATOR
+  static size_t const attributesSize = 2;
+#else
   static size_t const attributesSize = 3;
+#endif
+
   CFTypeRef keys[attributesSize] = {
 #if defined(WEBRTC_IOS)
-    kCVPixelBufferOpenGLESCompatibilityKey,
+      kCVPixelBufferOpenGLESCompatibilityKey,
 #elif defined(WEBRTC_MAC)
-    kCVPixelBufferOpenGLCompatibilityKey,
+      kCVPixelBufferOpenGLCompatibilityKey,
 #endif
-    kCVPixelBufferIOSurfacePropertiesKey,
-    kCVPixelBufferPixelFormatTypeKey
-  };
+#if !(TARGET_OS_SIMULATOR)
+      kCVPixelBufferIOSurfacePropertiesKey,
+#endif
+      kCVPixelBufferPixelFormatTypeKey};
   CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
   int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
   CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
+#if TARGET_OS_SIMULATOR
+  CFTypeRef values[attributesSize] = {kCFBooleanTrue, pixelFormat};
+#else
   CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, pixelFormat};
+#endif
+
   CFDictionaryRef attributes = CreateCFTypeDictionary(keys, values, attributesSize);
   if (ioSurfaceValue) {
     CFRelease(ioSurfaceValue);