Revert "Add stereo codec header and pass it through RTP"

This reverts commit 20f2133d5dbd1591b89425b24db3b1e09fbcf0b1.

Reason for revert: Breaks downstream project.

Original change's description:
> Add stereo codec header and pass it through RTP
> 
> - Defines CodecSpecificInfoStereo that carries stereo specific header info from
> encoded image.
> - Defines RTPVideoHeaderStereo that carries the above info to packetizer,
> see module_common_types.h.
> - Adds an RTPPacketizer and RTPDepacketizer that supports passing specific stereo
> header.
> - Uses new data containers in StereoAdapter classes.
> 
> This CL is the step 3 for adding alpha channel support over the wire in webrtc.
> See https://webrtc-review.googlesource.com/c/src/+/7800 for the experimental
> CL that gives an idea about how it will come together.
> Design Doc: https://goo.gl/sFeSUT
> 
> Bug: webrtc:7671
> Change-Id: Ia932568fdd7065ba104afd2bc0ecf25a765748ab
> Reviewed-on: https://webrtc-review.googlesource.com/22900
> Reviewed-by: Emircan Uysaler <emircan@webrtc.org>
> Reviewed-by: Erik Språng <sprang@webrtc.org>
> Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
> Reviewed-by: Niklas Enbom <niklas.enbom@webrtc.org>
> Commit-Queue: Emircan Uysaler <emircan@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#20920}

TBR=danilchap@webrtc.org,sprang@webrtc.org,stefan@webrtc.org,niklas.enbom@webrtc.org,emircan@webrtc.org

Change-Id: I57f3172ca3c60a84537d577a574dc8018e12d634
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: webrtc:7671
Reviewed-on: https://webrtc-review.googlesource.com/26940
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20931}
This commit is contained in:
Philip Eliasson
2017-11-29 11:39:27 +00:00
committed by Commit Bot
parent f82000328d
commit deb866360a
27 changed files with 75 additions and 658 deletions

View File

@ -11,7 +11,6 @@
#ifndef MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_STEREO_ENCODER_ADAPTER_H_
#define MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_STEREO_ENCODER_ADAPTER_H_
#include <map>
#include <memory>
#include <vector>
@ -57,16 +56,15 @@ class StereoEncoderAdapter : public VideoEncoder {
// Wrapper class that redirects OnEncodedImage() calls.
class AdapterEncodedImageCallback;
// Holds the encoded image output of a frame.
struct EncodedImageData;
VideoEncoderFactory* const factory_;
std::vector<std::unique_ptr<VideoEncoder>> encoders_;
std::vector<std::unique_ptr<AdapterEncodedImageCallback>> adapter_callbacks_;
EncodedImageCallback* encoded_complete_callback_;
// Holds the encoded image info.
struct ImageStereoInfo;
std::map<uint32_t /* timestamp */, ImageStereoInfo> image_stereo_info_;
uint16_t picture_index_ = 0;
uint64_t picture_index_ = 0;
std::vector<uint8_t> stereo_dummy_planes_;
};

View File

@ -1,24 +0,0 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_STEREO_GLOBALS_H_
#define MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_STEREO_GLOBALS_H_
namespace webrtc {
struct StereoIndices {
uint8_t frame_index;
uint8_t frame_count;
uint16_t picture_index;
};
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_STEREO_INCLUDE_STEREO_GLOBALS_H_

View File

@ -33,18 +33,18 @@ class StereoDecoderAdapter::AdapterDecodedImageCallback
AlphaCodecStream stream_idx)
: adapter_(adapter), stream_idx_(stream_idx) {}
void Decoded(VideoFrame& decoded_image,
void Decoded(VideoFrame& decodedImage,
rtc::Optional<int32_t> decode_time_ms,
rtc::Optional<uint8_t> qp) override {
if (!adapter_)
return;
adapter_->Decoded(stream_idx_, &decoded_image, decode_time_ms, qp);
adapter_->Decoded(stream_idx_, &decodedImage, decode_time_ms, qp);
}
int32_t Decoded(VideoFrame& decoded_image) override {
int32_t Decoded(VideoFrame& decodedImage) override {
RTC_NOTREACHED();
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override {
RTC_NOTREACHED();
return WEBRTC_VIDEO_CODEC_OK;
}
@ -57,22 +57,22 @@ class StereoDecoderAdapter::AdapterDecodedImageCallback
struct StereoDecoderAdapter::DecodedImageData {
explicit DecodedImageData(AlphaCodecStream stream_idx)
: stream_idx_(stream_idx),
decoded_image_(I420Buffer::Create(1 /* width */, 1 /* height */),
0,
0,
kVideoRotation_0) {
decodedImage_(I420Buffer::Create(1 /* width */, 1 /* height */),
0,
0,
kVideoRotation_0) {
RTC_DCHECK_EQ(kAXXStream, stream_idx);
}
DecodedImageData(AlphaCodecStream stream_idx,
const VideoFrame& decoded_image,
const VideoFrame& decodedImage,
const rtc::Optional<int32_t>& decode_time_ms,
const rtc::Optional<uint8_t>& qp)
: stream_idx_(stream_idx),
decoded_image_(decoded_image),
decodedImage_(decodedImage),
decode_time_ms_(decode_time_ms),
qp_(qp) {}
const AlphaCodecStream stream_idx_;
VideoFrame decoded_image_;
VideoFrame decodedImage_;
const rtc::Optional<int32_t> decode_time_ms_;
const rtc::Optional<uint8_t> qp_;
@ -113,21 +113,14 @@ int32_t StereoDecoderAdapter::Decode(
const RTPFragmentationHeader* /*fragmentation*/,
const CodecSpecificInfo* codec_specific_info,
int64_t render_time_ms) {
const CodecSpecificInfoStereo& stereo_info =
codec_specific_info->codecSpecific.stereo;
RTC_DCHECK_LT(static_cast<size_t>(stereo_info.indices.frame_index),
decoders_.size());
if (stereo_info.indices.frame_count == 1) {
RTC_DCHECK_EQ(static_cast<int>(stereo_info.indices.frame_index), 0);
RTC_DCHECK(decoded_data_.find(input_image._timeStamp) ==
decoded_data_.end());
decoded_data_.emplace(std::piecewise_construct,
std::forward_as_tuple(input_image._timeStamp),
std::forward_as_tuple(kAXXStream));
}
int32_t rv = decoders_[stereo_info.indices.frame_index]->Decode(
input_image, missing_frames, nullptr, nullptr, render_time_ms);
// TODO(emircan): Read |codec_specific_info->stereoInfo| to split frames.
int32_t rv =
decoders_[kYUVStream]->Decode(input_image, missing_frames, nullptr,
codec_specific_info, render_time_ms);
if (rv)
return rv;
rv = decoders_[kAXXStream]->Decode(input_image, missing_frames, nullptr,
codec_specific_info, render_time_ms);
return rv;
}
@ -159,12 +152,12 @@ void StereoDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
if (stream_idx == kYUVStream) {
RTC_DCHECK_EQ(kAXXStream, other_image_data.stream_idx_);
MergeAlphaImages(decoded_image, decode_time_ms, qp,
&other_image_data.decoded_image_,
&other_image_data.decodedImage_,
other_image_data.decode_time_ms_, other_image_data.qp_);
} else {
RTC_DCHECK_EQ(kYUVStream, other_image_data.stream_idx_);
RTC_DCHECK_EQ(kAXXStream, stream_idx);
MergeAlphaImages(&other_image_data.decoded_image_,
MergeAlphaImages(&other_image_data.decodedImage_,
other_image_data.decode_time_ms_, other_image_data.qp_,
decoded_image, decode_time_ms, qp);
}
@ -173,8 +166,6 @@ void StereoDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
}
RTC_DCHECK(decoded_data_.find(decoded_image->timestamp()) ==
decoded_data_.end());
// decoded_data_[decoded_image->timestamp()] =
// DecodedImageData(stream_idx, *decoded_image, decode_time_ms, qp);
decoded_data_.emplace(
std::piecewise_construct,
std::forward_as_tuple(decoded_image->timestamp()),
@ -182,21 +173,16 @@ void StereoDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
}
void StereoDecoderAdapter::MergeAlphaImages(
VideoFrame* decoded_image,
VideoFrame* decodedImage,
const rtc::Optional<int32_t>& decode_time_ms,
const rtc::Optional<uint8_t>& qp,
VideoFrame* alpha_decoded_image,
VideoFrame* alpha_decodedImage,
const rtc::Optional<int32_t>& alpha_decode_time_ms,
const rtc::Optional<uint8_t>& alpha_qp) {
if (!alpha_decoded_image->timestamp()) {
decoded_complete_callback_->Decoded(*decoded_image, decode_time_ms, qp);
return;
}
rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
decoded_image->video_frame_buffer()->ToI420();
decodedImage->video_frame_buffer()->ToI420();
rtc::scoped_refptr<webrtc::I420BufferInterface> alpha_buffer =
alpha_decoded_image->video_frame_buffer()->ToI420();
alpha_decodedImage->video_frame_buffer()->ToI420();
RTC_DCHECK_EQ(yuv_buffer->width(), alpha_buffer->width());
RTC_DCHECK_EQ(yuv_buffer->height(), alpha_buffer->height());
rtc::scoped_refptr<I420ABufferInterface> merged_buffer = WrapI420ABuffer(
@ -206,8 +192,8 @@ void StereoDecoderAdapter::MergeAlphaImages(
alpha_buffer->StrideY(),
rtc::Bind(&KeepBufferRefs, yuv_buffer, alpha_buffer));
VideoFrame merged_image(merged_buffer, decoded_image->timestamp(),
0 /* render_time_ms */, decoded_image->rotation());
VideoFrame merged_image(merged_buffer, decodedImage->timestamp(),
0 /* render_time_ms */, decodedImage->rotation());
decoded_complete_callback_->Decoded(merged_image, decode_time_ms, qp);
}

View File

@ -44,20 +44,6 @@ class StereoEncoderAdapter::AdapterEncodedImageCallback
const AlphaCodecStream stream_idx_;
};
// Holds the encoded image info.
struct StereoEncoderAdapter::ImageStereoInfo {
ImageStereoInfo(uint16_t picture_index, uint8_t frame_count)
: picture_index(picture_index),
frame_count(frame_count),
encoded_count(0) {}
uint16_t picture_index;
uint8_t frame_count;
uint8_t encoded_count;
private:
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(ImageStereoInfo);
};
StereoEncoderAdapter::StereoEncoderAdapter(VideoEncoderFactory* factory)
: factory_(factory), encoded_complete_callback_(nullptr) {}
@ -97,21 +83,15 @@ int StereoEncoderAdapter::Encode(const VideoFrame& input_image,
if (!encoded_complete_callback_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
const bool has_alpha = input_image.video_frame_buffer()->type() ==
VideoFrameBuffer::Type::kI420A;
image_stereo_info_.emplace(
std::piecewise_construct, std::forward_as_tuple(input_image.timestamp()),
std::forward_as_tuple(picture_index_++,
has_alpha ? kAlphaCodecStreams : 1));
// Encode YUV
int rv = encoders_[kYUVStream]->Encode(input_image, codec_specific_info,
frame_types);
// If we do not receive an alpha frame, we send a single frame for this
// |picture_index_|. The receiver will receive |frame_count| as 1 which
// specifies this case.
if (rv || !has_alpha)
if (rv)
return rv;
const bool has_alpha = input_image.video_frame_buffer()->type() ==
VideoFrameBuffer::Type::kI420A;
if (!has_alpha)
return rv;
// Encode AXX
@ -149,7 +129,7 @@ int StereoEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
int StereoEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate,
uint32_t framerate) {
for (auto& encoder : encoders_) {
// TODO(emircan): |framerate| is used to calculate duration in encoder
// TODO(emircan): |new_framerate| is used to calculate duration for encoder
// instances. We report the total frame rate to keep real time for now.
// Remove this after refactoring duration logic.
const int rv = encoder->SetRateAllocation(
@ -180,25 +160,11 @@ EncodedImageCallback::Result StereoEncoderAdapter::OnEncodedImage(
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation) {
const VideoCodecType associated_coded_type = codecSpecificInfo->codecType;
const auto& image_stereo_info_itr =
image_stereo_info_.find(encodedImage._timeStamp);
RTC_DCHECK(image_stereo_info_itr != image_stereo_info_.end());
ImageStereoInfo& image_stereo_info = image_stereo_info_itr->second;
const uint8_t frame_count = image_stereo_info.frame_count;
const uint16_t picture_index = image_stereo_info.picture_index;
if (++image_stereo_info.encoded_count == frame_count)
image_stereo_info_.erase(image_stereo_info_itr);
if (stream_idx == kAXXStream)
return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
CodecSpecificInfo codec_info = *codecSpecificInfo;
codec_info.codecType = kVideoCodecStereo;
codec_info.codec_name = "stereo";
codec_info.codecSpecific.stereo.associated_codec_type = associated_coded_type;
codec_info.codecSpecific.stereo.indices.frame_index = stream_idx;
codec_info.codecSpecific.stereo.indices.frame_count = frame_count;
codec_info.codecSpecific.stereo.indices.picture_index = picture_index;
encoded_complete_callback_->OnEncodedImage(encodedImage, &codec_info,
// TODO(emircan): Fill |codec_specific_info| with stereo parameters.
encoded_complete_callback_->OnEncodedImage(encodedImage, codecSpecificInfo,
fragmentation);
return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
}

View File

@ -101,18 +101,8 @@ TEST_F(TestStereoAdapter, EncodeDecodeI420Frame) {
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(kVideoCodecStereo, codec_specific_info.codecType);
EXPECT_EQ(kVideoCodecVP9,
codec_specific_info.codecSpecific.stereo.associated_codec_type);
EXPECT_EQ(0, codec_specific_info.codecSpecific.stereo.indices.frame_index);
EXPECT_EQ(1, codec_specific_info.codecSpecific.stereo.indices.frame_count);
EXPECT_EQ(0ull,
codec_specific_info.codecSpecific.stereo.indices.picture_index);
EXPECT_EQ(
WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, &codec_specific_info));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr));
std::unique_ptr<VideoFrame> decoded_frame;
rtc::Optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@ -122,38 +112,13 @@ TEST_F(TestStereoAdapter, EncodeDecodeI420Frame) {
TEST_F(TestStereoAdapter, EncodeDecodeI420AFrame) {
std::unique_ptr<VideoFrame> yuva_frame = CreateI420AInputFrame();
const size_t expected_num_encoded_frames = 2;
SetWaitForEncodedFramesThreshold(expected_num_encoded_frames);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*yuva_frame, nullptr, nullptr));
std::vector<EncodedImage> encoded_frames;
std::vector<CodecSpecificInfo> codec_specific_infos;
ASSERT_TRUE(WaitForEncodedFrames(&encoded_frames, &codec_specific_infos));
EXPECT_EQ(expected_num_encoded_frames, encoded_frames.size());
EXPECT_EQ(expected_num_encoded_frames, codec_specific_infos.size());
const CodecSpecificInfo& yuv_info = codec_specific_infos[kYUVStream];
EXPECT_EQ(kVideoCodecStereo, yuv_info.codecType);
EXPECT_EQ(kVideoCodecVP9,
yuv_info.codecSpecific.stereo.associated_codec_type);
EXPECT_EQ(kYUVStream, yuv_info.codecSpecific.stereo.indices.frame_index);
EXPECT_EQ(kAlphaCodecStreams,
yuv_info.codecSpecific.stereo.indices.frame_count);
EXPECT_EQ(0ull, yuv_info.codecSpecific.stereo.indices.picture_index);
const CodecSpecificInfo& axx_info = codec_specific_infos[kAXXStream];
EXPECT_EQ(kVideoCodecStereo, axx_info.codecType);
EXPECT_EQ(kVideoCodecVP9,
axx_info.codecSpecific.stereo.associated_codec_type);
EXPECT_EQ(kAXXStream, axx_info.codecSpecific.stereo.indices.frame_index);
EXPECT_EQ(kAlphaCodecStreams,
axx_info.codecSpecific.stereo.indices.frame_count);
EXPECT_EQ(0ull, axx_info.codecSpecific.stereo.indices.picture_index);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frames[kYUVStream],
false, nullptr, &yuv_info));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frames[kAXXStream],
false, nullptr, &axx_info));
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr));
std::unique_ptr<VideoFrame> decoded_frame;
rtc::Optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));

View File

@ -33,19 +33,13 @@ VideoCodecTest::FakeEncodeCompleteCallback::OnEncodedImage(
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
rtc::CritScope lock(&test_->encoded_frame_section_);
test_->encoded_frames_.push_back(frame);
test_->encoded_frame_.emplace(frame);
RTC_DCHECK(codec_specific_info);
test_->codec_specific_infos_.push_back(*codec_specific_info);
if (!test_->wait_for_encoded_frames_threshold_) {
test_->encoded_frame_event_.Set();
return Result(Result::OK);
}
if (test_->encoded_frames_.size() ==
test_->wait_for_encoded_frames_threshold_) {
test_->wait_for_encoded_frames_threshold_ = 1;
test_->encoded_frame_event_.Set();
}
test_->codec_specific_info_.codecType = codec_specific_info->codecType;
// Skip |codec_name|, to avoid allocating.
test_->codec_specific_info_.codecSpecific =
codec_specific_info->codecSpecific;
test_->encoded_frame_event_.Set();
return Result(Result::OK);
}
@ -80,38 +74,17 @@ void VideoCodecTest::SetUp() {
bool VideoCodecTest::WaitForEncodedFrame(
EncodedImage* frame,
CodecSpecificInfo* codec_specific_info) {
std::vector<EncodedImage> frames;
std::vector<CodecSpecificInfo> codec_specific_infos;
if (!WaitForEncodedFrames(&frames, &codec_specific_infos))
return false;
EXPECT_EQ(frames.size(), static_cast<size_t>(1));
EXPECT_EQ(frames.size(), codec_specific_infos.size());
*frame = frames[0];
*codec_specific_info = codec_specific_infos[0];
return true;
}
void VideoCodecTest::SetWaitForEncodedFramesThreshold(size_t num_frames) {
rtc::CritScope lock(&encoded_frame_section_);
wait_for_encoded_frames_threshold_ = num_frames;
}
bool VideoCodecTest::WaitForEncodedFrames(
std::vector<EncodedImage>* frames,
std::vector<CodecSpecificInfo>* codec_specific_info) {
EXPECT_TRUE(encoded_frame_event_.Wait(kEncodeTimeoutMs))
<< "Timed out while waiting for encoded frame.";
bool ret = encoded_frame_event_.Wait(kEncodeTimeoutMs);
EXPECT_TRUE(ret) << "Timed out while waiting for an encoded frame.";
// This becomes unsafe if there are multiple threads waiting for frames.
rtc::CritScope lock(&encoded_frame_section_);
EXPECT_FALSE(encoded_frames_.empty());
EXPECT_FALSE(codec_specific_infos_.empty());
EXPECT_EQ(encoded_frames_.size(), codec_specific_infos_.size());
if (!encoded_frames_.empty()) {
*frames = encoded_frames_;
encoded_frames_.clear();
RTC_DCHECK(!codec_specific_infos_.empty());
*codec_specific_info = codec_specific_infos_;
codec_specific_infos_.clear();
EXPECT_TRUE(encoded_frame_);
if (encoded_frame_) {
*frame = std::move(*encoded_frame_);
encoded_frame_.reset();
RTC_DCHECK(codec_specific_info);
codec_specific_info->codecType = codec_specific_info_.codecType;
codec_specific_info->codecSpecific = codec_specific_info_.codecSpecific;
return true;
} else {
return false;

View File

@ -12,7 +12,6 @@
#define MODULES_VIDEO_CODING_CODECS_TEST_VIDEO_CODEC_TEST_H_
#include <memory>
#include <vector>
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
@ -33,7 +32,6 @@ class VideoCodecTest : public ::testing::Test {
decode_complete_callback_(this),
encoded_frame_event_(false /* manual reset */,
false /* initially signaled */),
wait_for_encoded_frames_threshold_(1),
decoded_frame_event_(false /* manual reset */,
false /* initially signaled */) {}
@ -76,19 +74,8 @@ class VideoCodecTest : public ::testing::Test {
void SetUp() override;
// Helper method for waiting a single encoded frame.
bool WaitForEncodedFrame(EncodedImage* frame,
CodecSpecificInfo* codec_specific_info);
// Helper methods for waiting for multiple encoded frames. Caller must
// define how many frames are to be waited for via |num_frames| before calling
// Encode(). Then, they can expect to retrieve them via WaitForEncodedFrames().
void SetWaitForEncodedFramesThreshold(size_t num_frames);
bool WaitForEncodedFrames(
std::vector<EncodedImage>* frames,
std::vector<CodecSpecificInfo>* codec_specific_info);
// Helper method for waiting a single decoded frame.
bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
rtc::Optional<uint8_t>* qp);
@ -108,11 +95,9 @@ class VideoCodecTest : public ::testing::Test {
rtc::Event encoded_frame_event_;
rtc::CriticalSection encoded_frame_section_;
size_t wait_for_encoded_frames_threshold_;
std::vector<EncodedImage> encoded_frames_
RTC_GUARDED_BY(encoded_frame_section_);
std::vector<CodecSpecificInfo> codec_specific_infos_
rtc::Optional<EncodedImage> encoded_frame_
RTC_GUARDED_BY(encoded_frame_section_);
CodecSpecificInfo codec_specific_info_ RTC_GUARDED_BY(encoded_frame_section_);
rtc::Event decoded_frame_event_;
rtc::CriticalSection decoded_frame_section_;