Convert all webrtc code to not access EncodedImage::_size directly.

Read using the capacity() method, write using the set_buffer() method.
This is preparation for making the member private and renaming it to
capacity_. (A hedged sketch of the assumed accessor shape follows the
change summary below.)

Bug: webrtc:9378
Change-Id: I2f96679d052a83fe81be40301bd9863c87074640
Reviewed-on: https://webrtc-review.googlesource.com/c/113520
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Niels Möller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#25934}
Author: Niels Möller
Date: 2018-12-07 16:21:18 +01:00
Committed by: Commit Bot
Parent: 60d770f2ef
Commit: 48a79465ec
12 changed files with 74 additions and 75 deletions
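
For orientation only: the EncodedImage declaration itself is not part of this
diff, so the sketch below is a hypothetical reconstruction of the accessor
shape the hunks assume, not the actual api/video/encoded_image.h. The member
names (_buffer, _length, _size) and the two accessors are taken from the code
in this commit; everything else is assumption.

    #include <cstddef>
    #include <cstdint>

    // Hedged sketch of the relevant slice of webrtc::EncodedImage at this
    // stage of the migration: _buffer and _length are still public, and
    // _size is the member about to become private capacity_.
    class EncodedImage {
     public:
      // Read side of the migration: how many bytes the buffer can hold.
      size_t capacity() const { return _size; }

      // Write side: install a buffer and record its capacity in one call.
      // Callers in the hunks below retain ownership and delete[] themselves.
      void set_buffer(uint8_t* buffer, size_t capacity) {
        _buffer = buffer;
        _size = capacity;
      }

      uint8_t* _buffer = nullptr;  // encoded bitstream storage (not owned)
      size_t _length = 0;          // valid encoded bytes, <= capacity()
      size_t _size = 0;            // buffer capacity; the member being hidden
    };

Under that assumption, every hunk below is the same mechanical rewrite: reads
of _size become capacity(), and paired writes of _buffer and _size collapse
into a single set_buffer() call.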

View File

@@ -250,7 +250,8 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   // FFmpeg requires padding due to some optimized bitstream readers reading 32
   // or 64 bits at once and could read over the end. See avcodec_decode_video2.
-  RTC_CHECK_GE(input_image._size, input_image._length +
+  RTC_CHECK_GE(input_image.capacity(),
+               input_image._length +
                    EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
   // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
   // bitstreams could cause overread and segfault." See

View File

@@ -99,34 +99,34 @@ static void RtpFragmentize(EncodedImage* encoded_image,
                            SFrameBSInfo* info,
                            RTPFragmentationHeader* frag_header) {
   // Calculate minimum buffer size required to hold encoded data.
-  size_t required_size = 0;
+  size_t required_capacity = 0;
   size_t fragments_count = 0;
   for (int layer = 0; layer < info->iLayerNum; ++layer) {
     const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
     for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) {
       RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0);
-      // Ensure |required_size| will not overflow.
+      // Ensure |required_capacity| will not overflow.
       RTC_CHECK_LE(layerInfo.pNalLengthInByte[nal],
-                   std::numeric_limits<size_t>::max() - required_size);
-      required_size += layerInfo.pNalLengthInByte[nal];
+                   std::numeric_limits<size_t>::max() - required_capacity);
+      required_capacity += layerInfo.pNalLengthInByte[nal];
     }
   }
-  if (encoded_image->_size < required_size) {
+  if (encoded_image->capacity() < required_capacity) {
     // Increase buffer size. Allocate enough to hold an unencoded image, this
     // should be more than enough to hold any encoded data of future frames of
     // the same size (avoiding possible future reallocation due to variations in
     // required size).
-    encoded_image->_size = CalcBufferSize(
-        VideoType::kI420, frame_buffer.width(), frame_buffer.height());
-    if (encoded_image->_size < required_size) {
+    size_t new_capacity = CalcBufferSize(VideoType::kI420, frame_buffer.width(),
+                                         frame_buffer.height());
+    if (new_capacity < required_capacity) {
       // Encoded data > unencoded data. Allocate required bytes.
       RTC_LOG(LS_WARNING)
           << "Encoding produced more bytes than the original image "
-          << "data! Original bytes: " << encoded_image->_size
-          << ", encoded bytes: " << required_size << ".";
-      encoded_image->_size = required_size;
+          << "data! Original bytes: " << new_capacity
+          << ", encoded bytes: " << required_capacity << ".";
+      new_capacity = required_capacity;
     }
-    encoded_image->_buffer = new uint8_t[encoded_image->_size];
+    encoded_image->set_buffer(new uint8_t[new_capacity], new_capacity);
     encoded_image_buffer->reset(encoded_image->_buffer);
   }
@@ -141,7 +141,7 @@ static void RtpFragmentize(EncodedImage* encoded_image,
     // Iterate NAL units making up this layer, noting fragments.
     size_t layer_len = 0;
     for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++frag) {
-      // Because the sum of all layer lengths, |required_size|, fits in a
+      // Because the sum of all layer lengths, |required_capacity|, fits in a
       // |size_t|, we know that any indices in-between will not overflow.
       RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
@@ -299,10 +299,11 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
     openh264_encoder->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
     // Initialize encoded image. Default buffer size: size of unencoded data.
-    encoded_images_[i]._size =
+    const size_t new_capacity =
         CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
                        codec_.simulcastStream[idx].height);
-    encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
+    encoded_images_[i].set_buffer(new uint8_t[new_capacity], new_capacity);
     encoded_image_buffers_[i].reset(encoded_images_[i]._buffer);
     encoded_images_[i]._completeFrame = true;
     encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;

View File

@@ -55,18 +55,17 @@ int I420Encoder::InitEncode(const VideoCodec* codecSettings,
   // Allocating encoded memory.
   if (_encodedImage._buffer != NULL) {
     delete[] _encodedImage._buffer;
-    _encodedImage._buffer = NULL;
-    _encodedImage._size = 0;
+    _encodedImage.set_buffer(NULL, 0);
   }
-  const size_t newSize = CalcBufferSize(VideoType::kI420, codecSettings->width,
-                                        codecSettings->height) +
-                         kI420HeaderSize;
-  uint8_t* newBuffer = new uint8_t[newSize];
+  const size_t newCapacity =
+      CalcBufferSize(VideoType::kI420, codecSettings->width,
+                     codecSettings->height) +
+      kI420HeaderSize;
+  uint8_t* newBuffer = new uint8_t[newCapacity];
   if (newBuffer == NULL) {
     return WEBRTC_VIDEO_CODEC_MEMORY;
   }
-  _encodedImage._size = newSize;
-  _encodedImage._buffer = newBuffer;
+  _encodedImage.set_buffer(newBuffer, newCapacity);

   // If no memory allocation, no point to init.
   _inited = true;
@@ -97,15 +96,13 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
     return WEBRTC_VIDEO_CODEC_ERR_SIZE;
   }
-  size_t req_length = CalcBufferSize(VideoType::kI420, inputImage.width(),
-                                     inputImage.height()) +
-                      kI420HeaderSize;
-  if (_encodedImage._size > req_length) {
+  size_t req_capacity = CalcBufferSize(VideoType::kI420, inputImage.width(),
+                                       inputImage.height()) +
+                        kI420HeaderSize;
+  if (_encodedImage.capacity() < req_capacity) {
     // Reallocate buffer.
     delete[] _encodedImage._buffer;
-
-    _encodedImage._buffer = new uint8_t[req_length];
-    _encodedImage._size = req_length;
+    _encodedImage.set_buffer(new uint8_t[req_capacity], req_capacity);
   }

   uint8_t* buffer = _encodedImage._buffer;
@@ -113,7 +110,7 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
   buffer = InsertHeader(buffer, width, height);
   int ret_length =
-      ExtractBuffer(inputImage, req_length - kI420HeaderSize, buffer);
+      ExtractBuffer(inputImage, req_capacity - kI420HeaderSize, buffer);
   if (ret_length < 0)
     return WEBRTC_VIDEO_CODEC_MEMORY;
   _encodedImage._length = ret_length + kI420HeaderSize;
@@ -175,10 +172,10 @@ int I420Decoder::Decode(const EncodedImage& inputImage,
   buffer = ExtractHeader(buffer, &width, &height);
   // Verify that the available length is sufficient:
-  size_t req_length =
+  size_t req_capacity =
       CalcBufferSize(VideoType::kI420, width, height) + kI420HeaderSize;
-  if (req_length > inputImage._length) {
+  if (req_capacity > inputImage._length) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
   // Set decoded image parameters.

View File

@@ -188,8 +188,9 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
     frame_headers.push_back(frame_header);
   }
-  combined_image._length = combined_image._size = bitstream_offset;
-  combined_image._buffer = new uint8_t[combined_image._length];
+  combined_image._length = bitstream_offset;
+  combined_image.set_buffer(new uint8_t[combined_image._length],
+                            combined_image._length);

   // header
   header_offset = PackHeader(combined_image._buffer, header);
@@ -262,13 +263,12 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
     EncodedImage encoded_image = combined_image;
     encoded_image.SetTimestamp(combined_image.Timestamp());
     encoded_image._frameType = frame_headers[i].frame_type;
-    encoded_image._size =
-        static_cast<size_t>(frame_headers[i].bitstream_length);
+    encoded_image.set_buffer(
+        combined_image._buffer + frame_headers[i].bitstream_offset,
+        static_cast<size_t>(frame_headers[i].bitstream_length));
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(image_component.codec_type);
-    encoded_image._length = encoded_image._size - padding;
-    encoded_image._buffer =
-        combined_image._buffer + frame_headers[i].bitstream_offset;
+    encoded_image._length = encoded_image.capacity() - padding;

     image_component.encoded_image = encoded_image;

View File

@@ -575,9 +575,8 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
   EncodedImage copied_image = encoded_image;
   copied_image = encoded_image;
-  copied_image._buffer = copied_buffer;
+  copied_image.set_buffer(copied_buffer, buffer_size_bytes);
   copied_image._length = payload_size_bytes;
-  copied_image._size = buffer_size_bytes;

   // Replace previous EncodedImage for this spatial layer.
   uint8_t* old_buffer = merged_encoded_frames_.at(spatial_idx)._buffer;

View File

@@ -379,9 +379,9 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
     if (encoded_images_[i]._buffer != NULL) {
       delete[] encoded_images_[i]._buffer;
     }
-    encoded_images_[i]._size =
+    size_t frame_capacity =
         CalcBufferSize(VideoType::kI420, codec_.width, codec_.height);
-    encoded_images_[i]._buffer = new uint8_t[encoded_images_[i]._size];
+    encoded_images_[i].set_buffer(new uint8_t[frame_capacity], frame_capacity);
     encoded_images_[i]._completeFrame = true;
   }
   // populate encoder configuration with default values
@@ -861,17 +861,17 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
       case VPX_CODEC_CX_FRAME_PKT: {
         size_t length = encoded_images_[encoder_idx]._length;
         if (pkt->data.frame.sz + length >
-            encoded_images_[encoder_idx]._size) {
+            encoded_images_[encoder_idx].capacity()) {
           uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
           memcpy(buffer, encoded_images_[encoder_idx]._buffer, length);
           delete[] encoded_images_[encoder_idx]._buffer;
-          encoded_images_[encoder_idx]._buffer = buffer;
-          encoded_images_[encoder_idx]._size = pkt->data.frame.sz + length;
+          encoded_images_[encoder_idx].set_buffer(
+              buffer, pkt->data.frame.sz + length);
         }
         memcpy(&encoded_images_[encoder_idx]._buffer[length],
                pkt->data.frame.buf, pkt->data.frame.sz);
         encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
-        assert(length <= encoded_images_[encoder_idx]._size);
+        assert(length <= encoded_images_[encoder_idx].capacity());
         break;
       }
       default:

View File

@@ -395,9 +395,9 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
   if (encoded_image_._buffer != nullptr) {
     delete[] encoded_image_._buffer;
   }
-  encoded_image_._size =
+  size_t frame_capacity =
       CalcBufferSize(VideoType::kI420, codec_.width, codec_.height);
-  encoded_image_._buffer = new uint8_t[encoded_image_._size];
+  encoded_image_.set_buffer(new uint8_t[frame_capacity], frame_capacity);
   encoded_image_._completeFrame = true;
   // Populate encoder configuration with default values.
   if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
@@ -1257,10 +1257,10 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
     DeliverBufferedFrame(end_of_picture);
   }
-  if (pkt->data.frame.sz > encoded_image_._size) {
+  if (pkt->data.frame.sz > encoded_image_.capacity()) {
     delete[] encoded_image_._buffer;
-    encoded_image_._size = pkt->data.frame.sz;
-    encoded_image_._buffer = new uint8_t[encoded_image_._size];
+    encoded_image_.set_buffer(new uint8_t[pkt->data.frame.sz],
+                              pkt->data.frame.sz);
   }
   memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
   encoded_image_._length = pkt->data.frame.sz;
@@ -1276,7 +1276,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
     encoded_image_._frameType = kVideoFrameKey;
     force_key_frame_ = false;
   }
-  RTC_DCHECK_LE(encoded_image_._length, encoded_image_._size);
+  RTC_DCHECK_LE(encoded_image_._length, encoded_image_.capacity());
   memset(&codec_specific_, 0, sizeof(codec_specific_));
   absl::optional<int> spatial_index;

View File

@@ -152,16 +152,17 @@ void VCMEncodedFrame::CopyCodecSpecific(const RTPVideoHeader* header) {
 }

 void VCMEncodedFrame::VerifyAndAllocate(size_t minimumSize) {
-  if (minimumSize > _size) {
+  size_t old_capacity = capacity();
+  if (minimumSize > old_capacity) {
     // create buffer of sufficient size
-    uint8_t* newBuffer = new uint8_t[minimumSize];
-    if (_buffer) {
+    uint8_t* old_buffer = _buffer;
+    set_buffer(new uint8_t[minimumSize], minimumSize);
+    if (old_buffer) {
       // copy old data
-      memcpy(newBuffer, _buffer, _size);
-      delete[] _buffer;
+      memcpy(_buffer, old_buffer, old_capacity);
+      delete[] old_buffer;
     }
-    _buffer = newBuffer;
-    _size = minimumSize;
   }
 }
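
The VerifyAndAllocate rewrite above is the trickiest instance of the pattern,
because the old buffer and capacity must be captured before set_buffer()
overwrites them. A standalone hedged sketch of the same grow-and-copy idiom
(EnsureCapacity is a hypothetical name, building on the sketch class near the
top of this page):

    #include <cstring>

    // Grow an image's buffer to at least |minimum_capacity|, preserving the
    // existing contents, mirroring VerifyAndAllocate above: capture the old
    // state first, install the new buffer, then copy and free the old one.
    void EnsureCapacity(EncodedImage* image, size_t minimum_capacity) {
      if (minimum_capacity <= image->capacity())
        return;
      uint8_t* old_buffer = image->_buffer;
      const size_t old_capacity = image->capacity();
      image->set_buffer(new uint8_t[minimum_capacity], minimum_capacity);
      if (old_buffer != nullptr) {
        std::memcpy(image->_buffer, old_buffer, old_capacity);
        delete[] old_buffer;
      }
    }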

View File

@@ -105,12 +105,12 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
       size() + packet.sizeBytes +
       (packet.insertStartCode ? kH264StartCodeLengthBytes : 0) +
       EncodedImage::GetBufferPaddingBytes(packet.codec);
-  if (requiredSizeBytes >= _size) {
+  if (requiredSizeBytes >= capacity()) {
     const uint8_t* prevBuffer = _buffer;
     const uint32_t increments =
         requiredSizeBytes / kBufferIncStepSizeBytes +
         (requiredSizeBytes % kBufferIncStepSizeBytes > 0);
-    const uint32_t newSize = _size + increments * kBufferIncStepSizeBytes;
+    const uint32_t newSize = capacity() + increments * kBufferIncStepSizeBytes;
     if (newSize > kMaxJBFrameSizeBytes) {
       RTC_LOG(LS_ERROR) << "Failed to insert packet due to frame being too "
                            "big.";

View File

@@ -139,7 +139,7 @@ int64_t RtpFrameObject::RenderTime() const {
 }

 void RtpFrameObject::SetSize(size_t size) {
-  RTC_DCHECK_LE(size, _size);
+  RTC_DCHECK_LE(size, capacity());
   _length = size;
 }
@@ -182,10 +182,9 @@ void RtpFrameObject::AllocateBitstreamBuffer(size_t frame_size) {
   size_t new_size = frame_size + (codec_type_ == kVideoCodecH264
                                       ? EncodedImage::kBufferPaddingBytesH264
                                       : 0);
-  if (_size < new_size) {
+  if (capacity() < new_size) {
     delete[] _buffer;
-    _buffer = new uint8_t[new_size];
-    _size = new_size;
+    set_buffer(new uint8_t[new_size], new_size);
   }
   _length = frame_size;

View File

@@ -82,8 +82,8 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
     if (encoded_image.SpatialIndex().value_or(0) == 0) {
       if (encoded_image._frameType == kVideoFrameKey) {
         delete[] encoded_key_frame_._buffer;
-        encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
-        encoded_key_frame_._size = encoded_image._size;
+        encoded_key_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
+                                      encoded_image.capacity());
         encoded_key_frame_._length = encoded_image._length;
         encoded_key_frame_._frameType = kVideoFrameKey;
         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
@@ -91,8 +91,8 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
                encoded_image._length);
       } else {
         delete[] encoded_frame_._buffer;
-        encoded_frame_._buffer = new uint8_t[encoded_image._size];
-        encoded_frame_._size = encoded_image._size;
+        encoded_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
+                                  encoded_image.capacity());
         encoded_frame_._length = encoded_image._length;
         memcpy(encoded_frame_._buffer, encoded_image._buffer,
                encoded_image._length);
@@ -838,8 +838,9 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
     EXPECT_EQ(encoded_image._frameType, kVideoFrameKey);
     size_t index = encoded_image.SpatialIndex().value_or(0);
-    encoded_frame[index]._buffer = new uint8_t[encoded_image._size];
-    encoded_frame[index]._size = encoded_image._size;
+    encoded_frame[index].set_buffer(new uint8_t[encoded_image.capacity()],
+                                    encoded_image.capacity());
     encoded_frame[index]._length = encoded_image._length;
     encoded_frame[index]._frameType = encoded_image._frameType;
     encoded_frame[index]._completeFrame = encoded_image._completeFrame;

View File

@@ -647,7 +647,7 @@ TEST_P(TestPacketBufferH264Parameterized, GetBitstreamBufferPadding) {
   ASSERT_EQ(1UL, frames_from_callback_.size());
   EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage()._length,
             sizeof(data_data));
-  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage()._size,
+  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().capacity(),
             sizeof(data_data) + EncodedImage::kBufferPaddingBytesH264);
   EXPECT_EQ(
       memcmp(frames_from_callback_[seq_num]->Buffer(), data, sizeof(data_data)),