Rename EncodedImage::_length --> size_, and make private.

Use size() accessor function. Also replace most nearby uses of _buffer
with data().

Bug: webrtc:9378
Change-Id: I1ac3459612f7c6151bd057d05448da1c4e1c6e3d
Reviewed-on: https://webrtc-review.googlesource.com/c/116783
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26273}
Niels Möller authored 2019-01-15 08:50:01 +01:00, committed by Commit Bot
parent 7491e8f17b
commit 77536a2b81
43 changed files with 153 additions and 164 deletions
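
Illustrative sketch of the call-site migration the diffs below perform (the helper
function and its arguments are hypothetical; the accessors it uses — data(), size(),
set_size(), set_buffer(), capacity() — are the ones the changed code switches to):

// Hypothetical helper, for illustration only: allocate via set_buffer()
// (data pointer + capacity), write through data(), and track the payload
// length with set_size()/size() instead of assigning _length directly.
#include <cstdint>
#include <cstring>

#include "api/video/encoded_image.h"  // assumed header location for EncodedImage

void FillEncodedImage(webrtc::EncodedImage* image,
                      const uint8_t* payload,
                      size_t payload_size) {
  // Before this change: image->_buffer = new uint8_t[payload_size];
  //                     memcpy(image->_buffer, payload, payload_size);
  //                     image->_length = payload_size;
  // After this change:
  image->set_buffer(new uint8_t[payload_size], payload_size);
  std::memcpy(image->data(), payload, payload_size);
  image->set_size(payload_size);  // Payload length; size() stays <= capacity().
}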

@@ -241,7 +241,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
     ReportError();
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  if (!input_image._buffer || !input_image._length) {
+  if (!input_image.data() || !input_image.size()) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
@@ -254,24 +254,23 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   // FFmpeg requires padding due to some optimized bitstream readers reading 32
   // or 64 bits at once and could read over the end. See avcodec_decode_video2.
   RTC_CHECK_GE(input_image.capacity(),
-               input_image._length +
+               input_image.size() +
                    EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
   // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
   // bitstreams could cause overread and segfault." See
   // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
-  memset(input_image._buffer + input_image._length,
-         0,
+  memset(input_image._buffer + input_image.size(), 0,
          EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));

   AVPacket packet;
   av_init_packet(&packet);
   packet.data = input_image._buffer;
-  if (input_image._length >
+  if (input_image.size() >
       static_cast<size_t>(std::numeric_limits<int>::max())) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  packet.size = static_cast<int>(input_image._length);
+  packet.size = static_cast<int>(input_image.size());

   int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000;  // ms -> μs
   av_context_->reordered_opaque = frame_timestamp_us;
@@ -318,8 +318,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,

   absl::optional<uint8_t> qp;
   // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
-  h264_bitstream_parser_.ParseBitstream(input_image._buffer,
-                                        input_image._length);
+  h264_bitstream_parser_.ParseBitstream(input_image.data(), input_image.size());
   int qp_int;
   if (h264_bitstream_parser_.GetLastSliceQp(&qp_int)) {
     qp.emplace(qp_int);

@@ -135,7 +135,7 @@ static void RtpFragmentize(EncodedImage* encoded_image,
   const uint8_t start_code[4] = {0, 0, 0, 1};
   frag_header->VerifyAndAllocateFragmentationHeader(fragments_count);
   size_t frag = 0;
-  encoded_image->_length = 0;
+  encoded_image->set_size(0);
   for (int layer = 0; layer < info->iLayerNum; ++layer) {
     const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
     // Iterate NAL units making up this layer, noting fragments.
@@ -149,15 +149,15 @@ static void RtpFragmentize(EncodedImage* encoded_image,
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
       frag_header->fragmentationOffset[frag] =
-          encoded_image->_length + layer_len + sizeof(start_code);
+          encoded_image->size() + layer_len + sizeof(start_code);
       frag_header->fragmentationLength[frag] =
           layerInfo.pNalLengthInByte[nal] - sizeof(start_code);
       layer_len += layerInfo.pNalLengthInByte[nal];
     }
     // Copy the entire layer's data (including start codes).
-    memcpy(encoded_image->_buffer + encoded_image->_length, layerInfo.pBsBuf,
+    memcpy(encoded_image->data() + encoded_image->size(), layerInfo.pBsBuf,
            layer_len);
-    encoded_image->_length += layer_len;
+    encoded_image->set_size(encoded_image->size() + layer_len);
   }
 }

@@ -308,7 +308,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
     encoded_images_[i]._completeFrame = true;
     encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
     encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
-    encoded_images_[i]._length = 0;
+    encoded_images_[i].set_size(0);
   }

   SimulcastRateAllocator init_allocator(codec_);
@@ -519,10 +519,10 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,

     // Encoder can skip frames to save bandwidth in which case
     // |encoded_images_[i]._length| == 0.
-    if (encoded_images_[i]._length > 0) {
+    if (encoded_images_[i].size() > 0) {
       // Parse QP.
-      h264_bitstream_parser_.ParseBitstream(encoded_images_[i]._buffer,
-                                            encoded_images_[i]._length);
+      h264_bitstream_parser_.ParseBitstream(encoded_images_[i].data(),
+                                            encoded_images_[i].size());
       h264_bitstream_parser_.GetLastSliceQp(&encoded_images_[i].qp_);

       // Deliver encoded image.

@@ -128,7 +128,7 @@ MultiplexImageComponentHeader UnpackFrameHeader(uint8_t* buffer) {
 }

 void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
-  memcpy(buffer, image.encoded_image._buffer, image.encoded_image._length);
+  memcpy(buffer, image.encoded_image.data(), image.encoded_image.size());
 }

 MultiplexImage::MultiplexImage(uint16_t picture_index,
@@ -170,7 +170,7 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(images[i].codec_type);
     frame_header.bitstream_length =
-        static_cast<uint32_t>(images[i].encoded_image._length + padding);
+        static_cast<uint32_t>(images[i].encoded_image.size() + padding);
     bitstream_offset += frame_header.bitstream_length;

     frame_header.codec_type = images[i].codec_type;
@@ -188,9 +188,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
     frame_headers.push_back(frame_header);
   }

-  combined_image._length = bitstream_offset;
-  combined_image.set_buffer(new uint8_t[combined_image._length],
-                            combined_image._length);
+  combined_image.set_buffer(new uint8_t[bitstream_offset], bitstream_offset);
+  combined_image.set_size(bitstream_offset);

   // header
   header_offset = PackHeader(combined_image._buffer, header);
@@ -268,7 +267,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
         static_cast<size_t>(frame_headers[i].bitstream_length));
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(image_component.codec_type);
-    encoded_image._length = encoded_image.capacity() - padding;
+    encoded_image.set_size(encoded_image.capacity() - padding);

     image_component.encoded_image = encoded_image;

@@ -275,9 +275,11 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
   image_component.codec_type =
       PayloadStringToCodecType(associated_format_.name);
   image_component.encoded_image = encodedImage;
-  image_component.encoded_image._buffer = new uint8_t[encodedImage._length];
-  std::memcpy(image_component.encoded_image._buffer, encodedImage._buffer,
-              encodedImage._length);
+  image_component.encoded_image.set_buffer(new uint8_t[encodedImage.size()],
+                                           encodedImage.size());
+  image_component.encoded_image.set_size(encodedImage.size());
+  std::memcpy(image_component.encoded_image.data(), encodedImage.data(),
+              encodedImage.size());

   rtc::CritScope cs(&crit_);
   const auto& stashed_image_itr =

@@ -320,8 +320,7 @@ void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
   bool contains_pps = false;
   bool contains_idr = false;
   const std::vector<webrtc::H264::NaluIndex> nalu_indices =
-      webrtc::H264::FindNaluIndices(encoded_frame._buffer,
-                                    encoded_frame._length);
+      webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
   for (const webrtc::H264::NaluIndex& index : nalu_indices) {
     webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(
         encoded_frame._buffer[index.payload_start_offset]);

@@ -45,11 +45,9 @@ class QpFrameChecker : public VideoCodecTestFixture::EncodedFrameChecker {
                          const EncodedImage& encoded_frame) const override {
     int qp;
     if (codec == kVideoCodecVP8) {
-      EXPECT_TRUE(
-          vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+      EXPECT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     } else if (codec == kVideoCodecVP9) {
-      EXPECT_TRUE(
-          vp9::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+      EXPECT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     } else {
       RTC_NOTREACHED();
     }

@@ -51,8 +51,7 @@ size_t GetMaxNaluSizeBytes(const EncodedImage& encoded_frame,
     return 0;

   std::vector<webrtc::H264::NaluIndex> nalu_indices =
-      webrtc::H264::FindNaluIndices(encoded_frame._buffer,
-                                    encoded_frame._length);
+      webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());

   RTC_CHECK(!nalu_indices.empty());

@@ -392,7 +391,7 @@ void VideoProcessor::FrameEncoded(
       frame_stat->encode_start_ns, encode_stop_ns - post_encode_time_ns_);
   frame_stat->target_bitrate_kbps =
       bitrate_allocation_.GetTemporalLayerSum(spatial_idx, temporal_idx) / 1000;
-  frame_stat->length_bytes = encoded_image._length;
+  frame_stat->length_bytes = encoded_image.size();
   frame_stat->frame_type = encoded_image._frameType;
   frame_stat->temporal_idx = temporal_idx;
   frame_stat->max_nalu_size_bytes = GetMaxNaluSizeBytes(encoded_image, config_);
@@ -554,7 +553,7 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
   RTC_CHECK_GT(config_.NumberOfSpatialLayers(), 1);

   EncodedImage base_image;
-  RTC_CHECK_EQ(base_image._length, 0);
+  RTC_CHECK_EQ(base_image.size(), 0);

   // Each SVC layer is decoded with dedicated decoder. Find the nearest
   // non-dropped base frame and merge it and current frame into superframe.
@@ -568,29 +567,29 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
       }
     }
   }

-  const size_t payload_size_bytes = base_image._length + encoded_image._length;
+  const size_t payload_size_bytes = base_image.size() + encoded_image.size();
   const size_t buffer_size_bytes =
       payload_size_bytes + EncodedImage::GetBufferPaddingBytes(codec);

   uint8_t* copied_buffer = new uint8_t[buffer_size_bytes];
   RTC_CHECK(copied_buffer);

-  if (base_image._length) {
+  if (base_image.size()) {
     RTC_CHECK(base_image._buffer);
-    memcpy(copied_buffer, base_image._buffer, base_image._length);
+    memcpy(copied_buffer, base_image.data(), base_image.size());
   }
-  memcpy(copied_buffer + base_image._length, encoded_image._buffer,
-         encoded_image._length);
+  memcpy(copied_buffer + base_image.size(), encoded_image.data(),
+         encoded_image.size());

   EncodedImage copied_image = encoded_image;
   copied_image = encoded_image;
   copied_image.set_buffer(copied_buffer, buffer_size_bytes);
-  copied_image._length = payload_size_bytes;
+  copied_image.set_size(payload_size_bytes);

   // Replace previous EncodedImage for this spatial layer.
-  uint8_t* old_buffer = merged_encoded_frames_.at(spatial_idx)._buffer;
-  if (old_buffer) {
-    delete[] old_buffer;
+  uint8_t* old_data = merged_encoded_frames_.at(spatial_idx).data();
+  if (old_data) {
+    delete[] old_data;
   }
   merged_encoded_frames_.at(spatial_idx) = copied_image;

@@ -109,7 +109,7 @@ class VideoProcessor {
                   const webrtc::EncodedImage& encoded_image,
                   const webrtc::CodecSpecificInfo* codec_specific_info)
         : video_processor_(video_processor),
-          buffer_(encoded_image._buffer, encoded_image._length),
+          buffer_(encoded_image._buffer, encoded_image.size()),
           encoded_image_(encoded_image),
           codec_specific_info_(*codec_specific_info) {
       encoded_image_._buffer = buffer_.data();

@@ -161,7 +161,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   if (decode_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  if (input_image._buffer == NULL && input_image._length > 0) {
+  if (input_image.data() == NULL && input_image.size() > 0) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0)
       propagation_cnt_ = 0;
@@ -249,10 +249,10 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   }

   uint8_t* buffer = input_image._buffer;
-  if (input_image._length == 0) {
+  if (input_image.size() == 0) {
     buffer = NULL;  // Triggers full frame concealment.
   }
-  if (vpx_codec_decode(decoder_, buffer, input_image._length, 0,
+  if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
                        kDecodeDeadlineRealtime)) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0) {

@@ -861,7 +861,7 @@ void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
   int qp = 0;
   vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
   temporal_layers_[stream_idx]->OnEncodeDone(
-      timestamp, encoded_images_[encoder_idx]._length,
+      timestamp, encoded_images_[encoder_idx].size(),
       (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0, qp, vp8Info);
 }

@@ -871,7 +871,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
   for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
-    encoded_images_[encoder_idx]._length = 0;
+    encoded_images_[encoder_idx].set_size(0);
     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
     CodecSpecificInfo codec_specific;
     const vpx_codec_cx_pkt_t* pkt = NULL;
@@ -879,7 +879,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
            NULL) {
       switch (pkt->kind) {
         case VPX_CODEC_CX_FRAME_PKT: {
-          size_t length = encoded_images_[encoder_idx]._length;
+          size_t length = encoded_images_[encoder_idx].size();
           if (pkt->data.frame.sz + length >
               encoded_images_[encoder_idx].capacity()) {
             uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
@@ -890,8 +890,8 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
           }
           memcpy(&encoded_images_[encoder_idx]._buffer[length],
                  pkt->data.frame.buf, pkt->data.frame.sz);
-          encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
-          assert(length <= encoded_images_[encoder_idx].capacity());
+          encoded_images_[encoder_idx].set_size(
+              encoded_images_[encoder_idx].size() + pkt->data.frame.sz);
           break;
         }
         default:
@@ -921,9 +921,9 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
     encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());

     if (send_stream_[stream_idx]) {
-      if (encoded_images_[encoder_idx]._length > 0) {
+      if (encoded_images_[encoder_idx].size() > 0) {
         TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
-                          encoded_images_[encoder_idx]._length);
+                          encoded_images_[encoder_idx].size());
         encoded_images_[encoder_idx]._encodedHeight =
             codec_.simulcastStream[stream_idx].height;
         encoded_images_[encoder_idx]._encodedWidth =
@@ -937,7 +937,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
     } else if (!temporal_layers_[stream_idx]
                     ->SupportsEncoderFrameDropping()) {
       result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
-      if (encoded_images_[encoder_idx]._length == 0) {
+      if (encoded_images_[encoder_idx].size() == 0) {
         // Dropped frame that will be re-encoded.
         temporal_layers_[stream_idx]->OnEncodeDone(input_image.timestamp(), 0,
                                                    false, 0, nullptr);

@@ -98,8 +98,8 @@ class TestVp8Impl : public VideoCodecUnitTest {

   void VerifyQpParser(const EncodedImage& encoded_frame) const {
     int qp;
-    EXPECT_GT(encoded_frame._length, 0u);
-    ASSERT_TRUE(vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+    EXPECT_GT(encoded_frame.size(), 0u);
+    ASSERT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
   }
 };

@@ -243,7 +243,7 @@ TEST_F(TestVp9Impl, ParserQpEqualsEncodedQp) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));

   int qp = 0;
-  ASSERT_TRUE(vp9::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+  ASSERT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
   EXPECT_EQ(encoded_frame.qp_, qp);
 }

@@ -1263,7 +1263,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
                              pkt->data.frame.sz);
   }
   memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
-  encoded_image_._length = pkt->data.frame.sz;
+  encoded_image_.set_size(pkt->data.frame.sz);

   const bool is_key_frame =
       (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
@@ -1276,7 +1276,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
     encoded_image_._frameType = kVideoFrameKey;
     force_key_frame_ = false;
   }
-  RTC_DCHECK_LE(encoded_image_._length, encoded_image_.capacity());
+  RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());

   memset(&codec_specific_, 0, sizeof(codec_specific_));
   absl::optional<int> spatial_index;
@@ -1288,7 +1288,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
     UpdateReferenceBuffers(*pkt, pics_since_key_);
   }

-  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
+  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
   encoded_image_.SetTimestamp(input_image_->timestamp());
   encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
   encoded_image_.rotation_ = input_image_->rotation();
@@ -1315,7 +1315,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
 }

 void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
-  if (encoded_image_._length > 0) {
+  if (encoded_image_.size() > 0) {
     codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;

     // No data partitioning in VP9, so 1 partition only.
@@ -1323,13 +1323,13 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
     RTPFragmentationHeader frag_info;
     frag_info.VerifyAndAllocateFragmentationHeader(1);
     frag_info.fragmentationOffset[part_idx] = 0;
-    frag_info.fragmentationLength[part_idx] = encoded_image_._length;
+    frag_info.fragmentationLength[part_idx] = encoded_image_.size();
     frag_info.fragmentationPlType[part_idx] = 0;
     frag_info.fragmentationTimeDiff[part_idx] = 0;
     encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_,
                                                &frag_info);
-    encoded_image_._length = 0;
+    encoded_image_.set_size(0);

     if (codec_.mode == VideoCodecMode::kScreensharing) {
       const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
@@ -1432,13 +1432,13 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   vpx_codec_iter_t iter = nullptr;
   vpx_image_t* img;
   uint8_t* buffer = input_image._buffer;
-  if (input_image._length == 0) {
+  if (input_image.size() == 0) {
     buffer = nullptr;  // Triggers full frame concealment.
   }
   // During decode libvpx may get and release buffers from |frame_buffer_pool_|.
   // In practice libvpx keeps a few (~3-4) buffers alive at a time.
   if (vpx_codec_decode(decoder_, buffer,
-                       static_cast<unsigned int>(input_image._length), 0,
+                       static_cast<unsigned int>(input_image.size()), 0,
                        VPX_DL_REALTIME)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }