Rename EncodedImage::_length --> size_, and make private.
Use size() accessor function. Also replace most nearby uses of _buffer with data().

Bug: webrtc:9378
Change-Id: I1ac3459612f7c6151bd057d05448da1c4e1c6e3d
Reviewed-on: https://webrtc-review.googlesource.com/c/116783
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26273}
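A minimal, self-contained sketch of the accessor pattern this commit migrates to (a hypothetical stand-in, not the real class in api/video/encoded_image.h). Every hunk below applies the same mechanical rewrite: reads of _length become size(), writes become set_size(), and most nearby reads of _buffer become data().

// EncodedImageSketch: illustrative stand-in for the EncodedImage API shape.
#include <cstddef>
#include <cstdint>
#include <cstring>

class EncodedImageSketch {
 public:
  // Takes note of an externally owned allocation of `capacity` bytes.
  void set_buffer(uint8_t* buffer, size_t capacity) {
    buffer_ = buffer;
    capacity_ = capacity;
  }
  void set_size(size_t size) { size_ = size; }   // was: image._length = size;
  uint8_t* data() { return buffer_; }            // was: image._buffer
  size_t size() const { return size_; }          // was: image._length
  size_t capacity() const { return capacity_; }  // total allocated bytes

 private:
  uint8_t* buffer_ = nullptr;
  size_t size_ = 0;      // payload bytes actually in use
  size_t capacity_ = 0;  // full allocation, >= size_
};

// Usage mirroring the call sites below: copy a payload, then record its size.
void CopyPayload(EncodedImageSketch* image, const uint8_t* payload,
                 size_t payload_size) {
  memcpy(image->data(), payload, payload_size);
  image->set_size(payload_size);
}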
@@ -241,7 +241,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
     ReportError();
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  if (!input_image._buffer || !input_image._length) {
+  if (!input_image.data() || !input_image.size()) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
@@ -254,24 +254,23 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   // FFmpeg requires padding due to some optimized bitstream readers reading 32
   // or 64 bits at once and could read over the end. See avcodec_decode_video2.
   RTC_CHECK_GE(input_image.capacity(),
-               input_image._length +
+               input_image.size() +
                    EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
   // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
   // bitstreams could cause overread and segfault." See
   // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
-  memset(input_image._buffer + input_image._length,
-         0,
+  memset(input_image._buffer + input_image.size(), 0,
          EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));

   AVPacket packet;
   av_init_packet(&packet);
   packet.data = input_image._buffer;
-  if (input_image._length >
+  if (input_image.size() >
       static_cast<size_t>(std::numeric_limits<int>::max())) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
-  packet.size = static_cast<int>(input_image._length);
+  packet.size = static_cast<int>(input_image.size());
   int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000;  // ms -> μs
   av_context_->reordered_opaque = frame_timestamp_us;

@@ -318,8 +317,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,

   absl::optional<uint8_t> qp;
   // TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
-  h264_bitstream_parser_.ParseBitstream(input_image._buffer,
-                                        input_image._length);
+  h264_bitstream_parser_.ParseBitstream(input_image.data(), input_image.size());
   int qp_int;
   if (h264_bitstream_parser_.GetLastSliceQp(&qp_int)) {
     qp.emplace(qp_int);
@@ -135,7 +135,7 @@ static void RtpFragmentize(EncodedImage* encoded_image,
   const uint8_t start_code[4] = {0, 0, 0, 1};
   frag_header->VerifyAndAllocateFragmentationHeader(fragments_count);
   size_t frag = 0;
-  encoded_image->_length = 0;
+  encoded_image->set_size(0);
   for (int layer = 0; layer < info->iLayerNum; ++layer) {
     const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
     // Iterate NAL units making up this layer, noting fragments.
@@ -149,15 +149,15 @@ static void RtpFragmentize(EncodedImage* encoded_image,
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
       RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
       frag_header->fragmentationOffset[frag] =
-          encoded_image->_length + layer_len + sizeof(start_code);
+          encoded_image->size() + layer_len + sizeof(start_code);
       frag_header->fragmentationLength[frag] =
           layerInfo.pNalLengthInByte[nal] - sizeof(start_code);
       layer_len += layerInfo.pNalLengthInByte[nal];
     }
     // Copy the entire layer's data (including start codes).
-    memcpy(encoded_image->_buffer + encoded_image->_length, layerInfo.pBsBuf,
+    memcpy(encoded_image->data() + encoded_image->size(), layerInfo.pBsBuf,
            layer_len);
-    encoded_image->_length += layer_len;
+    encoded_image->set_size(encoded_image->size() + layer_len);
   }
 }

@@ -308,7 +308,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
     encoded_images_[i]._completeFrame = true;
     encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
     encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;
-    encoded_images_[i]._length = 0;
+    encoded_images_[i].set_size(0);
   }

   SimulcastRateAllocator init_allocator(codec_);
@@ -519,10 +519,10 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,

       // Encoder can skip frames to save bandwidth in which case
       // |encoded_images_[i]._length| == 0.
-      if (encoded_images_[i]._length > 0) {
+      if (encoded_images_[i].size() > 0) {
         // Parse QP.
-        h264_bitstream_parser_.ParseBitstream(encoded_images_[i]._buffer,
-                                              encoded_images_[i]._length);
+        h264_bitstream_parser_.ParseBitstream(encoded_images_[i].data(),
+                                              encoded_images_[i].size());
         h264_bitstream_parser_.GetLastSliceQp(&encoded_images_[i].qp_);

         // Deliver encoded image.
@@ -128,7 +128,7 @@ MultiplexImageComponentHeader UnpackFrameHeader(uint8_t* buffer) {
 }

 void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
-  memcpy(buffer, image.encoded_image._buffer, image.encoded_image._length);
+  memcpy(buffer, image.encoded_image.data(), image.encoded_image.size());
 }

 MultiplexImage::MultiplexImage(uint16_t picture_index,
@@ -170,7 +170,7 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(images[i].codec_type);
     frame_header.bitstream_length =
-        static_cast<uint32_t>(images[i].encoded_image._length + padding);
+        static_cast<uint32_t>(images[i].encoded_image.size() + padding);
     bitstream_offset += frame_header.bitstream_length;

     frame_header.codec_type = images[i].codec_type;
@@ -188,9 +188,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
     frame_headers.push_back(frame_header);
   }

-  combined_image._length = bitstream_offset;
-  combined_image.set_buffer(new uint8_t[combined_image._length],
-                            combined_image._length);
+  combined_image.set_buffer(new uint8_t[bitstream_offset], bitstream_offset);
+  combined_image.set_size(bitstream_offset);

   // header
   header_offset = PackHeader(combined_image._buffer, header);
@@ -268,7 +267,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
         static_cast<size_t>(frame_headers[i].bitstream_length));
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(image_component.codec_type);
-    encoded_image._length = encoded_image.capacity() - padding;
+    encoded_image.set_size(encoded_image.capacity() - padding);

     image_component.encoded_image = encoded_image;

@@ -275,9 +275,11 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
   image_component.codec_type =
       PayloadStringToCodecType(associated_format_.name);
   image_component.encoded_image = encodedImage;
-  image_component.encoded_image._buffer = new uint8_t[encodedImage._length];
-  std::memcpy(image_component.encoded_image._buffer, encodedImage._buffer,
-              encodedImage._length);
+  image_component.encoded_image.set_buffer(new uint8_t[encodedImage.size()],
+                                           encodedImage.size());
+  image_component.encoded_image.set_size(encodedImage.size());
+  std::memcpy(image_component.encoded_image.data(), encodedImage.data(),
+              encodedImage.size());

   rtc::CritScope cs(&crit_);
   const auto& stashed_image_itr =
@@ -320,8 +320,7 @@ void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
   bool contains_pps = false;
   bool contains_idr = false;
   const std::vector<webrtc::H264::NaluIndex> nalu_indices =
-      webrtc::H264::FindNaluIndices(encoded_frame._buffer,
-                                    encoded_frame._length);
+      webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());
   for (const webrtc::H264::NaluIndex& index : nalu_indices) {
     webrtc::H264::NaluType nalu_type = webrtc::H264::ParseNaluType(
         encoded_frame._buffer[index.payload_start_offset]);
@@ -45,11 +45,9 @@ class QpFrameChecker : public VideoCodecTestFixture::EncodedFrameChecker {
                  const EncodedImage& encoded_frame) const override {
     int qp;
     if (codec == kVideoCodecVP8) {
-      EXPECT_TRUE(
-          vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+      EXPECT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     } else if (codec == kVideoCodecVP9) {
-      EXPECT_TRUE(
-          vp9::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+      EXPECT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     } else {
       RTC_NOTREACHED();
     }
@@ -51,8 +51,7 @@ size_t GetMaxNaluSizeBytes(const EncodedImage& encoded_frame,
     return 0;

   std::vector<webrtc::H264::NaluIndex> nalu_indices =
-      webrtc::H264::FindNaluIndices(encoded_frame._buffer,
-                                    encoded_frame._length);
+      webrtc::H264::FindNaluIndices(encoded_frame.data(), encoded_frame.size());

   RTC_CHECK(!nalu_indices.empty());
@@ -392,7 +391,7 @@ void VideoProcessor::FrameEncoded(
       frame_stat->encode_start_ns, encode_stop_ns - post_encode_time_ns_);
   frame_stat->target_bitrate_kbps =
       bitrate_allocation_.GetTemporalLayerSum(spatial_idx, temporal_idx) / 1000;
-  frame_stat->length_bytes = encoded_image._length;
+  frame_stat->length_bytes = encoded_image.size();
   frame_stat->frame_type = encoded_image._frameType;
   frame_stat->temporal_idx = temporal_idx;
   frame_stat->max_nalu_size_bytes = GetMaxNaluSizeBytes(encoded_image, config_);
@@ -554,7 +553,7 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
   RTC_CHECK_GT(config_.NumberOfSpatialLayers(), 1);

   EncodedImage base_image;
-  RTC_CHECK_EQ(base_image._length, 0);
+  RTC_CHECK_EQ(base_image.size(), 0);

   // Each SVC layer is decoded with dedicated decoder. Find the nearest
   // non-dropped base frame and merge it and current frame into superframe.
@@ -568,29 +567,29 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
       }
     }
   }
-  const size_t payload_size_bytes = base_image._length + encoded_image._length;
+  const size_t payload_size_bytes = base_image.size() + encoded_image.size();
   const size_t buffer_size_bytes =
       payload_size_bytes + EncodedImage::GetBufferPaddingBytes(codec);

   uint8_t* copied_buffer = new uint8_t[buffer_size_bytes];
   RTC_CHECK(copied_buffer);

-  if (base_image._length) {
+  if (base_image.size()) {
     RTC_CHECK(base_image._buffer);
-    memcpy(copied_buffer, base_image._buffer, base_image._length);
+    memcpy(copied_buffer, base_image.data(), base_image.size());
   }
-  memcpy(copied_buffer + base_image._length, encoded_image._buffer,
-         encoded_image._length);
+  memcpy(copied_buffer + base_image.size(), encoded_image.data(),
+         encoded_image.size());

-  EncodedImage copied_image = encoded_image;
+  copied_image = encoded_image;
   copied_image.set_buffer(copied_buffer, buffer_size_bytes);
-  copied_image._length = payload_size_bytes;
+  copied_image.set_size(payload_size_bytes);

   // Replace previous EncodedImage for this spatial layer.
-  uint8_t* old_buffer = merged_encoded_frames_.at(spatial_idx)._buffer;
-  if (old_buffer) {
-    delete[] old_buffer;
+  uint8_t* old_data = merged_encoded_frames_.at(spatial_idx).data();
+  if (old_data) {
+    delete[] old_data;
   }
   merged_encoded_frames_.at(spatial_idx) = copied_image;

@@ -109,7 +109,7 @@ class VideoProcessor {
                    const webrtc::EncodedImage& encoded_image,
                    const webrtc::CodecSpecificInfo* codec_specific_info)
         : video_processor_(video_processor),
-          buffer_(encoded_image._buffer, encoded_image._length),
+          buffer_(encoded_image._buffer, encoded_image.size()),
           encoded_image_(encoded_image),
           codec_specific_info_(*codec_specific_info) {
       encoded_image_._buffer = buffer_.data();
@@ -161,7 +161,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   if (decode_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  if (input_image._buffer == NULL && input_image._length > 0) {
+  if (input_image.data() == NULL && input_image.size() > 0) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0)
       propagation_cnt_ = 0;
@@ -249,10 +249,10 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
   }

   uint8_t* buffer = input_image._buffer;
-  if (input_image._length == 0) {
+  if (input_image.size() == 0) {
     buffer = NULL;  // Triggers full frame concealment.
   }
-  if (vpx_codec_decode(decoder_, buffer, input_image._length, 0,
+  if (vpx_codec_decode(decoder_, buffer, input_image.size(), 0,
                        kDecodeDeadlineRealtime)) {
     // Reset to avoid requesting key frames too often.
     if (propagation_cnt_ > 0) {
@@ -861,7 +861,7 @@ void LibvpxVp8Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
     int qp = 0;
     vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
     temporal_layers_[stream_idx]->OnEncodeDone(
-        timestamp, encoded_images_[encoder_idx]._length,
+        timestamp, encoded_images_[encoder_idx].size(),
         (pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0, qp, vp8Info);
   }

@@ -871,7 +871,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
   for (size_t encoder_idx = 0; encoder_idx < encoders_.size();
        ++encoder_idx, --stream_idx) {
     vpx_codec_iter_t iter = NULL;
-    encoded_images_[encoder_idx]._length = 0;
+    encoded_images_[encoder_idx].set_size(0);
     encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
     CodecSpecificInfo codec_specific;
     const vpx_codec_cx_pkt_t* pkt = NULL;
@@ -879,7 +879,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
            NULL) {
       switch (pkt->kind) {
         case VPX_CODEC_CX_FRAME_PKT: {
-          size_t length = encoded_images_[encoder_idx]._length;
+          size_t length = encoded_images_[encoder_idx].size();
           if (pkt->data.frame.sz + length >
               encoded_images_[encoder_idx].capacity()) {
             uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
@@ -890,8 +890,8 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
           }
           memcpy(&encoded_images_[encoder_idx]._buffer[length],
                  pkt->data.frame.buf, pkt->data.frame.sz);
-          encoded_images_[encoder_idx]._length += pkt->data.frame.sz;
-          assert(length <= encoded_images_[encoder_idx].capacity());
+          encoded_images_[encoder_idx].set_size(
+              encoded_images_[encoder_idx].size() + pkt->data.frame.sz);
           break;
         }
         default:
@@ -921,9 +921,9 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
     encoded_images_[encoder_idx].SetColorSpace(input_image.color_space());

     if (send_stream_[stream_idx]) {
-      if (encoded_images_[encoder_idx]._length > 0) {
+      if (encoded_images_[encoder_idx].size() > 0) {
         TRACE_COUNTER_ID1("webrtc", "EncodedFrameSize", encoder_idx,
-                          encoded_images_[encoder_idx]._length);
+                          encoded_images_[encoder_idx].size());
         encoded_images_[encoder_idx]._encodedHeight =
             codec_.simulcastStream[stream_idx].height;
         encoded_images_[encoder_idx]._encodedWidth =
@@ -937,7 +937,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
     } else if (!temporal_layers_[stream_idx]
                     ->SupportsEncoderFrameDropping()) {
       result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
-      if (encoded_images_[encoder_idx]._length == 0) {
+      if (encoded_images_[encoder_idx].size() == 0) {
         // Dropped frame that will be re-encoded.
         temporal_layers_[stream_idx]->OnEncodeDone(input_image.timestamp(), 0,
                                                    false, 0, nullptr);
@@ -98,8 +98,8 @@ class TestVp8Impl : public VideoCodecUnitTest {

   void VerifyQpParser(const EncodedImage& encoded_frame) const {
     int qp;
-    EXPECT_GT(encoded_frame._length, 0u);
-    ASSERT_TRUE(vp8::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+    EXPECT_GT(encoded_frame.size(), 0u);
+    ASSERT_TRUE(vp8::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
     EXPECT_EQ(encoded_frame.qp_, qp) << "Encoder QP != parsed bitstream QP.";
   }
 };

@@ -243,7 +243,7 @@ TEST_F(TestVp9Impl, ParserQpEqualsEncodedQp) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));

   int qp = 0;
-  ASSERT_TRUE(vp9::GetQp(encoded_frame._buffer, encoded_frame._length, &qp));
+  ASSERT_TRUE(vp9::GetQp(encoded_frame.data(), encoded_frame.size(), &qp));
   EXPECT_EQ(encoded_frame.qp_, qp);
 }

@@ -1263,7 +1263,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
                              pkt->data.frame.sz);
   }
   memcpy(encoded_image_._buffer, pkt->data.frame.buf, pkt->data.frame.sz);
-  encoded_image_._length = pkt->data.frame.sz;
+  encoded_image_.set_size(pkt->data.frame.sz);

   const bool is_key_frame =
       (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
@@ -1276,7 +1276,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
     encoded_image_._frameType = kVideoFrameKey;
     force_key_frame_ = false;
   }
-  RTC_DCHECK_LE(encoded_image_._length, encoded_image_.capacity());
+  RTC_DCHECK_LE(encoded_image_.size(), encoded_image_.capacity());

   memset(&codec_specific_, 0, sizeof(codec_specific_));
   absl::optional<int> spatial_index;
@@ -1288,7 +1288,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
     UpdateReferenceBuffers(*pkt, pics_since_key_);
   }

-  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
+  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
   encoded_image_.SetTimestamp(input_image_->timestamp());
   encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
   encoded_image_.rotation_ = input_image_->rotation();
@@ -1315,7 +1315,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
 }

 void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
-  if (encoded_image_._length > 0) {
+  if (encoded_image_.size() > 0) {
     codec_specific_.codecSpecific.VP9.end_of_picture = end_of_picture;

     // No data partitioning in VP9, so 1 partition only.
@@ -1323,13 +1323,13 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
     RTPFragmentationHeader frag_info;
     frag_info.VerifyAndAllocateFragmentationHeader(1);
     frag_info.fragmentationOffset[part_idx] = 0;
-    frag_info.fragmentationLength[part_idx] = encoded_image_._length;
+    frag_info.fragmentationLength[part_idx] = encoded_image_.size();
     frag_info.fragmentationPlType[part_idx] = 0;
     frag_info.fragmentationTimeDiff[part_idx] = 0;

     encoded_complete_callback_->OnEncodedImage(encoded_image_, &codec_specific_,
                                                &frag_info);
-    encoded_image_._length = 0;
+    encoded_image_.set_size(0);

     if (codec_.mode == VideoCodecMode::kScreensharing) {
       const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
@@ -1432,13 +1432,13 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   vpx_codec_iter_t iter = nullptr;
   vpx_image_t* img;
   uint8_t* buffer = input_image._buffer;
-  if (input_image._length == 0) {
+  if (input_image.size() == 0) {
     buffer = nullptr;  // Triggers full frame concealment.
   }
   // During decode libvpx may get and release buffers from |frame_buffer_pool_|.
   // In practice libvpx keeps a few (~3-4) buffers alive at a time.
   if (vpx_codec_decode(decoder_, buffer,
-                       static_cast<unsigned int>(input_image._length), 0,
+                       static_cast<unsigned int>(input_image.size()), 0,
                        VPX_DL_REALTIME)) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
@@ -52,7 +52,7 @@ void VCMEncodedFrame::Reset() {
   _encodedHeight = 0;
   _completeFrame = false;
   _missingFrame = false;
-  _length = 0;
+  set_size(0);
   _codecSpecificInfo.codecType = kVideoCodecGeneric;
   _codec = kVideoCodecGeneric;
   rotation_ = kVideoRotation_0;
@@ -138,8 +138,8 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
   } else if (retVal == -3) {
     return kOutOfBoundsPacket;
   }
-  // update length
-  _length = size() + static_cast<uint32_t>(retVal);
+  // update size
+  set_size(size() + static_cast<uint32_t>(retVal));

   _latestPacketTimeMs = timeInMs;

@@ -216,7 +216,7 @@ int VCMFrameBuffer::NumPackets() const {

 void VCMFrameBuffer::Reset() {
   TRACE_EVENT0("webrtc", "VCMFrameBuffer::Reset");
-  _length = 0;
+  set_size(0);
   _sessionInfo.Reset();
   _payloadType = 0;
   _nackCount = 0;
@@ -265,7 +265,7 @@ VCMFrameBufferStateEnum VCMFrameBuffer::GetState() const {
 void VCMFrameBuffer::PrepareForDecode(bool continuous) {
   TRACE_EVENT0("webrtc", "VCMFrameBuffer::PrepareForDecode");
   size_t bytes_removed = _sessionInfo.MakeDecodable();
-  _length -= bytes_removed;
+  set_size(size() - bytes_removed);
   // Transfer frame information to EncodedFrame and create any codec
   // specific information.
   _frameType = _sessionInfo.FrameType();
||||
@ -91,10 +91,6 @@ class FrameObjectFake : public EncodedFrame {
|
||||
int64_t ReceivedTime() const override { return 0; }
|
||||
|
||||
int64_t RenderTime() const override { return _renderTimeMs; }
|
||||
|
||||
// In EncodedImage |_length| is used to descibe its size and |_size| to
|
||||
// describe its capacity.
|
||||
void SetSize(int size) { _length = size; }
|
||||
};
|
||||
|
||||
class VCMReceiveStatisticsCallbackMock : public VCMReceiveStatisticsCallback {
|
||||
@ -168,7 +164,7 @@ class TestFrameBuffer2 : public ::testing::Test {
|
||||
frame->is_last_spatial_layer = last_spatial_layer;
|
||||
// Add some data to buffer.
|
||||
frame->VerifyAndAllocate(kFrameSize);
|
||||
frame->SetSize(kFrameSize);
|
||||
frame->set_size(kFrameSize);
|
||||
for (size_t r = 0; r < references.size(); ++r)
|
||||
frame->references[r] = references[r];
|
||||
|
||||
@ -491,7 +487,7 @@ TEST_F(TestFrameBuffer2, StatsCallback) {
|
||||
{
|
||||
std::unique_ptr<FrameObjectFake> frame(new FrameObjectFake());
|
||||
frame->VerifyAndAllocate(kFrameSize);
|
||||
frame->SetSize(kFrameSize);
|
||||
frame->set_size(kFrameSize);
|
||||
frame->id.picture_id = pid;
|
||||
frame->id.spatial_layer = 0;
|
||||
frame->SetTimestamp(ts);
|
||||
|
||||
@@ -139,11 +139,6 @@ int64_t RtpFrameObject::RenderTime() const {
   return _renderTimeMs;
 }

-void RtpFrameObject::SetSize(size_t size) {
-  RTC_DCHECK_LE(size, capacity());
-  _length = size;
-}
-
 bool RtpFrameObject::delayed_by_retransmission() const {
   return times_nacked() > 0;
 }
@@ -188,7 +183,7 @@ void RtpFrameObject::AllocateBitstreamBuffer(size_t frame_size) {
     set_buffer(new uint8_t[new_size], new_size);
   }

-  _length = frame_size;
+  set_size(frame_size);
 }

 }  // namespace video_coding
@@ -40,7 +40,6 @@ class RtpFrameObject : public EncodedFrame {
   VideoCodecType codec_type() const;
   int64_t ReceivedTime() const override;
   int64_t RenderTime() const override;
-  void SetSize(size_t size);
   bool delayed_by_retransmission() const override;
   absl::optional<RTPVideoHeader> GetRtpVideoHeader() const;
   absl::optional<RtpGenericFrameDescriptor> GetGenericFrameDescriptor() const;
@@ -327,7 +327,7 @@ void VCMEncodedFrameCallback::FillTimingInfo(size_t simulcast_svc_idx,

   // Outliers trigger timing frames, but do not affect scheduled timing
   // frames.
-  if (outlier_frame_size && encoded_image->_length >= *outlier_frame_size) {
+  if (outlier_frame_size && encoded_image->size() >= *outlier_frame_size) {
     timing_flags |= VideoSendTiming::kTriggeredBySize;
   }

@@ -82,6 +82,7 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
       {delay_ms, kDefaultOutlierFrameSizePercent});
   callback.OnFrameRateChanged(kFramerate);
   int s, i;
+  std::vector<uint8_t> frame_data(max_frame_size);
   std::vector<std::vector<FrameType>> result(num_streams);
   for (s = 0; s < num_streams; ++s)
     callback.OnTargetBitrateChanged(average_frame_sizes[s] * kFramerate, s);
@@ -94,7 +95,8 @@ std::vector<std::vector<FrameType>> GetTimingFrames(

       EncodedImage image;
       CodecSpecificInfo codec_specific;
-      image._length = FrameSize(min_frame_size, max_frame_size, s, i);
+      image.set_buffer(frame_data.data(), frame_data.size());
+      image.set_size(FrameSize(min_frame_size, max_frame_size, s, i));
       image.capture_time_ms_ = current_timestamp;
       image.SetTimestamp(static_cast<uint32_t>(current_timestamp * 90));
       image.SetSpatialIndex(s);
@@ -187,7 +189,9 @@ TEST(TestVCMEncodedFrameCallback, NoTimingFrameIfNoEncodeStartTime) {
   EncodedImage image;
   CodecSpecificInfo codec_specific;
   int64_t timestamp = 1;
-  image._length = 500;
+  uint8_t frame_data[500];
+  image.set_buffer(frame_data, sizeof(frame_data));
+  image.set_size(sizeof(frame_data));
   image.capture_time_ms_ = timestamp;
   image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   codec_specific.codecType = kVideoCodecGeneric;
@@ -218,7 +222,9 @@ TEST(TestVCMEncodedFrameCallback, AdjustsCaptureTimeForInternalSourceEncoder) {
   const int64_t kEncodeStartDelayMs = 2;
   const int64_t kEncodeFinishDelayMs = 10;
   int64_t timestamp = 1;
-  image._length = 500;
+  uint8_t frame_data[500];
+  image.set_buffer(frame_data, sizeof(frame_data));
+  image.set_size(sizeof(frame_data));
   image.capture_time_ms_ = timestamp;
   image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
   codec_specific.codecType = kVideoCodecGeneric;
@@ -160,7 +160,7 @@ bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,

   const size_t kFrameHeaderSize = 12;
   if (byte_limit_ != 0 &&
-      bytes_written_ + kFrameHeaderSize + encoded_image._length > byte_limit_) {
+      bytes_written_ + kFrameHeaderSize + encoded_image.size() > byte_limit_) {
     RTC_LOG(LS_WARNING) << "Closing IVF file due to reaching size limit: "
                         << byte_limit_ << " bytes.";
     Close();
@@ -168,16 +168,16 @@ bool IvfFileWriter::WriteFrame(const EncodedImage& encoded_image,
   }
   uint8_t frame_header[kFrameHeaderSize] = {};
   ByteWriter<uint32_t>::WriteLittleEndian(
-      &frame_header[0], static_cast<uint32_t>(encoded_image._length));
+      &frame_header[0], static_cast<uint32_t>(encoded_image.size()));
   ByteWriter<uint64_t>::WriteLittleEndian(&frame_header[4], timestamp);
   if (file_.Write(frame_header, kFrameHeaderSize) < kFrameHeaderSize ||
-      file_.Write(encoded_image._buffer, encoded_image._length) <
-          encoded_image._length) {
+      file_.Write(encoded_image.data(), encoded_image.size()) <
+          encoded_image.size()) {
     RTC_LOG(LS_ERROR) << "Unable to write frame to file.";
     return false;
   }

-  bytes_written_ += kFrameHeaderSize + encoded_image._length;
+  bytes_written_ += kFrameHeaderSize + encoded_image.size();

   ++num_frames_;
   return true;
@@ -41,11 +41,11 @@ class IvfFileWriterTest : public ::testing::Test {
                   int num_frames,
                   bool use_capture_tims_ms) {
     EncodedImage frame;
-    frame._buffer = dummy_payload;
+    frame.set_buffer(dummy_payload, sizeof(dummy_payload));
     frame._encodedWidth = width;
     frame._encodedHeight = height;
     for (int i = 1; i <= num_frames; ++i) {
-      frame._length = i % sizeof(dummy_payload);
+      frame.set_size(i % sizeof(dummy_payload));
      if (use_capture_tims_ms) {
        frame.capture_time_ms_ = i;
      } else {
@@ -85,18 +85,18 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
       delete[] encoded_key_frame_._buffer;
       encoded_key_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                     encoded_image.capacity());
-      encoded_key_frame_._length = encoded_image._length;
+      encoded_key_frame_.set_size(encoded_image.size());
       encoded_key_frame_._frameType = kVideoFrameKey;
       encoded_key_frame_._completeFrame = encoded_image._completeFrame;
       memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
-             encoded_image._length);
+             encoded_image.size());
     } else {
       delete[] encoded_frame_._buffer;
       encoded_frame_.set_buffer(new uint8_t[encoded_image.capacity()],
                                 encoded_image.capacity());
-      encoded_frame_._length = encoded_image._length;
+      encoded_frame_.set_size(encoded_image.size());
       memcpy(encoded_frame_._buffer, encoded_image._buffer,
-             encoded_image._length);
+             encoded_image.size());
     }
   }
   if (is_vp8) {
@@ -858,11 +858,11 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
         encoded_frame[index].set_buffer(
             new uint8_t[encoded_image.capacity()],
             encoded_image.capacity());
-        encoded_frame[index]._length = encoded_image._length;
+        encoded_frame[index].set_size(encoded_image.size());
         encoded_frame[index]._frameType = encoded_image._frameType;
         encoded_frame[index]._completeFrame = encoded_image._completeFrame;
         memcpy(encoded_frame[index]._buffer, encoded_image._buffer,
-               encoded_image._length);
+               encoded_image.size());
         return EncodedImageCallback::Result(
             EncodedImageCallback::Result::OK, 0);
       }));
@@ -643,7 +643,7 @@ TEST_P(TestPacketBufferH264Parameterized, GetBitstreamBufferPadding) {
   packet_buffer_->InsertPacket(&packet);

   ASSERT_EQ(1UL, frames_from_callback_.size());
-  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage()._length,
+  EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().size(),
             sizeof(data_data));
   EXPECT_EQ(frames_from_callback_[seq_num]->EncodedImage().capacity(),
             sizeof(data_data) + EncodedImage::kBufferPaddingBytesH264);
@@ -104,7 +104,7 @@ class EncodedImageCallbackImpl : public EncodedImageCallback {
                     const RTPFragmentationHeader* fragmentation) override {
     assert(codec_specific_info);
     frame_data_.push_back(
-        FrameData(encoded_image._length, *codec_specific_info));
+        FrameData(encoded_image.size(), *codec_specific_info));
     return Result(Result::OK, encoded_image.Timestamp());
   }
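A closing note on the size()/capacity() distinction the hunks above lean on: size() is the payload length (the old _length), while capacity() is the full allocation, which for some codecs includes trailing padding (EncodedImage::GetBufferPaddingBytes). A hedged sketch of that allocation pattern, mirroring the memset in H264DecoderImpl::Decode above; the names here are illustrative only, and the caller owns the returned buffer:

#include <cstddef>
#include <cstdint>
#include <cstring>

struct PaddedBuffer {
  uint8_t* buffer;   // allocation of `capacity` bytes, owned by the caller
  size_t size;       // payload bytes, what size()/set_size() track
  size_t capacity;   // payload + codec padding, what capacity() reports
};

PaddedBuffer AllocatePadded(size_t payload_size, size_t padding_bytes) {
  PaddedBuffer b;
  b.capacity = payload_size + padding_bytes;
  b.buffer = new uint8_t[b.capacity];  // delete[] is the caller's job
  b.size = payload_size;
  // Zero the padding so optimized bitstream readers that read past the end
  // of the payload (see the FFmpeg comment above) never see stale bytes.
  memset(b.buffer + b.size, 0, padding_bytes);
  return b;
}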