Add owned data buffer to EncodedImage

Bug: webrtc:9378
Change-Id: I6a66b9301cbadf1d6517bf7a96028099970a20a3
Reviewed-on: https://webrtc-review.googlesource.com/c/117964
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#26585}
Author: Niels Möller
Date:   2019-02-07 00:02:17 +01:00
Committed by: Commit Bot
Commit: 938dd9f1e8
Parent: e6f6a0cb8d

17 changed files with 105 additions and 108 deletions
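Note: the class below is not from this change; it is a minimal sketch, inferred from the call sites in the diff, of the accessor split that an owned data buffer implies. data() stays a read-only view, mutable_data() gives writable access, and buffer()/set_buffer() keep exposing the legacy, caller-owned pointer that call sites still delete[] themselves. The class name and member layout are assumptions, not the real WebRTC EncodedImage declaration.

// Illustrative sketch only, not the real EncodedImage.
#include <cstddef>
#include <cstdint>

class EncodedImageLike {
 public:
  const uint8_t* data() const { return buffer_; }  // read-only payload view
  uint8_t* mutable_data() { return buffer_; }      // writable payload view
  // Legacy, caller-owned storage; call sites delete[] this themselves.
  uint8_t* buffer() const { return buffer_; }
  void set_buffer(uint8_t* buffer, size_t capacity) {
    buffer_ = buffer;
    capacity_ = capacity;
  }
  size_t size() const { return size_; }
  void set_size(size_t size) { size_ = size; }
  size_t capacity() const { return capacity_; }

 private:
  uint8_t* buffer_ = nullptr;  // not owned in this sketch
  size_t size_ = 0;
  size_t capacity_ = 0;
};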

@@ -259,12 +259,12 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
   // bitstreams could cause overread and segfault." See
   // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
-  memset(input_image.data() + input_image.size(), 0,
+  memset(input_image.mutable_data() + input_image.size(), 0,
          EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
   AVPacket packet;
   av_init_packet(&packet);
-  packet.data = input_image.data();
+  packet.data = input_image.mutable_data();
   if (input_image.size() >
       static_cast<size_t>(std::numeric_limits<int>::max())) {
     ReportError();

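The hunk above shows the writable-access side of the split: zeroing the FFmpeg padding and filling AVPacket::data both need a non-const pointer, so these call sites move from data() to mutable_data(). A minimal sketch of that pattern, assuming an EncodedImage-like type with the accessors used in this diff (ZeroTrailingPadding is a hypothetical helper, not WebRTC code):

#include <cstring>

// Hypothetical helper: zero the decoder padding that follows the payload.
// Assumes image.capacity() >= image.size() + padding_bytes.
template <typename EncodedImageLike>
void ZeroTrailingPadding(EncodedImageLike& image, size_t padding_bytes) {
  std::memset(image.mutable_data() + image.size(), 0, padding_bytes);
}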
@@ -221,7 +221,7 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
   for (size_t i = 0; i < images.size(); i++) {
     PackBitstream(combined_image.data() + frame_headers[i].bitstream_offset,
                   images[i]);
-    delete[] images[i].encoded_image.data();
+    delete[] images[i].encoded_image.buffer();
   }
   return combined_image;
@@ -263,7 +263,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
     encoded_image.SetTimestamp(combined_image.Timestamp());
     encoded_image._frameType = frame_headers[i].frame_type;
     encoded_image.set_buffer(
-        combined_image.data() + frame_headers[i].bitstream_offset,
+        combined_image.mutable_data() + frame_headers[i].bitstream_offset,
         static_cast<size_t>(frame_headers[i].bitstream_length));
     const size_t padding =
         EncodedImage::GetBufferPaddingBytes(image_component.codec_type);

@@ -253,8 +253,8 @@ int MultiplexEncoderAdapter::Release() {
     }
   }
   stashed_images_.clear();
-  if (combined_image_.data()) {
-    delete[] combined_image_.data();
+  if (combined_image_.buffer()) {
+    delete[] combined_image_.buffer();
     combined_image_.set_buffer(nullptr, 0);
   }
   return WEBRTC_VIDEO_CODEC_OK;
@@ -302,8 +302,8 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
       // We have to send out those stashed frames, otherwise the delta frame
      // dependency chain is broken.
-      if (combined_image_.data())
-        delete[] combined_image_.data();
+      if (combined_image_.buffer())
+        delete[] combined_image_.buffer();
       combined_image_ =
           MultiplexEncodedImagePacker::PackAndRelease(iter->second);

@@ -587,9 +587,9 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
   copied_image.set_size(payload_size_bytes);
   // Replace previous EncodedImage for this spatial layer.
-  uint8_t* old_data = merged_encoded_frames_.at(spatial_idx).data();
-  if (old_data) {
-    delete[] old_data;
+  uint8_t* old_buffer = merged_encoded_frames_.at(spatial_idx).buffer();
+  if (old_buffer) {
+    delete[] old_buffer;
   }
   merged_encoded_frames_.at(spatial_idx) = copied_image;

@@ -888,7 +888,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(const VideoFrame& input_image) {
           encoded_images_[encoder_idx].capacity()) {
         uint8_t* buffer = new uint8_t[pkt->data.frame.sz + length];
         memcpy(buffer, encoded_images_[encoder_idx].data(), length);
-        delete[] encoded_images_[encoder_idx].data();
+        delete[] encoded_images_[encoder_idx].buffer();
         encoded_images_[encoder_idx].set_buffer(
             buffer, pkt->data.frame.sz + length);
       }

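The VP8 hunk above shows the grow-then-replace pattern: when the next partition no longer fits, allocate a larger array, copy the bytes already written, free the old storage through buffer() rather than data(), and attach the new array with set_buffer(). A sketch of that pattern under the same EncodedImage-like assumption (GrowTo is a hypothetical helper):

#include <cstdint>
#include <cstring>

// Hypothetical helper: ensure the image can hold at least new_capacity bytes,
// preserving the first used_bytes of the existing payload.
template <typename EncodedImageLike>
void GrowTo(EncodedImageLike& image, size_t new_capacity, size_t used_bytes) {
  if (new_capacity <= image.capacity())
    return;
  uint8_t* grown = new uint8_t[new_capacity];
  std::memcpy(grown, image.data(), used_bytes);  // keep what is already encoded
  delete[] image.buffer();                       // old caller-owned allocation
  image.set_buffer(grown, new_capacity);
}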
@@ -185,8 +185,8 @@ VP9EncoderImpl::~VP9EncoderImpl() {
 int VP9EncoderImpl::Release() {
   int ret_val = WEBRTC_VIDEO_CODEC_OK;
-  if (encoded_image_.data() != nullptr) {
-    delete[] encoded_image_.data();
+  if (encoded_image_.buffer() != nullptr) {
+    delete[] encoded_image_.buffer();
     encoded_image_.set_buffer(nullptr, 0);
   }
   if (encoder_ != nullptr) {
@@ -1266,7 +1266,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   }
   if (pkt->data.frame.sz > encoded_image_.capacity()) {
-    delete[] encoded_image_.data();
+    delete[] encoded_image_.buffer();
     encoded_image_.set_buffer(new uint8_t[pkt->data.frame.sz],
                               pkt->data.frame.sz);
   }
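Across the multiplex, VideoProcessor, and VP9 hunks the release pattern is identical: query buffer(), delete[] it, then detach it with set_buffer(nullptr, 0); data() is no longer a pointer that call sites may free. A sketch of that recurring pattern, again against an assumed EncodedImage-like type (ReleaseLegacyBuffer is a hypothetical helper, not WebRTC code):

// Hypothetical helper mirroring the Release() call sites in this diff.
template <typename EncodedImageLike>
void ReleaseLegacyBuffer(EncodedImageLike& image) {
  if (image.buffer() != nullptr) {
    delete[] image.buffer();       // legacy, caller-owned new[] allocation
    image.set_buffer(nullptr, 0);  // leave the image empty and detached
  }
}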