New interface EncodedImageBufferInterface, replacing use of CopyOnWriteBuffer

Bug: webrtc:9378
Change-Id: I62b7adbd9dd539c545b5b1b1520721482a4623c4
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/138820
Reviewed-by: Kári Helgason <kthelgason@webrtc.org>
Reviewed-by: Sami Kalliomäki <sakal@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Commit-Queue: Niels Möller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#28317}
Author: Niels Möller
Date: 2019-06-18 15:56:56 +02:00
Committed by: Commit Bot
Parent: 0894f0fd76
Commit: 4d504c76cb
27 changed files with 215 additions and 134 deletions
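
As a reading aid before the per-file hunks: each call site below replaces EncodedImage::Allocate() (and explicit set_size()) with a buffer created by EncodedImageBuffer::Create() and attached via EncodedImage::SetEncodedData(). Below is a minimal sketch of the two patterns, using only the calls visible in the hunks (Create(size), Create(data, size), SetEncodedData(), data()); the wrapper functions and the assumption that the new types are declared in api/video/encoded_image.h are illustrative, not part of this change.

// Sketch only; the wrapper functions are illustrative, not from this CL.
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "api/video/encoded_image.h"  // assumed location of the new types

// Pattern used in the H264 and VP8 hunks: size the buffer up front, attach
// it to the image, then write the payload through EncodedImage::data().
webrtc::EncodedImage AllocateThenFill(const uint8_t* payload, size_t size) {
  webrtc::EncodedImage image;
  image.SetEncodedData(webrtc::EncodedImageBuffer::Create(size));
  memcpy(image.data(), payload, size);
  return image;
}

// Pattern used in the VP9 and multiplex-unpack hunks: copy an existing
// payload into a freshly created buffer in one step.
webrtc::EncodedImage CopyIntoImage(const uint8_t* payload, size_t size) {
  webrtc::EncodedImage image;
  image.SetEncodedData(webrtc::EncodedImageBuffer::Create(payload, size));
  return image;
}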


@@ -115,23 +115,8 @@ static void RtpFragmentize(EncodedImage* encoded_image,
required_capacity += layerInfo.pNalLengthInByte[nal];
}
}
if (encoded_image->capacity() < required_capacity) {
// Increase buffer size. Allocate enough to hold an unencoded image, this
// should be more than enough to hold any encoded data of future frames of
// the same size (avoiding possible future reallocation due to variations in
// required size).
size_t new_capacity = CalcBufferSize(VideoType::kI420, frame_buffer.width(),
frame_buffer.height());
if (new_capacity < required_capacity) {
// Encoded data > unencoded data. Allocate required bytes.
RTC_LOG(LS_WARNING)
<< "Encoding produced more bytes than the original image "
<< "data! Original bytes: " << new_capacity
<< ", encoded bytes: " << required_capacity << ".";
new_capacity = required_capacity;
}
encoded_image->Allocate(new_capacity);
}
// TODO(nisse): Use a cache or buffer pool to avoid allocation?
encoded_image->SetEncodedData(EncodedImageBuffer::Create(required_capacity));
// Iterate layers and NAL units, note each NAL unit as a fragment and copy
// the data to |encoded_image->_buffer|.
@@ -300,7 +285,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* inst,
const size_t new_capacity =
CalcBufferSize(VideoType::kI420, codec_.simulcastStream[idx].width,
codec_.simulcastStream[idx].height);
encoded_images_[i].Allocate(new_capacity);
encoded_images_[i].SetEncodedData(EncodedImageBuffer::Create(new_capacity));
encoded_images_[i]._completeFrame = true;
encoded_images_[i]._encodedWidth = codec_.simulcastStream[idx].width;
encoded_images_[i]._encodedHeight = codec_.simulcastStream[idx].height;


@@ -189,8 +189,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
frame_headers.push_back(frame_header);
}
combined_image.Allocate(bitstream_offset);
combined_image.set_size(bitstream_offset);
auto buffer = EncodedImageBuffer::Create(bitstream_offset);
combined_image.SetEncodedData(buffer);
// header
header_offset = PackHeader(combined_image.data(), header);
@@ -199,8 +199,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
// Frame Header
for (size_t i = 0; i < images.size(); i++) {
int relative_offset = PackFrameHeader(combined_image.data() + header_offset,
frame_headers[i]);
int relative_offset =
PackFrameHeader(buffer->data() + header_offset, frame_headers[i]);
RTC_DCHECK_EQ(relative_offset, kMultiplexImageComponentHeaderSize);
header_offset = frame_headers[i].next_component_header_offset;
@@ -213,16 +213,15 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
// Augmenting Data
if (multiplex_image.augmenting_data_size != 0) {
memcpy(combined_image.data() + header.augmenting_data_offset,
memcpy(buffer->data() + header.augmenting_data_offset,
multiplex_image.augmenting_data.get(),
multiplex_image.augmenting_data_size);
}
// Bitstreams
for (size_t i = 0; i < images.size(); i++) {
PackBitstream(combined_image.data() + frame_headers[i].bitstream_offset,
PackBitstream(buffer->data() + frame_headers[i].bitstream_offset,
images[i]);
delete[] images[i].encoded_image.buffer();
}
return combined_image;
@@ -263,11 +262,9 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
EncodedImage encoded_image = combined_image;
encoded_image.SetTimestamp(combined_image.Timestamp());
encoded_image._frameType = frame_headers[i].frame_type;
encoded_image.Allocate(frame_headers[i].bitstream_length);
encoded_image.set_size(frame_headers[i].bitstream_length);
memcpy(encoded_image.data(),
combined_image.data() + frame_headers[i].bitstream_offset,
frame_headers[i].bitstream_length);
encoded_image.SetEncodedData(EncodedImageBuffer::Create(
combined_image.data() + frame_headers[i].bitstream_offset,
frame_headers[i].bitstream_length));
image_component.encoded_image = encoded_image;
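
A note on the PackAndRelease hunks above: the new code keeps a local handle (buffer) and continues writing through buffer->data() after combined_image has already taken the buffer via SetEncodedData(), so the image and the local handle evidently share one underlying allocation rather than copy-on-write semantics. A small sketch of that shared-buffer pattern; the helper function is illustrative, not from this change.

// Sketch of the shared-buffer pattern from PackAndRelease above; the helper
// itself is illustrative and not part of this CL.
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "api/video/encoded_image.h"

webrtc::EncodedImage PackTwoParts(const uint8_t* a, size_t a_size,
                                  const uint8_t* b, size_t b_size) {
  auto buffer = webrtc::EncodedImageBuffer::Create(a_size + b_size);

  webrtc::EncodedImage combined;
  combined.SetEncodedData(buffer);  // |combined| now refers to |buffer|.

  // Continue filling through the local handle; combined.data() sees the
  // same bytes.
  memcpy(buffer->data(), a, a_size);
  memcpy(buffer->data() + a_size, b, b_size);
  return combined;
}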


@@ -568,7 +568,7 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
const size_t payload_size_bytes = base_image.size() + encoded_image.size();
EncodedImage copied_image = encoded_image;
copied_image.Allocate(payload_size_bytes);
copied_image.SetEncodedData(EncodedImageBuffer::Create(payload_size_bytes));
if (base_image.size()) {
RTC_CHECK(base_image.data());
memcpy(copied_image.data(), base_image.data(), base_image.size());


@@ -533,7 +533,8 @@ int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
// allocate memory for encoded image
size_t frame_capacity =
CalcBufferSize(VideoType::kI420, codec_.width, codec_.height);
encoded_images_[i].Allocate(frame_capacity);
encoded_images_[i].SetEncodedData(
EncodedImageBuffer::Create(frame_capacity));
encoded_images_[i]._completeFrame = true;
}
// populate encoder configuration with default values


@@ -458,10 +458,6 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1);
// Allocate memory for encoded image
size_t frame_capacity =
CalcBufferSize(VideoType::kI420, codec_.width, codec_.height);
encoded_image_.Allocate(frame_capacity);
encoded_image_._completeFrame = true;
// Populate encoder configuration with default values.
if (vpx_codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
@@ -1417,11 +1413,10 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
DeliverBufferedFrame(end_of_picture);
}
if (pkt->data.frame.sz > encoded_image_.capacity()) {
encoded_image_.Allocate(pkt->data.frame.sz);
}
memcpy(encoded_image_.data(), pkt->data.frame.buf, pkt->data.frame.sz);
encoded_image_.set_size(pkt->data.frame.sz);
// TODO(nisse): Introduce some buffer cache or buffer pool, to reduce
// allocations and/or copy operations.
encoded_image_.SetEncodedData(EncodedImageBuffer::Create(
static_cast<const uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz));
const bool is_key_frame =
(pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
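
Finally, none of the hunks above include the new header itself. Pieced together from the call sites (EncodedImageBuffer::Create(size), Create(data, size), buffer->data(), EncodedImage::SetEncodedData(buffer)), the interface plausibly looks roughly like the sketch below; the ref-counting base class, the size() accessor, and the include paths are assumptions rather than anything shown in this diff.

// Rough reconstruction for orientation only; not the actual header added by
// this CL. Anything not visible in the hunks above is an assumption.
#include <stddef.h>
#include <stdint.h>

#include "api/scoped_refptr.h"   // assumed location of rtc::scoped_refptr
#include "rtc_base/ref_count.h"  // assumed ref-counting base class

namespace webrtc {

// Read/write view of an encoded payload that EncodedImage copies can share.
class EncodedImageBufferInterface : public rtc::RefCountInterface {
 public:
  virtual const uint8_t* data() const = 0;
  virtual uint8_t* data() = 0;
  virtual size_t size() const = 0;
};

// Heap-backed implementation used by the call sites in this change.
class EncodedImageBuffer : public EncodedImageBufferInterface {
 public:
  // Allocates |size| bytes, to be filled in later through data().
  static rtc::scoped_refptr<EncodedImageBuffer> Create(size_t size);
  // Allocates |size| bytes and copies them from |data|.
  static rtc::scoped_refptr<EncodedImageBuffer> Create(const uint8_t* data,
                                                       size_t size);

  const uint8_t* data() const override;
  uint8_t* data() override;
  size_t size() const override;
};

}  // namespace webrtc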