Delete the non-const version of the EncodedImage::data() method.

Bug: webrtc:9378
Change-Id: I84ace3ca6a2eb4d0f7c3d4e62f815d77df581bfa
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/185122
Reviewed-by: Artem Titov <titovartem@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32197}
This commit is contained in:
Niels Möller
2020-09-23 15:58:12 +02:00
committed by Commit Bot
parent 9ccbe17abb
commit f2969fa868
16 changed files with 97 additions and 83 deletions

View File

@@ -110,7 +110,8 @@ static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
}
}
// TODO(nisse): Use a cache or buffer pool to avoid allocation?
encoded_image->SetEncodedData(EncodedImageBuffer::Create(required_capacity));
auto buffer = EncodedImageBuffer::Create(required_capacity);
encoded_image->SetEncodedData(buffer);
// Iterate layers and NAL units, note each NAL unit as a fragment and copy
// the data to |encoded_image->_buffer|.
@@ -132,8 +133,7 @@ static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
layer_len += layerInfo.pNalLengthInByte[nal];
}
// Copy the entire layer's data (including start codes).
memcpy(encoded_image->data() + encoded_image->size(), layerInfo.pBsBuf,
layer_len);
memcpy(buffer->data() + encoded_image->size(), layerInfo.pBsBuf, layer_len);
encoded_image->set_size(encoded_image->size() + layer_len);
}
}

View File

@@ -571,16 +571,16 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
}
const size_t payload_size_bytes = base_image.size() + encoded_image.size();
EncodedImage copied_image = encoded_image;
copied_image.SetEncodedData(EncodedImageBuffer::Create(payload_size_bytes));
auto buffer = EncodedImageBuffer::Create(payload_size_bytes);
if (base_image.size()) {
RTC_CHECK(base_image.data());
memcpy(copied_image.data(), base_image.data(), base_image.size());
memcpy(buffer->data(), base_image.data(), base_image.size());
}
memcpy(copied_image.data() + base_image.size(), encoded_image.data(),
memcpy(buffer->data() + base_image.size(), encoded_image.data(),
encoded_image.size());
copied_image.set_size(payload_size_bytes);
EncodedImage copied_image = encoded_image;
copied_image.SetEncodedData(buffer);
// Replace previous EncodedImage for this spatial layer.
merged_encoded_frames_.at(spatial_idx) = std::move(copied_image);

View File

@@ -134,7 +134,9 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(const VCMPacket& packet,
if (packet.sizeBytes > 0)
CopyCodecSpecific(&packet.video_header);
int retVal = _sessionInfo.InsertPacket(packet, data(), frame_data);
int retVal = _sessionInfo.InsertPacket(
packet, encoded_image_buffer_ ? encoded_image_buffer_->data() : nullptr,
frame_data);
if (retVal == -1) {
return kSizeError;
} else if (retVal == -2) {

View File

@@ -38,7 +38,8 @@ RtpFrameObject::RtpFrameObject(
const absl::optional<webrtc::ColorSpace>& color_space,
RtpPacketInfos packet_infos,
rtc::scoped_refptr<EncodedImageBuffer> image_buffer)
: first_seq_num_(first_seq_num),
: image_buffer_(image_buffer),
first_seq_num_(first_seq_num),
last_seq_num_(last_seq_num),
last_packet_received_time_(last_packet_received_time),
times_nacked_(times_nacked) {
@@ -60,7 +61,7 @@ RtpFrameObject::RtpFrameObject(
// as of the first packet's.
SetPlayoutDelay(rtp_video_header_.playout_delay);
SetEncodedData(std::move(image_buffer));
SetEncodedData(image_buffer_);
_encodedWidth = rtp_video_header_.width;
_encodedHeight = rtp_video_header_.height;

View File

@@ -48,7 +48,11 @@ class RtpFrameObject : public EncodedFrame {
bool delayed_by_retransmission() const override;
const RTPVideoHeader& GetRtpVideoHeader() const;
uint8_t* mutable_data() { return image_buffer_->data(); }
private:
// Reference for mutable access.
rtc::scoped_refptr<EncodedImageBuffer> image_buffer_;
RTPVideoHeader rtp_video_header_;
VideoCodecType codec_type_;
uint16_t first_seq_num_;

View File

@@ -81,20 +81,13 @@ class SimulcastTestFixtureImpl::TestEncodedImageCallback
// Only store the base layer.
if (encoded_image.SpatialIndex().value_or(0) == 0) {
if (encoded_image._frameType == VideoFrameType::kVideoFrameKey) {
// TODO(nisse): Why not size() ?
encoded_key_frame_.SetEncodedData(
EncodedImageBuffer::Create(encoded_image.size()));
encoded_key_frame_.set_size(encoded_image.size());
encoded_key_frame_.SetEncodedData(EncodedImageBuffer::Create(
encoded_image.data(), encoded_image.size()));
encoded_key_frame_._frameType = VideoFrameType::kVideoFrameKey;
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
memcpy(encoded_key_frame_.data(), encoded_image.data(),
encoded_image.size());
} else {
encoded_frame_.SetEncodedData(
EncodedImageBuffer::Create(encoded_image.size()));
encoded_frame_.set_size(encoded_image.size());
memcpy(encoded_frame_.data(), encoded_image.data(),
encoded_image.size());
encoded_frame_.SetEncodedData(EncodedImageBuffer::Create(
encoded_image.data(), encoded_image.size()));
}
}
if (is_vp8) {
@@ -873,13 +866,10 @@ void SimulcastTestFixtureImpl::TestDecodeWidthHeightSet() {
EXPECT_EQ(encoded_image._frameType, VideoFrameType::kVideoFrameKey);
size_t index = encoded_image.SpatialIndex().value_or(0);
encoded_frame[index].SetEncodedData(
EncodedImageBuffer::Create(encoded_image.size()));
encoded_frame[index].set_size(encoded_image.size());
encoded_frame[index].SetEncodedData(EncodedImageBuffer::Create(
encoded_image.data(), encoded_image.size()));
encoded_frame[index]._frameType = encoded_image._frameType;
encoded_frame[index]._completeFrame = encoded_image._completeFrame;
memcpy(encoded_frame[index].data(), encoded_image.data(),
encoded_image.size());
return EncodedImageCallback::Result(
EncodedImageCallback::Result::OK, 0);
}));