Add accessor methods for RTP timestamp of EncodedImage.

Intention is to make the member private, but downstream callers
must be updated to use the accessor methods first.

Bug: webrtc:9378
Change-Id: I3495bd8d545b7234fbea10abfd14f082caa420b6
Reviewed-on: https://webrtc-review.googlesource.com/82160
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24352}
This commit is contained in:
Niels Möller
2018-08-16 10:24:12 +02:00
committed by Commit Bot
parent bcdf5f1a94
commit 2377588c82
52 changed files with 163 additions and 166 deletions

View File

@@ -304,7 +304,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
VideoFrame::Builder()
.set_video_frame_buffer(input_frame->video_frame_buffer())
.set_timestamp_us(input_frame->timestamp_us())
.set_timestamp_rtp(input_image._timeStamp)
.set_timestamp_rtp(input_image.Timestamp())
.set_rotation(input_frame->rotation())
.set_color_space(color_space)
.build();

View File

@@ -496,7 +496,7 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
encoded_images_[i]._encodedWidth = configurations_[i].width;
encoded_images_[i]._encodedHeight = configurations_[i].height;
encoded_images_[i]._timeStamp = input_frame.timestamp();
encoded_images_[i].SetTimestamp(input_frame.timestamp());
encoded_images_[i].ntp_time_ms_ = input_frame.ntp_time_ms();
encoded_images_[i].capture_time_ms_ = input_frame.render_time_ms();
encoded_images_[i].rotation_ = input_frame.rotation();

View File

@@ -84,7 +84,7 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
}
_encodedImage._frameType = kVideoFrameKey;
_encodedImage._timeStamp = inputImage.timestamp();
_encodedImage.SetTimestamp(inputImage.timestamp());
_encodedImage._encodedHeight = inputImage.height();
_encodedImage._encodedWidth = inputImage.width();
@@ -200,7 +200,7 @@ int I420Decoder::Decode(const EncodedImage& inputImage,
return WEBRTC_VIDEO_CODEC_MEMORY;
}
VideoFrame decoded_image(frame_buffer, inputImage._timeStamp, 0,
VideoFrame decoded_image(frame_buffer, inputImage.Timestamp(), 0,
webrtc::kVideoRotation_0);
_decodeCompleteCallback->Decoded(decoded_image);
return WEBRTC_VIDEO_CODEC_OK;

View File

@@ -139,10 +139,10 @@ int32_t MultiplexDecoderAdapter::Decode(
}
if (image.component_count == 1) {
RTC_DCHECK(decoded_data_.find(input_image._timeStamp) ==
RTC_DCHECK(decoded_data_.find(input_image.Timestamp()) ==
decoded_data_.end());
decoded_data_.emplace(std::piecewise_construct,
std::forward_as_tuple(input_image._timeStamp),
std::forward_as_tuple(input_image.Timestamp()),
std::forward_as_tuple(kAXXStream));
}
int32_t rv = 0;

View File

@@ -258,7 +258,7 @@ MultiplexImage MultiplexEncodedImagePacker::Unpack(
image_component.codec_type = frame_headers[i].codec_type;
EncodedImage encoded_image = combined_image;
encoded_image._timeStamp = combined_image._timeStamp;
encoded_image.SetTimestamp(combined_image.Timestamp());
encoded_image._frameType = frame_headers[i].frame_type;
encoded_image._size =
static_cast<size_t>(frame_headers[i].bitstream_length);

View File

@@ -258,7 +258,8 @@ EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
encodedImage._length);
rtc::CritScope cs(&crit_);
const auto& stashed_image_itr = stashed_images_.find(encodedImage._timeStamp);
const auto& stashed_image_itr =
stashed_images_.find(encodedImage.Timestamp());
const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
RTC_DCHECK(stashed_image_itr != stashed_images_.end());
MultiplexImage& stashed_image = stashed_image_itr->second;

View File

@@ -352,7 +352,7 @@ void VideoProcessor::FrameEncoded(
GetLayerIndices(codec_specific, &spatial_idx, &temporal_idx);
FrameStatistics* frame_stat =
stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
const size_t frame_number = frame_stat->frame_number;
// Ensure that the encode order is monotonically increasing, within this
@@ -428,7 +428,7 @@ void VideoProcessor::FrameEncoded(
if (!layer_dropped) {
base_image = &merged_encoded_frames_[i];
base_stat =
stats_->GetFrameWithTimestamp(encoded_image._timeStamp, i);
stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), i);
} else if (base_image && !base_stat->non_ref_for_inter_layer_pred) {
DecodeFrame(*base_image, i);
}
@@ -526,7 +526,7 @@ void VideoProcessor::DecodeFrame(const EncodedImage& encoded_image,
size_t spatial_idx) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
FrameStatistics* frame_stat =
stats_->GetFrameWithTimestamp(encoded_image._timeStamp, spatial_idx);
stats_->GetFrameWithTimestamp(encoded_image.Timestamp(), spatial_idx);
frame_stat->decode_start_ns = rtc::TimeNanos();
frame_stat->decode_return_code =
@@ -551,7 +551,7 @@ const webrtc::EncodedImage* VideoProcessor::BuildAndStoreSuperframe(
for (int base_idx = static_cast<int>(spatial_idx) - 1; base_idx >= 0;
--base_idx) {
EncodedImage lower_layer = merged_encoded_frames_.at(base_idx);
if (lower_layer._timeStamp == encoded_image._timeStamp) {
if (lower_layer.Timestamp() == encoded_image.Timestamp()) {
base_image = lower_layer;
break;
}

View File

@@ -254,7 +254,7 @@ int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
vpx_codec_err_t vpx_ret =
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
ret = ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
if (ret != 0) {
// Reset to avoid requesting key frames too often.
if (ret < 0 && propagation_cnt_ > 0)

View File

@@ -881,7 +881,7 @@ int LibvpxVp8Encoder::GetEncodedPartitions(
break;
}
}
encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
encoded_images_[encoder_idx].SetTimestamp(input_image.timestamp());
encoded_images_[encoder_idx].capture_time_ms_ =
input_image.render_time_ms();
encoded_images_[encoder_idx].rotation_ = input_image.rotation();

View File

@@ -136,7 +136,7 @@ TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
CodecSpecificInfo codec_specific_info;
EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
EXPECT_EQ(kInitialTimestampRtp, encoded_frame._timeStamp);
EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));

View File

@@ -1009,7 +1009,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
}
TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
encoded_image_._timeStamp = input_image_->timestamp();
encoded_image_.SetTimestamp(input_image_->timestamp());
encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
encoded_image_.rotation_ = input_image_->rotation();
encoded_image_.content_type_ = (codec_.mode == VideoCodecMode::kScreensharing)
@@ -1046,9 +1046,9 @@ void VP9EncoderImpl::DeliverBufferedFrame(bool end_of_picture) {
if (end_of_picture) {
const uint32_t timestamp_ms =
1000 * encoded_image_._timeStamp / kVideoPayloadTypeFrequency;
1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
output_framerate_.Update(1, timestamp_ms);
last_encoded_frame_rtp_timestamp_ = encoded_image_._timeStamp;
last_encoded_frame_rtp_timestamp_ = encoded_image_.Timestamp();
}
}
}
@@ -1190,7 +1190,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
int ret =
ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
ReturnFrame(img, input_image.Timestamp(), input_image.ntp_time_ms_, qp);
if (ret != 0) {
return ret;
}