Revert of Add content type information to encoded images and corresponding rtp extension header (patchset #31 id:600001 of https://codereview.webrtc.org/2772033002/ )
Reason for revert:
Breaks dependent projects.
Original issue's description:
> Add content type information to encoded images and add a corresponding RTP extension header.
> Use it to separate the UMA e2e delay metric between screenshare and video.
> The content type extension is set based on encoder settings and processed by decoders.
>
> Also,
> Fix full-stack-tests to calculate RTT correctly, so the new metric can be tested.
>
> BUG=webrtc:7420
>
> Review-Url: https://codereview.webrtc.org/2772033002
> Cr-Commit-Position: refs/heads/master@{#17640}
> Committed: 64e739aeae
TBR=tommi@webrtc.org,sprang@webrtc.org,stefan@webrtc.org,nisse@webrtc.org,mflodman@webrtc.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:7420
Review-Url: https://codereview.webrtc.org/2816463002
Cr-Commit-Position: refs/heads/master@{#17644}
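For context, here is a minimal, self-contained C++ sketch of the idea behind the reverted change: the encoder tags each encoded frame as screenshare or regular video based on its configured mode, and the tag travels as a one-byte RTP header extension value to the receiver. The enum, struct, and function names below are illustrative stand-ins only (they mirror, but are not, the WebRTC types touched by this CL), and the one-byte wire encoding is an assumption.

```cpp
#include <cstdint>

// Illustrative stand-ins for the WebRTC types touched by the reverted CL.
enum class VideoContentType : uint8_t {
  UNSPECIFIED = 0,
  SCREENSHARE = 1,
};

enum class VideoCodecMode { kRealtimeVideo, kScreensharing };

struct EncodedImageSketch {
  // ... encoded payload, timestamps, rotation ...
  VideoContentType content_type = VideoContentType::UNSPECIFIED;
};

// Encoder side: derive the content type from the configured codec mode,
// mirroring the ternary removed from the H264/VP8/VP9 encoders below.
void TagEncodedImage(VideoCodecMode mode, EncodedImageSketch* image) {
  image->content_type = (mode == VideoCodecMode::kScreensharing)
                            ? VideoContentType::SCREENSHARE
                            : VideoContentType::UNSPECIFIED;
}

// RTP side: pack the tag into a single extension byte (assumed wire format).
uint8_t WriteContentTypeExtension(VideoContentType type) {
  return static_cast<uint8_t>(type);
}

// Receiver side: parse the byte back, falling back to UNSPECIFIED for
// values this sketch does not recognize.
VideoContentType ParseContentTypeExtension(uint8_t data) {
  return data == 1 ? VideoContentType::SCREENSHARE
                   : VideoContentType::UNSPECIFIED;
}
```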
@@ -367,9 +367,6 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
   encoded_image_.ntp_time_ms_ = input_frame.ntp_time_ms();
   encoded_image_.capture_time_ms_ = input_frame.render_time_ms();
   encoded_image_.rotation_ = input_frame.rotation();
-  encoded_image_.content_type_ = (mode_ == kScreensharing)
-                                     ? VideoContentType::SCREENSHARE
-                                     : VideoContentType::UNSPECIFIED;
   encoded_image_._frameType = ConvertToVideoFrameType(info.eFrameType);

   // Split encoded image up into fragments. This also updates |encoded_image_|.
@@ -878,9 +878,6 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image) {
     encoded_images_[encoder_idx].capture_time_ms_ =
         input_image.render_time_ms();
     encoded_images_[encoder_idx].rotation_ = input_image.rotation();
-    encoded_images_[encoder_idx].content_type_ =
-        (codec_.mode == kScreensharing) ? VideoContentType::SCREENSHARE
-                                        : VideoContentType::UNSPECIFIED;

     int qp = -1;
     vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
@@ -706,9 +706,6 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   encoded_image_._timeStamp = input_image_->timestamp();
   encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
   encoded_image_.rotation_ = input_image_->rotation();
-  encoded_image_.content_type_ = (codec_.mode == kScreensharing)
-                                     ? VideoContentType::SCREENSHARE
-                                     : VideoContentType::UNSPECIFIED;
   encoded_image_._encodedHeight = raw_->d_h;
   encoded_image_._encodedWidth = raw_->d_w;
   int qp = -1;
@@ -87,7 +87,6 @@ void VCMEncodedFrame::Reset() {
   _codecSpecificInfo.codecType = kVideoCodecUnknown;
   _codec = kVideoCodecUnknown;
   rotation_ = kVideoRotation_0;
-  content_type_ = VideoContentType::UNSPECIFIED;
   _rotation_set = false;
 }
@@ -77,12 +77,8 @@ class VCMEncodedFrame : protected EncodedImage {
    */
   VideoRotation rotation() const { return rotation_; }
   /**
-   * Get video content type
-   */
-  VideoContentType contentType() const { return content_type_; }
-  /**
-   * True if this frame is complete, false otherwise
-   */
+   * True if this frame is complete, false otherwise
+   */
   bool Complete() const { return _completeFrame; }
   /**
    * True if there's a frame missing before this frame
@@ -163,7 +163,6 @@ VCMFrameBufferEnum VCMFrameBuffer::InsertPacket(
     RTC_DCHECK(!_rotation_set);
     rotation_ = packet.video_header.rotation;
     _rotation_set = true;
-    content_type_ = packet.video_header.content_type;
   }

   if (packet.is_first_packet_in_frame) {
@@ -79,7 +79,6 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   // (HEVC)).
   rotation_ = last_packet->video_header.rotation;
   _rotation_set = true;
-  content_type_ = last_packet->video_header.content_type;
 }

 RtpFrameObject::~RtpFrameObject() {
@@ -87,7 +87,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
   decodedImage.set_timestamp_us(
       frameInfo->renderTimeMs * rtc::kNumMicrosecsPerMillisec);
   decodedImage.set_rotation(frameInfo->rotation);
-  _receiveCallback->FrameToRender(decodedImage, qp, frameInfo->content_type);
+  _receiveCallback->FrameToRender(decodedImage, qp);
 }

 int32_t VCMDecodedFrameCallback::ReceivedDecodedReferenceFrame(
@@ -131,8 +131,7 @@ VCMGenericDecoder::VCMGenericDecoder(VideoDecoder* decoder, bool isExternal)
       _decoder(decoder),
       _codecType(kVideoCodecUnknown),
       _isExternal(isExternal),
-      _keyFrameDecoded(false),
-      _last_keyframe_content_type(VideoContentType::UNSPECIFIED) {}
+      _keyFrameDecoded(false) {}

 VCMGenericDecoder::~VCMGenericDecoder() {}
@@ -150,15 +149,6 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   _frameInfos[_nextFrameInfoIdx].decodeStartTimeMs = nowMs;
   _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
   _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
-  // Set correctly only for key frames. Thus, use latest key frame
-  // content type. If the corresponding key frame was lost, decode will fail
-  // and content type will be ignored.
-  if (frame.FrameType() == kVideoFrameKey) {
-    _frameInfos[_nextFrameInfoIdx].content_type = frame.contentType();
-    _last_keyframe_content_type = frame.contentType();
-  } else {
-    _frameInfos[_nextFrameInfoIdx].content_type = _last_keyframe_content_type;
-  }
   _callback->Map(frame.TimeStamp(), &_frameInfos[_nextFrameInfoIdx]);

   _nextFrameInfoIdx = (_nextFrameInfoIdx + 1) % kDecoderFrameMemoryLength;
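The comment removed in the hunk above explains the caching logic: the content type only arrives reliably with key frames, so the decoder remembers the last key frame's type and applies it to subsequent delta frames. A stand-alone sketch of that pattern, using hypothetical types rather than the reverted class:

```cpp
#include <cstdint>

enum class VideoContentType : uint8_t { UNSPECIFIED, SCREENSHARE };
enum class FrameType { kKeyFrame, kDeltaFrame };

// Hypothetical frame view, for illustration only.
struct FrameSketch {
  FrameType frame_type;
  VideoContentType content_type;  // Only trustworthy on key frames.
};

class ContentTypeTracker {
 public:
  // Returns the content type to record for this frame: key frames update the
  // cached value, delta frames inherit the last key frame's value.
  VideoContentType OnFrame(const FrameSketch& frame) {
    if (frame.frame_type == FrameType::kKeyFrame) {
      last_keyframe_content_type_ = frame.content_type;
      return frame.content_type;
    }
    return last_keyframe_content_type_;
  }

 private:
  VideoContentType last_keyframe_content_type_ = VideoContentType::UNSPECIFIED;
};
```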
@@ -30,7 +30,6 @@ struct VCMFrameInformation {
   int64_t decodeStartTimeMs;
   void* userData;
   VideoRotation rotation;
-  VideoContentType content_type;
 };

 class VCMDecodedFrameCallback : public DecodedImageCallback {
@@ -110,7 +109,6 @@ class VCMGenericDecoder {
   VideoCodecType _codecType;
   bool _isExternal;
   bool _keyFrameDecoded;
-  VideoContentType _last_keyframe_content_type;
 };

 }  // namespace webrtc
@@ -33,8 +33,7 @@ class MockVCMReceiveCallback : public VCMReceiveCallback {
   MockVCMReceiveCallback() {}
   virtual ~MockVCMReceiveCallback() {}

-  MOCK_METHOD3(FrameToRender,
-               int32_t(VideoFrame&, rtc::Optional<uint8_t>, VideoContentType));
+  MOCK_METHOD2(FrameToRender, int32_t(VideoFrame&, rtc::Optional<uint8_t>));
   MOCK_METHOD1(ReceivedDecodedReferenceFrame, int32_t(const uint64_t));
   MOCK_METHOD1(OnIncomingPayloadType, void(int));
   MOCK_METHOD1(OnDecoderImplementationName, void(const char*));
@@ -62,8 +62,7 @@ struct VCMFrameCount {
 class VCMReceiveCallback {
  public:
   virtual int32_t FrameToRender(VideoFrame& videoFrame,  // NOLINT
-                                rtc::Optional<uint8_t> qp,
-                                VideoContentType content_type) = 0;
+                                rtc::Optional<uint8_t> qp) = 0;
   virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId) {
     return -1;
   }
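The original CL's stated goal was to split the UMA e2e delay metric between screenshare and regular video; the extra FrameToRender parameter removed here was how the content type reached the receive-side statistics. A rough sketch of how a receive callback could bucket delay samples by content type (hypothetical names, not WebRTC's actual stats/UMA machinery):

```cpp
#include <cstdint>
#include <vector>

enum class VideoContentType : uint8_t { UNSPECIFIED, SCREENSHARE };

// Hypothetical receive-side stats sink: keeps separate end-to-end delay
// samples for screenshare and for regular video, as the metric split intended.
class DelayStatsSketch {
 public:
  void OnFrameRendered(int64_t capture_time_ms, int64_t render_time_ms,
                       VideoContentType content_type) {
    const int64_t e2e_delay_ms = render_time_ms - capture_time_ms;
    if (content_type == VideoContentType::SCREENSHARE) {
      screenshare_delays_ms_.push_back(e2e_delay_ms);
    } else {
      video_delays_ms_.push_back(e2e_delay_ms);
    }
  }

 private:
  std::vector<int64_t> screenshare_delays_ms_;
  std::vector<int64_t> video_delays_ms_;
};
```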