Remove framemarking RTP extension.
BUG=webrtc:11637

Change-Id: I47f8e22473429c9762956444e27cfbafb201b208
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/176442
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Tommi <tommi@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31522}
@@ -41,7 +41,6 @@ constexpr ExtensionInfo kExtensions[] = {
     CreateExtensionInfo<PlayoutDelayLimits>(),
     CreateExtensionInfo<VideoContentTypeExtension>(),
     CreateExtensionInfo<VideoTimingExtension>(),
-    CreateExtensionInfo<FrameMarkingExtension>(),
     CreateExtensionInfo<RtpStreamId>(),
     CreateExtensionInfo<RepairedRtpStreamId>(),
     CreateExtensionInfo<RtpMid>(),
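For context, each CreateExtensionInfo<T>() entry above presumably produces a {T::kId, T::kUri} record; the ExtensionInfo and CreateExtensionInfo definitions are not part of this diff, so the following minimal sketch of the registry pattern is an assumption based on the FrameMarkingExtension declaration removed further down.

// Sketch (not part of this change): assumed shape of the extension registry.
#include <cstdint>

enum SketchExtensionType : int { kSketchPlayoutDelay = 1 };

struct ExtensionInfoSketch {
  SketchExtensionType type;
  const char* uri;
};

// Presumably the real CreateExtensionInfo<T>() does the same: pull the id and
// URI out of the extension class's static members.
template <typename Extension>
constexpr ExtensionInfoSketch CreateExtensionInfoSketch() {
  return {Extension::kId, Extension::kUri};
}

struct FakePlayoutDelayLimits {  // stand-in for a real extension class
  static constexpr SketchExtensionType kId = kSketchPlayoutDelay;
  static constexpr const char kUri[] = "urn:example:playout-delay";
};

constexpr ExtensionInfoSketch kExtensionsSketch[] = {
    CreateExtensionInfoSketch<FakePlayoutDelayLimits>(),
};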
@@ -525,86 +525,6 @@ bool VideoTimingExtension::Write(rtc::ArrayView<uint8_t> data,
   return true;
 }
 
-// Frame Marking.
-//
-// Meta-information about an RTP stream outside the encrypted media payload,
-// useful for an RTP switch to do codec-agnostic selective forwarding
-// without decrypting the payload.
-//
-// For non-scalable streams:
-//  0                   1
-//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |  ID   | L = 0 |S|E|I|D|0 0 0 0|
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-//
-// For scalable streams:
-//  0                   1                   2                   3
-//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-// |  ID   | L = 2 |S|E|I|D|B| TID |      LID      |   TL0PICIDX   |
-// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-constexpr RTPExtensionType FrameMarkingExtension::kId;
-constexpr const char FrameMarkingExtension::kUri[];
-
-bool FrameMarkingExtension::IsScalable(uint8_t temporal_id, uint8_t layer_id) {
-  return temporal_id != kNoTemporalIdx || layer_id != kNoSpatialIdx;
-}
-
-bool FrameMarkingExtension::Parse(rtc::ArrayView<const uint8_t> data,
-                                  FrameMarking* frame_marking) {
-  RTC_DCHECK(frame_marking);
-
-  if (data.size() != 1 && data.size() != 3)
-    return false;
-
-  frame_marking->start_of_frame = (data[0] & 0x80) != 0;
-  frame_marking->end_of_frame = (data[0] & 0x40) != 0;
-  frame_marking->independent_frame = (data[0] & 0x20) != 0;
-  frame_marking->discardable_frame = (data[0] & 0x10) != 0;
-
-  if (data.size() == 3) {
-    frame_marking->base_layer_sync = (data[0] & 0x08) != 0;
-    frame_marking->temporal_id = data[0] & 0x7;
-    frame_marking->layer_id = data[1];
-    frame_marking->tl0_pic_idx = data[2];
-  } else {
-    // non-scalable
-    frame_marking->base_layer_sync = false;
-    frame_marking->temporal_id = kNoTemporalIdx;
-    frame_marking->layer_id = kNoSpatialIdx;
-    frame_marking->tl0_pic_idx = 0;
-  }
-  return true;
-}
-
-size_t FrameMarkingExtension::ValueSize(const FrameMarking& frame_marking) {
-  if (IsScalable(frame_marking.temporal_id, frame_marking.layer_id))
-    return 3;
-  else
-    return 1;
-}
-
-bool FrameMarkingExtension::Write(rtc::ArrayView<uint8_t> data,
-                                  const FrameMarking& frame_marking) {
-  RTC_DCHECK_GE(data.size(), 1);
-  RTC_CHECK_LE(frame_marking.temporal_id, 0x07);
-  data[0] = frame_marking.start_of_frame ? 0x80 : 0x00;
-  data[0] |= frame_marking.end_of_frame ? 0x40 : 0x00;
-  data[0] |= frame_marking.independent_frame ? 0x20 : 0x00;
-  data[0] |= frame_marking.discardable_frame ? 0x10 : 0x00;
-
-  if (IsScalable(frame_marking.temporal_id, frame_marking.layer_id)) {
-    RTC_DCHECK_EQ(data.size(), 3);
-    data[0] |= frame_marking.base_layer_sync ? 0x08 : 0x00;
-    data[0] |= frame_marking.temporal_id & 0x07;
-    data[1] = frame_marking.layer_id;
-    data[2] = frame_marking.tl0_pic_idx;
-  }
-  return true;
-}
-
 // Color space including HDR metadata as an optional field.
 //
 // RTP header extension to carry color space information and optionally HDR
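The removed Parse() and Write() above implement exactly the byte layout in the removed comment block: the S/E/I/D flags (plus B in the scalable form) occupy the high bits of the first byte, TID its low three bits, with LID and TL0PICIDX as the second and third bytes. A minimal, self-contained sketch of that packing, independent of the WebRTC helpers (rtc::ArrayView, RTC_DCHECK) used above:

// Sketch (not part of this change): mirrors the removed FrameMarking layout.
#include <cstddef>
#include <cstdint>
#include <vector>

struct FrameMarkingBits {
  bool start_of_frame = false;     // S
  bool end_of_frame = false;       // E
  bool independent_frame = false;  // I
  bool discardable_frame = false;  // D
  bool base_layer_sync = false;    // B (scalable form only)
  uint8_t temporal_id = 0;         // TID, 3 bits
  uint8_t layer_id = 0;            // LID
  uint8_t tl0_pic_idx = 0;         // TL0PICIDX
};

// Serializes to 1 byte (non-scalable) or 3 bytes (scalable).
std::vector<uint8_t> PackFrameMarking(const FrameMarkingBits& fm,
                                      bool scalable) {
  std::vector<uint8_t> out(scalable ? 3 : 1, 0);
  out[0] = (fm.start_of_frame ? 0x80 : 0) | (fm.end_of_frame ? 0x40 : 0) |
           (fm.independent_frame ? 0x20 : 0) | (fm.discardable_frame ? 0x10 : 0);
  if (scalable) {
    out[0] |= (fm.base_layer_sync ? 0x08 : 0) | (fm.temporal_id & 0x07);
    out[1] = fm.layer_id;
    out[2] = fm.tl0_pic_idx;
  }
  return out;
}

// Rejects any length other than 1 or 3, like the removed Parse().
bool UnpackFrameMarking(const uint8_t* data, size_t size,
                        FrameMarkingBits* fm) {
  if (size != 1 && size != 3)
    return false;
  fm->start_of_frame = (data[0] & 0x80) != 0;
  fm->end_of_frame = (data[0] & 0x40) != 0;
  fm->independent_frame = (data[0] & 0x20) != 0;
  fm->discardable_frame = (data[0] & 0x10) != 0;
  if (size == 3) {
    fm->base_layer_sync = (data[0] & 0x08) != 0;
    fm->temporal_id = data[0] & 0x07;
    fm->layer_id = data[1];
    fm->tl0_pic_idx = data[2];
  }
  return true;
}

For example, packing a frame with only the S and E flags set in the scalable form yields the bytes 0xC0 0x00 0x00.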
@@ -19,7 +19,6 @@
 #include "api/rtp_headers.h"
 #include "api/video/color_space.h"
 #include "api/video/video_content_type.h"
-#include "api/video/video_frame_marking.h"
 #include "api/video/video_rotation.h"
 #include "api/video/video_timing.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
@@ -217,23 +216,6 @@ class VideoTimingExtension {
                      uint8_t offset);
 };
 
-class FrameMarkingExtension {
- public:
-  using value_type = FrameMarking;
-  static constexpr RTPExtensionType kId = kRtpExtensionFrameMarking;
-  static constexpr const char kUri[] =
-      "http://tools.ietf.org/html/draft-ietf-avtext-framemarking-07";
-
-  static bool Parse(rtc::ArrayView<const uint8_t> data,
-                    FrameMarking* frame_marking);
-  static size_t ValueSize(const FrameMarking& frame_marking);
-  static bool Write(rtc::ArrayView<uint8_t> data,
-                    const FrameMarking& frame_marking);
-
- private:
-  static bool IsScalable(uint8_t temporal_id, uint8_t layer_id);
-};
-
 class ColorSpaceExtension {
  public:
   using value_type = ColorSpace;
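The deleted class follows the same trait shape as the other header extensions in this file: a value_type alias plus static kId, kUri, Parse(), ValueSize() and Write(). That shape is what the templated SetExtension<>() / GetExtension<>() calls seen elsewhere in this diff rely on. A simplified sketch of the idea, using a toy packet class and toy extension rather than WebRTC's actual RtpPacket internals:

// Sketch (not part of this change): why each extension exposes value_type,
// ValueSize() and Write(). Deliberately simplified.
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyAudioLevel {
  using value_type = uint8_t;
  static size_t ValueSize(uint8_t /*level*/) { return 1; }
  static bool Write(uint8_t* data, uint8_t level) {
    data[0] = level & 0x7F;  // 7-bit value, top bit left clear
    return true;
  }
};

class ToyPacket {
 public:
  // Mirrors the shape of a SetExtension<Extension>(value) call: the packet
  // asks the trait how many bytes it needs, then lets it serialize them.
  template <typename Extension>
  bool SetExtension(const typename Extension::value_type& value) {
    std::vector<uint8_t> buf(Extension::ValueSize(value));
    if (!Extension::Write(buf.data(), value))
      return false;
    extension_data_.insert(extension_data_.end(), buf.begin(), buf.end());
    return true;
  }

 private:
  std::vector<uint8_t> extension_data_;
};

// Usage: ToyPacket packet; packet.SetExtension<ToyAudioLevel>(42);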
@@ -188,7 +188,6 @@ void RtpPacket::ZeroMutableExtensions() {
     case RTPExtensionType::kRtpExtensionAudioLevel:
     case RTPExtensionType::kRtpExtensionAbsoluteCaptureTime:
     case RTPExtensionType::kRtpExtensionColorSpace:
-    case RTPExtensionType::kRtpExtensionFrameMarking:
     case RTPExtensionType::kRtpExtensionGenericFrameDescriptor00:
     case RTPExtensionType::kRtpExtensionGenericFrameDescriptor02:
     case RTPExtensionType::kRtpExtensionMid:
@@ -69,8 +69,6 @@ void RtpPacketReceived::GetHeader(RTPHeader* header) const {
           &header->extension.videoContentType);
   header->extension.has_video_timing =
       GetExtension<VideoTimingExtension>(&header->extension.video_timing);
-  header->extension.has_frame_marking =
-      GetExtension<FrameMarkingExtension>(&header->extension.frame_marking);
   GetExtension<RtpStreamId>(&header->extension.stream_id);
   GetExtension<RepairedRtpStreamId>(&header->extension.repaired_stream_id);
   GetExtension<RtpMid>(&header->extension.mid);
@@ -108,7 +108,6 @@ bool IsNonVolatile(RTPExtensionType type) {
     case kRtpExtensionAbsoluteSendTime:
     case kRtpExtensionTransportSequenceNumber:
     case kRtpExtensionTransportSequenceNumber02:
-    case kRtpExtensionFrameMarking:
     case kRtpExtensionRtpStreamId:
     case kRtpExtensionMid:
     case kRtpExtensionGenericFrameDescriptor00:
@@ -312,14 +312,6 @@ void RTPSenderVideo::AddRtpHeaderExtensions(
     packet->SetExtension<AbsoluteCaptureTimeExtension>(*absolute_capture_time);
   }
 
-  if (video_header.codec == kVideoCodecH264 &&
-      video_header.frame_marking.temporal_id != kNoTemporalIdx) {
-    FrameMarking frame_marking = video_header.frame_marking;
-    frame_marking.start_of_frame = first_packet;
-    frame_marking.end_of_frame = last_packet;
-    packet->SetExtension<FrameMarkingExtension>(frame_marking);
-  }
-
   if (video_header.generic) {
     bool extension_is_set = false;
     if (video_structure_ != nullptr) {
@@ -736,12 +728,7 @@ uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
     }
     uint8_t operator()(const absl::monostate&) { return kNoTemporalIdx; }
   };
-  switch (header.codec) {
-    case kVideoCodecH264:
-      return header.frame_marking.temporal_id;
-    default:
-      return absl::visit(TemporalIdGetter(), header.video_type_header);
-  }
+  return absl::visit(TemporalIdGetter(), header.video_type_header);
 }
 
 bool RTPSenderVideo::UpdateConditionalRetransmit(
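After this change GetTemporalId() derives the temporal id only by visiting the codec-specific video_type_header variant with TemporalIdGetter, so H.264 falls through to the kNoTemporalIdx case like any other codec without a per-packet temporal id. A standalone sketch of that visitor-over-variant pattern, using std::variant in place of the absl::variant/absl::visit pair used above, and assuming the 0xFF sentinel that appears as the frame_marking default in the RTPVideoHeader hunk further down:

// Sketch (not part of this change): visitor over a codec-specific variant.
#include <cstdint>
#include <variant>

constexpr uint8_t kNoTemporalIdxSketch = 0xFF;  // assumed sentinel value

struct Vp8HeaderSketch { uint8_t temporal_idx = 0; };
struct H264HeaderSketch {};  // carries no per-packet temporal id

using TypeHeaderSketch =
    std::variant<std::monostate, Vp8HeaderSketch, H264HeaderSketch>;

struct TemporalIdGetterSketch {
  uint8_t operator()(const Vp8HeaderSketch& h) const { return h.temporal_idx; }
  uint8_t operator()(const H264HeaderSketch&) const {
    return kNoTemporalIdxSketch;
  }
  uint8_t operator()(const std::monostate&) const {
    return kNoTemporalIdxSketch;
  }
};

uint8_t GetTemporalIdSketch(const TypeHeaderSketch& header) {
  return std::visit(TemporalIdGetterSketch(), header);
}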
@@ -59,7 +59,6 @@ using ::testing::WithArgs;
 
 enum : int {  // The first valid value is 1.
   kAbsoluteSendTimeExtensionId = 1,
-  kFrameMarkingExtensionId,
   kGenericDescriptorId,
   kDependencyDescriptorId,
   kTransmissionTimeOffsetExtensionId,
@@ -95,8 +94,6 @@ class LoopbackTransportTest : public webrtc::Transport {
         kGenericDescriptorId);
     receivers_extensions_.Register<RtpDependencyDescriptorExtension>(
         kDependencyDescriptorId);
-    receivers_extensions_.Register<FrameMarkingExtension>(
-        kFrameMarkingExtensionId);
     receivers_extensions_.Register<AbsoluteCaptureTimeExtension>(
         kAbsoluteCaptureTimeExtensionId);
     receivers_extensions_.Register<PlayoutDelayLimits>(
@@ -291,43 +288,6 @@ TEST_P(RtpSenderVideoTest, DeltaFrameHasCVOWhenNonZero) {
   EXPECT_EQ(kVideoRotation_90, rotation);
 }
 
-TEST_P(RtpSenderVideoTest, CheckH264FrameMarking) {
-  uint8_t kFrame[kMaxPacketLength];
-  rtp_module_->RegisterRtpHeaderExtension(FrameMarkingExtension::kUri,
-                                          kFrameMarkingExtensionId);
-
-  RTPFragmentationHeader frag;
-  frag.VerifyAndAllocateFragmentationHeader(1);
-  frag.fragmentationOffset[0] = 0;
-  frag.fragmentationLength[0] = sizeof(kFrame);
-
-  RTPVideoHeader hdr;
-  hdr.video_type_header.emplace<RTPVideoHeaderH264>().packetization_mode =
-      H264PacketizationMode::NonInterleaved;
-  hdr.codec = kVideoCodecH264;
-  hdr.frame_marking.temporal_id = kNoTemporalIdx;
-  hdr.frame_marking.tl0_pic_idx = 99;
-  hdr.frame_marking.base_layer_sync = true;
-  hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp, 0, kFrame, &frag,
-                              hdr, kDefaultExpectedRetransmissionTimeMs);
-
-  FrameMarking fm;
-  EXPECT_FALSE(
-      transport_.last_sent_packet().GetExtension<FrameMarkingExtension>(&fm));
-
-  hdr.frame_marking.temporal_id = 0;
-  hdr.frame_type = VideoFrameType::kVideoFrameDelta;
-  rtp_sender_video_.SendVideo(kPayload, kType, kTimestamp + 1, 0, kFrame, &frag,
-                              hdr, kDefaultExpectedRetransmissionTimeMs);
-
-  EXPECT_TRUE(
-      transport_.last_sent_packet().GetExtension<FrameMarkingExtension>(&fm));
-  EXPECT_EQ(hdr.frame_marking.temporal_id, fm.temporal_id);
-  EXPECT_EQ(hdr.frame_marking.tl0_pic_idx, fm.tl0_pic_idx);
-  EXPECT_EQ(hdr.frame_marking.base_layer_sync, fm.base_layer_sync);
-}
-
 // Make sure rotation is parsed correctly when the Camera (C) and Flip (F) bits
 // are set in the CVO byte.
 TEST_P(RtpSenderVideoTest, SendVideoWithCameraAndFlipCVO) {
@@ -369,7 +329,6 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
   header.video_type_header.emplace<RTPVideoHeaderH264>().packetization_mode =
       H264PacketizationMode::NonInterleaved;
   header.codec = kVideoCodecH264;
-  header.frame_marking.temporal_id = kNoTemporalIdx;
 
   EXPECT_FALSE(rtp_sender_video_.AllowRetransmission(
       header, kRetransmitOff, kDefaultExpectedRetransmissionTimeMs));
@@ -380,14 +339,6 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
   EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
       header, kConditionallyRetransmitHigherLayers,
       kDefaultExpectedRetransmissionTimeMs));
-
-  // Test higher level retransmit.
-  for (int tid = 0; tid <= kMaxTemporalStreams; ++tid) {
-    header.frame_marking.temporal_id = tid;
-    EXPECT_TRUE(rtp_sender_video_.AllowRetransmission(
-        header, kRetransmitHigherLayers | kRetransmitBaseLayer,
-        kDefaultExpectedRetransmissionTimeMs));
-  }
 }
 
 TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) {
@@ -17,7 +17,6 @@
 
 #include "api/array_view.h"
 #include "api/video/video_content_type.h"
-#include "api/video/video_frame_marking.h"
 #include "api/video/video_rotation.h"
 #include "api/video/video_timing.h"
 #include "modules/rtp_rtcp/include/rtp_cvo.h"
@@ -245,10 +244,6 @@ bool RtpHeaderParser::Parse(RTPHeader* header,
   header->extension.has_video_timing = false;
   header->extension.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};
 
-  header->extension.has_frame_marking = false;
-  header->extension.frame_marking = {false, false, false, false,
-                                     false, kNoTemporalIdx, 0, 0};
-
   if (X) {
     /* RTP header extension, RFC 3550.
      0                   1                   2                   3
@@ -497,15 +492,6 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
               &header->extension.video_timing);
           break;
         }
-        case kRtpExtensionFrameMarking: {
-          if (!FrameMarkingExtension::Parse(rtc::MakeArrayView(ptr, len + 1),
-                                            &header->extension.frame_marking)) {
-            RTC_LOG(LS_WARNING) << "Incorrect frame marking len: " << len;
-            return;
-          }
-          header->extension.has_frame_marking = true;
-          break;
-        }
         case kRtpExtensionRtpStreamId: {
           std::string name(reinterpret_cast<const char*>(ptr), len + 1);
           if (IsLegalRsidName(name)) {
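The len + 1 passed to Parse() above comes from the one-byte header-extension format (RFC 5285 / RFC 8285): each element starts with a byte holding a 4-bit ID and a 4-bit L field, and carries L + 1 bytes of data. A standalone sketch of walking such elements, independent of the parser above:

// Sketch (not part of this change): iterate one-byte extension elements.
#include <cstddef>
#include <cstdint>
#include <cstdio>

void WalkOneByteExtensions(const uint8_t* data, size_t size) {
  size_t i = 0;
  while (i < size) {
    if (data[i] == 0) {  // a zero byte is padding between elements
      ++i;
      continue;
    }
    const uint8_t id = data[i] >> 4;
    const uint8_t len = data[i] & 0x0F;  // stored as (data length - 1)
    if (id == 15)                        // ID 15 is reserved: stop parsing
      return;
    if (i + 1 + len + 1 > size)          // truncated element
      return;
    std::printf("extension id=%u, %u byte(s) of data\n", id, len + 1);
    i += 1 + len + 1;  // header byte plus (len + 1) data bytes
  }
}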
@@ -19,7 +19,6 @@
 #include "api/video/color_space.h"
 #include "api/video/video_codec_type.h"
 #include "api/video/video_content_type.h"
-#include "api/video/video_frame_marking.h"
 #include "api/video/video_frame_type.h"
 #include "api/video/video_rotation.h"
 #include "api/video/video_timing.h"
@@ -75,7 +74,6 @@ struct RTPVideoHeader {
 
   PlayoutDelay playout_delay = {-1, -1};
   VideoSendTiming video_timing;
-  FrameMarking frame_marking = {false, false, false, false, false, 0xFF, 0, 0};
   absl::optional<ColorSpace> color_space;
   RTPVideoTypeHeader video_type_header;
 };
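The aggregate initializer removed above (like the {false, false, false, false, false, kNoTemporalIdx, 0, 0} reset in the RtpHeaderParser hunk) fills the FrameMarking struct fields in declaration order. The struct itself lives in api/video/video_frame_marking.h and is not shown in this diff, so the following field order is an inference from the assignments in the removed Parse():

// Sketch (not part of this change): assumed FrameMarking field order.
#include <cstdint>

struct FrameMarkingSketch {
  bool start_of_frame;     // {false,
  bool end_of_frame;       //  false,
  bool independent_frame;  //  false,
  bool discardable_frame;  //  false,
  bool base_layer_sync;    //  false,
  uint8_t temporal_id;     //  0xFF (kNoTemporalIdx),
  uint8_t layer_id;        //  0,
  uint8_t tl0_pic_idx;     //  0}
};

// So the removed default simply meant "no flags set, no layer information".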