Remove RTPVideoHeader::vp8() accessors.
Bug: none
Change-Id: Ia7d65148fb36a8f26647bee8a876ce7217ff8a68
Reviewed-on: https://webrtc-review.googlesource.com/93321
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24626}
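For downstream code that still calls the accessor, the migration this diff performs amounts to replacing the lazily-emplacing `vp8()` helper with explicit use of the `video_type_header` variant. A minimal sketch of the before/after access pattern, using simplified stand-in structs (the names `FillVp8` and `ReadVp8TemporalIdx` and the trimmed fields are illustrative, not the full WebRTC headers):

```cpp
#include "absl/types/variant.h"

// Simplified stand-ins for the real WebRTC structs; fields trimmed to one.
struct RTPVideoHeaderVP8 { int temporalIdx = -1; };
struct RTPVideoHeaderVP9 { int temporal_idx = -1; };

struct RTPVideoHeader {
  // After this commit the variant also carries absl::monostate, i.e. an
  // explicit "no codec-specific header" state, and is no longer mutable.
  absl::variant<absl::monostate, RTPVideoHeaderVP8, RTPVideoHeaderVP9>
      video_type_header;
};

void FillVp8(RTPVideoHeader& header) {
  // Old: header.vp8().temporalIdx = 0;  (accessor emplaced VP8 on demand)
  // New: emplace the VP8 alternative once, then write through the reference.
  auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
  vp8_header.temporalIdx = 0;
}

int ReadVp8TemporalIdx(const RTPVideoHeader& header) {
  // Old: return header.vp8().temporalIdx;
  // New: readers must know (or check) which alternative is currently set.
  if (absl::holds_alternative<RTPVideoHeaderVP8>(header.video_type_header))
    return absl::get<RTPVideoHeaderVP8>(header.video_type_header).temporalIdx;
  return -1;  // The real code returns kNoTemporalIdx here.
}
```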
@@ -318,16 +318,16 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,
       beginning_of_partition && (partition_id == 0);
   parsed_payload->video_header().simulcastIdx = 0;
   parsed_payload->video_header().codec = kVideoCodecVP8;
-  parsed_payload->video_header().vp8().nonReference =
-      (*payload_data & 0x20) ? true : false;  // N bit
-  parsed_payload->video_header().vp8().partitionId = partition_id;
-  parsed_payload->video_header().vp8().beginningOfPartition =
-      beginning_of_partition;
-  parsed_payload->video_header().vp8().pictureId = kNoPictureId;
-  parsed_payload->video_header().vp8().tl0PicIdx = kNoTl0PicIdx;
-  parsed_payload->video_header().vp8().temporalIdx = kNoTemporalIdx;
-  parsed_payload->video_header().vp8().layerSync = false;
-  parsed_payload->video_header().vp8().keyIdx = kNoKeyIdx;
+  auto& vp8_header = parsed_payload->video_header()
+                         .video_type_header.emplace<RTPVideoHeaderVP8>();
+  vp8_header.nonReference = (*payload_data & 0x20) ? true : false;  // N bit
+  vp8_header.partitionId = partition_id;
+  vp8_header.beginningOfPartition = beginning_of_partition;
+  vp8_header.pictureId = kNoPictureId;
+  vp8_header.tl0PicIdx = kNoTl0PicIdx;
+  vp8_header.temporalIdx = kNoTemporalIdx;
+  vp8_header.layerSync = false;
+  vp8_header.keyIdx = kNoKeyIdx;

   if (partition_id > 8) {
     // Weak check for corrupt payload_data: PartID MUST NOT be larger than 8.
@@ -344,8 +344,7 @@ bool RtpDepacketizerVp8::Parse(ParsedPayload* parsed_payload,

   if (extension) {
     const int parsed_bytes =
-        ParseVP8Extension(&parsed_payload->video_header().vp8(), payload_data,
-                          payload_data_length);
+        ParseVP8Extension(&vp8_header, payload_data, payload_data_length);
     if (parsed_bytes < 0)
       return false;
     payload_data += parsed_bytes;

@@ -57,9 +57,11 @@ constexpr RtpPacketizer::PayloadSizeLimits kNoSizeLimits;
 // +-+-+-+-+-+-+-+-+
 void VerifyBasicHeader(RTPVideoHeader* header, bool N, bool S, int part_id) {
   ASSERT_TRUE(header != NULL);
-  EXPECT_EQ(N, header->vp8().nonReference);
-  EXPECT_EQ(S, header->vp8().beginningOfPartition);
-  EXPECT_EQ(part_id, header->vp8().partitionId);
+  const auto& vp8_header =
+      absl::get<RTPVideoHeaderVP8>(header->video_type_header);
+  EXPECT_EQ(N, vp8_header.nonReference);
+  EXPECT_EQ(S, vp8_header.beginningOfPartition);
+  EXPECT_EQ(part_id, vp8_header.partitionId);
 }

 void VerifyExtensions(RTPVideoHeader* header,
@@ -68,10 +70,12 @@ void VerifyExtensions(RTPVideoHeader* header,
                       uint8_t temporal_idx, /* T */
                       int key_idx /* K */) {
   ASSERT_TRUE(header != NULL);
-  EXPECT_EQ(picture_id, header->vp8().pictureId);
-  EXPECT_EQ(tl0_pic_idx, header->vp8().tl0PicIdx);
-  EXPECT_EQ(temporal_idx, header->vp8().temporalIdx);
-  EXPECT_EQ(key_idx, header->vp8().keyIdx);
+  const auto& vp8_header =
+      absl::get<RTPVideoHeaderVP8>(header->video_type_header);
+  EXPECT_EQ(picture_id, vp8_header.pictureId);
+  EXPECT_EQ(tl0_pic_idx, vp8_header.tl0PicIdx);
+  EXPECT_EQ(temporal_idx, vp8_header.temporalIdx);
+  EXPECT_EQ(key_idx, vp8_header.keyIdx);
 }

 }  // namespace
@@ -268,7 +272,9 @@ TEST_F(RtpDepacketizerVp8Test, TIDAndLayerSync) {
   VerifyBasicHeader(&payload.video_header(), 0, 0, 8);
   VerifyExtensions(&payload.video_header(), kNoPictureId, kNoTl0PicIdx, 2,
                    kNoKeyIdx);
-  EXPECT_FALSE(payload.video_header().vp8().layerSync);
+  EXPECT_FALSE(
+      absl::get<RTPVideoHeaderVP8>(payload.video_header().video_type_header)
+          .layerSync);
 }

 TEST_F(RtpDepacketizerVp8Test, KeyIdx) {
@@ -351,7 +357,10 @@ TEST_F(RtpDepacketizerVp8Test, TestWithPacketizer) {
   VerifyExtensions(&payload.video_header(), input_header.pictureId,
                    input_header.tl0PicIdx, input_header.temporalIdx,
                    input_header.keyIdx);
-  EXPECT_EQ(payload.video_header().vp8().layerSync, input_header.layerSync);
+  EXPECT_EQ(
+      absl::get<RTPVideoHeaderVP8>(payload.video_header().video_type_header)
+          .layerSync,
+      input_header.layerSync);
 }

 TEST_F(RtpDepacketizerVp8Test, TestEmptyPayload) {

@@ -240,7 +240,7 @@ class RtpRtcpImplTest : public ::testing::Test {
     rtp_video_header.is_first_packet_in_frame = true;
     rtp_video_header.simulcastIdx = 0;
     rtp_video_header.codec = kVideoCodecVP8;
-    rtp_video_header.vp8() = vp8_header;
+    rtp_video_header.video_type_header = vp8_header;
     rtp_video_header.video_timing = {0u, 0u, 0u, 0u, 0u, 0u, false};

     const uint8_t payload[100] = {0};

@@ -1842,7 +1842,8 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
 TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8BaseLayer) {
   RTPVideoHeader header;
   header.codec = kVideoCodecVP8;
-  header.vp8().temporalIdx = 0;
+  auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
+  vp8_header.temporalIdx = 0;

   EXPECT_EQ(kDontRetransmit,
             rtp_sender_video_->GetStorageType(
@@ -1874,8 +1875,9 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesVP8HigherLayers) {
   RTPVideoHeader header;
   header.codec = kVideoCodecVP8;

+  auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
   for (int tid = 1; tid <= kMaxTemporalStreams; ++tid) {
-    header.vp8().temporalIdx = tid;
+    vp8_header.temporalIdx = tid;

     EXPECT_EQ(kDontRetransmit, rtp_sender_video_->GetStorageType(
                                    header, kRetransmitOff,
@@ -1938,8 +1940,9 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
       (RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
       kFrameIntervalMs;
   constexpr int kPattern[] = {0, 2, 1, 2};
+  auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
   for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
-    header.vp8().temporalIdx = kPattern[i % arraysize(kPattern)];
+    vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];
     rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
     fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
   }
@@ -1948,7 +1951,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
   // right now. We will wait at most one expected retransmission time before
   // acknowledging that it did not arrive, which means this frame and the next
   // will not be retransmitted.
-  header.vp8().temporalIdx = 1;
+  vp8_header.temporalIdx = 1;
   EXPECT_EQ(StorageType::kDontRetransmit,
             rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
   fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@@ -1964,7 +1967,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmit) {
   // Insert a frame for TL2. We just had frame in TL1, so the next one there is
   // in three frames away. TL0 is still too far in the past. So, allow
   // retransmission.
-  header.vp8().temporalIdx = 2;
+  vp8_header.temporalIdx = 2;
   EXPECT_EQ(StorageType::kAllowRetransmission,
             rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
   fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@@ -1995,8 +1998,9 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
       (RTPSenderVideo::kTLRateWindowSizeMs + (kFrameIntervalMs / 2)) /
       kFrameIntervalMs;
   constexpr int kPattern[] = {0, 2, 2, 2};
+  auto& vp8_header = header.video_type_header.emplace<RTPVideoHeaderVP8>();
   for (size_t i = 0; i < arraysize(kPattern) * kNumRepetitions; ++i) {
-    header.vp8().temporalIdx = kPattern[i % arraysize(kPattern)];
+    vp8_header.temporalIdx = kPattern[i % arraysize(kPattern)];

     rtp_sender_video_->GetStorageType(header, kSettings, kRttMs);
     fake_clock_.AdvanceTimeMilliseconds(kFrameIntervalMs);
@@ -2007,7 +2011,7 @@ TEST_P(RtpSenderVideoTest, ConditionalRetransmitLimit) {
   // we don't store for retransmission because we expect a frame in a lower
   // layer, but that last frame in TL1 was a long time ago in absolute terms,
   // so allow retransmission anyway.
-  header.vp8().temporalIdx = 1;
+  vp8_header.temporalIdx = 1;
   EXPECT_EQ(StorageType::kAllowRetransmission,
             rtp_sender_video_->GetStorageType(header, kSettings, kRttMs));
 }

@@ -466,15 +466,15 @@ StorageType RTPSenderVideo::GetStorageType(
 }

 uint8_t RTPSenderVideo::GetTemporalId(const RTPVideoHeader& header) {
-  switch (header.codec) {
-    case kVideoCodecVP8:
-      return header.vp8().temporalIdx;
-    case kVideoCodecVP9:
-      return absl::get<RTPVideoHeaderVP9>(header.video_type_header)
-          .temporal_idx;
-    default:
-      return kNoTemporalIdx;
-  }
+  struct TemporalIdGetter {
+    uint8_t operator()(const RTPVideoHeaderVP8& vp8) { return vp8.temporalIdx; }
+    uint8_t operator()(const RTPVideoHeaderVP9& vp9) {
+      return vp9.temporal_idx;
+    }
+    uint8_t operator()(const RTPVideoHeaderH264&) { return kNoTemporalIdx; }
+    uint8_t operator()(const absl::monostate&) { return kNoTemporalIdx; }
+  };
+  return absl::visit(TemporalIdGetter(), header.video_type_header);
 }

 bool RTPSenderVideo::UpdateConditionalRetransmit(

@@ -21,8 +21,10 @@
 #include "modules/video_coding/codecs/vp9/include/vp9_globals.h"

 namespace webrtc {
-using RTPVideoTypeHeader =
-    absl::variant<RTPVideoHeaderVP8, RTPVideoHeaderVP9, RTPVideoHeaderH264>;
+using RTPVideoTypeHeader = absl::variant<absl::monostate,
+                                         RTPVideoHeaderVP8,
+                                         RTPVideoHeaderVP9,
+                                         RTPVideoHeaderH264>;

 struct RTPVideoHeader {
   struct GenericDescriptorInfo {
@@ -42,21 +44,6 @@ struct RTPVideoHeader {

   ~RTPVideoHeader();

-  // TODO(philipel): Remove when downstream projects have been updated.
-  RTPVideoHeaderVP8& vp8() {
-    if (!absl::holds_alternative<RTPVideoHeaderVP8>(video_type_header))
-      video_type_header.emplace<RTPVideoHeaderVP8>();
-
-    return absl::get<RTPVideoHeaderVP8>(video_type_header);
-  }
-  // TODO(philipel): Remove when downstream projects have been updated.
-  const RTPVideoHeaderVP8& vp8() const {
-    if (!absl::holds_alternative<RTPVideoHeaderVP8>(video_type_header))
-      video_type_header.emplace<RTPVideoHeaderVP8>();
-
-    return absl::get<RTPVideoHeaderVP8>(video_type_header);
-  }
-
   absl::optional<GenericDescriptorInfo> generic;

   uint16_t width = 0;
@@ -69,8 +56,7 @@ struct RTPVideoHeader {

   PlayoutDelay playout_delay;
   VideoSendTiming video_timing;
-  // TODO(philipel): remove mutable when downstream projects have been updated.
-  mutable RTPVideoTypeHeader video_type_header;
+  RTPVideoTypeHeader video_type_header;
 };

 }  // namespace webrtc