Remove RTPVideoHeader::h264() accessors.
Bug: none
Change-Id: I043bcaf358575688b223bc3631506e148b47fd58
Reviewed-on: https://webrtc-review.googlesource.com/88220
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23971}
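The pattern applied throughout the diff below replaces the removed h264() convenience accessors with direct use of the video_type_header variant: writers emplace RTPVideoHeaderH264 explicitly, and readers fetch it with absl::get (when the codec is known) or absl::get_if (when it might not be H.264). The following is a minimal, self-contained sketch of that usage; the struct definitions are simplified stand-ins for illustration only, not the real WebRTC headers — only the variant calls mirror the diff.

// Stand-in types (assumption: Abseil is available). The real RTPVideoHeader
// lives in WebRTC and carries many more fields and alternatives.
#include <cstddef>
#include <cstdio>

#include "absl/types/variant.h"

struct RTPVideoHeaderH264 {
  int packetization_mode = 0;
  size_t nalus_length = 0;
};
struct RTPVideoHeaderVP8 {};

struct RTPVideoHeader {
  // After this change the H.264 part is reachable only through the variant.
  absl::variant<absl::monostate, RTPVideoHeaderVP8, RTPVideoHeaderH264>
      video_type_header;
};

int main() {
  RTPVideoHeader header;

  // Write side: emplace the H.264 alternative explicitly instead of relying
  // on the removed h264() accessor, which emplaced it implicitly.
  auto& h264 = header.video_type_header.emplace<RTPVideoHeaderH264>();
  h264.packetization_mode = 1;

  // Read side when the codec is known to be H.264: absl::get asserts that
  // the alternative is active.
  const auto& known = absl::get<RTPVideoHeaderH264>(header.video_type_header);

  // Read side when the packet may not be H.264: absl::get_if returns nullptr
  // instead of failing.
  const auto* maybe =
      absl::get_if<RTPVideoHeaderH264>(&header.video_type_header);
  std::printf("mode=%d present=%d\n", known.packetization_mode,
              maybe != nullptr);
  return 0;
}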
@ -72,11 +72,13 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
|
|||||||
rtp->vp9().end_of_picture = info->codecSpecific.VP9.end_of_picture;
|
rtp->vp9().end_of_picture = info->codecSpecific.VP9.end_of_picture;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
case kVideoCodecH264:
|
case kVideoCodecH264: {
|
||||||
rtp->h264().packetization_mode =
|
auto& h264_header = rtp->video_type_header.emplace<RTPVideoHeaderH264>();
|
||||||
|
h264_header.packetization_mode =
|
||||||
info->codecSpecific.H264.packetization_mode;
|
info->codecSpecific.H264.packetization_mode;
|
||||||
rtp->simulcastIdx = info->codecSpecific.H264.simulcast_idx;
|
rtp->simulcastIdx = info->codecSpecific.H264.simulcast_idx;
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
case kVideoCodecMultiplex:
|
case kVideoCodecMultiplex:
|
||||||
case kVideoCodecGeneric:
|
case kVideoCodecGeneric:
|
||||||
rtp->codec = kVideoCodecGeneric;
|
rtp->codec = kVideoCodecGeneric;
|
||||||
|
@ -445,8 +445,10 @@ TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_H264) {
|
|||||||
Unused, const RTPVideoHeader* header, Unused) {
|
Unused, const RTPVideoHeader* header, Unused) {
|
||||||
EXPECT_EQ(0, header->simulcastIdx);
|
EXPECT_EQ(0, header->simulcastIdx);
|
||||||
EXPECT_EQ(kVideoCodecH264, header->codec);
|
EXPECT_EQ(kVideoCodecH264, header->codec);
|
||||||
|
const auto& h264 =
|
||||||
|
absl::get<RTPVideoHeaderH264>(header->video_type_header);
|
||||||
EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
|
EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
|
||||||
header->h264().packetization_mode);
|
h264.packetization_mode);
|
||||||
return true;
|
return true;
|
||||||
}));
|
}));
|
||||||
|
|
||||||
|
@ -23,17 +23,18 @@ RtpPacketizer* RtpPacketizer::Create(VideoCodecType type,
|
|||||||
size_t last_packet_reduction_len,
|
size_t last_packet_reduction_len,
|
||||||
const RTPVideoHeader* rtp_video_header,
|
const RTPVideoHeader* rtp_video_header,
|
||||||
FrameType frame_type) {
|
FrameType frame_type) {
|
||||||
|
RTC_CHECK(type == kVideoCodecGeneric || rtp_video_header);
|
||||||
switch (type) {
|
switch (type) {
|
||||||
case kVideoCodecH264:
|
case kVideoCodecH264: {
|
||||||
RTC_CHECK(rtp_video_header);
|
const auto& h264 =
|
||||||
|
absl::get<RTPVideoHeaderH264>(rtp_video_header->video_type_header);
|
||||||
return new RtpPacketizerH264(max_payload_len, last_packet_reduction_len,
|
return new RtpPacketizerH264(max_payload_len, last_packet_reduction_len,
|
||||||
rtp_video_header->h264().packetization_mode);
|
h264.packetization_mode);
|
||||||
|
}
|
||||||
case kVideoCodecVP8:
|
case kVideoCodecVP8:
|
||||||
RTC_CHECK(rtp_video_header);
|
|
||||||
return new RtpPacketizerVp8(rtp_video_header->vp8(), max_payload_len,
|
return new RtpPacketizerVp8(rtp_video_header->vp8(), max_payload_len,
|
||||||
last_packet_reduction_len);
|
last_packet_reduction_len);
|
||||||
case kVideoCodecVP9:
|
case kVideoCodecVP9:
|
||||||
RTC_CHECK(rtp_video_header);
|
|
||||||
return new RtpPacketizerVp9(rtp_video_header->vp9(), max_payload_len,
|
return new RtpPacketizerVp9(rtp_video_header->vp9(), max_payload_len,
|
||||||
last_packet_reduction_len);
|
last_packet_reduction_len);
|
||||||
case kVideoCodecGeneric:
|
case kVideoCodecGeneric:
|
||||||
|
@ -429,7 +429,8 @@ bool RtpDepacketizerH264::Parse(ParsedPayload* parsed_payload,
|
|||||||
modified_buffer_.reset();
|
modified_buffer_.reset();
|
||||||
|
|
||||||
uint8_t nal_type = payload_data[0] & kTypeMask;
|
uint8_t nal_type = payload_data[0] & kTypeMask;
|
||||||
parsed_payload->video_header().h264().nalus_length = 0;
|
parsed_payload->video_header()
|
||||||
|
.video_type_header.emplace<RTPVideoHeaderH264>();
|
||||||
if (nal_type == H264::NaluType::kFuA) {
|
if (nal_type == H264::NaluType::kFuA) {
|
||||||
// Fragmented NAL units (FU-A).
|
// Fragmented NAL units (FU-A).
|
||||||
if (!ParseFuaNalu(parsed_payload, payload_data))
|
if (!ParseFuaNalu(parsed_payload, payload_data))
|
||||||
@ -458,7 +459,8 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
|
|||||||
parsed_payload->video_header().codec = kVideoCodecH264;
|
parsed_payload->video_header().codec = kVideoCodecH264;
|
||||||
parsed_payload->video_header().simulcastIdx = 0;
|
parsed_payload->video_header().simulcastIdx = 0;
|
||||||
parsed_payload->video_header().is_first_packet_in_frame = true;
|
parsed_payload->video_header().is_first_packet_in_frame = true;
|
||||||
RTPVideoHeaderH264* h264_header = &parsed_payload->video_header().h264();
|
auto& h264_header = absl::get<RTPVideoHeaderH264>(
|
||||||
|
parsed_payload->video_header().video_type_header);
|
||||||
|
|
||||||
const uint8_t* nalu_start = payload_data + kNalHeaderSize;
|
const uint8_t* nalu_start = payload_data + kNalHeaderSize;
|
||||||
const size_t nalu_length = length_ - kNalHeaderSize;
|
const size_t nalu_length = length_ - kNalHeaderSize;
|
||||||
@ -476,13 +478,13 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
h264_header->packetization_type = kH264StapA;
|
h264_header.packetization_type = kH264StapA;
|
||||||
nal_type = payload_data[kStapAHeaderSize] & kTypeMask;
|
nal_type = payload_data[kStapAHeaderSize] & kTypeMask;
|
||||||
} else {
|
} else {
|
||||||
h264_header->packetization_type = kH264SingleNalu;
|
h264_header.packetization_type = kH264SingleNalu;
|
||||||
nalu_start_offsets.push_back(0);
|
nalu_start_offsets.push_back(0);
|
||||||
}
|
}
|
||||||
h264_header->nalu_type = nal_type;
|
h264_header.nalu_type = nal_type;
|
||||||
parsed_payload->frame_type = kVideoFrameDelta;
|
parsed_payload->frame_type = kVideoFrameDelta;
|
||||||
|
|
||||||
nalu_start_offsets.push_back(length_ + kLengthFieldSize); // End offset.
|
nalu_start_offsets.push_back(length_ + kLengthFieldSize); // End offset.
|
||||||
@ -528,7 +530,7 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Rewrite length field to new SPS size.
|
// Rewrite length field to new SPS size.
|
||||||
if (h264_header->packetization_type == kH264StapA) {
|
if (h264_header.packetization_type == kH264StapA) {
|
||||||
size_t length_field_offset =
|
size_t length_field_offset =
|
||||||
start_offset - (H264::kNaluTypeSize + kLengthFieldSize);
|
start_offset - (H264::kNaluTypeSize + kLengthFieldSize);
|
||||||
// Stap-A Length includes payload data and type header.
|
// Stap-A Length includes payload data and type header.
|
||||||
@ -617,13 +619,13 @@ bool RtpDepacketizerH264::ProcessStapAOrSingleNalu(
|
|||||||
RTC_LOG(LS_WARNING) << "Unexpected STAP-A or FU-A received.";
|
RTC_LOG(LS_WARNING) << "Unexpected STAP-A or FU-A received.";
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
RTPVideoHeaderH264* h264 = &parsed_payload->video_header().h264();
|
|
||||||
if (h264->nalus_length == kMaxNalusPerPacket) {
|
if (h264_header.nalus_length == kMaxNalusPerPacket) {
|
||||||
RTC_LOG(LS_WARNING)
|
RTC_LOG(LS_WARNING)
|
||||||
<< "Received packet containing more than " << kMaxNalusPerPacket
|
<< "Received packet containing more than " << kMaxNalusPerPacket
|
||||||
<< " NAL units. Will not keep track sps and pps ids for all of them.";
|
<< " NAL units. Will not keep track sps and pps ids for all of them.";
|
||||||
} else {
|
} else {
|
||||||
h264->nalus[h264->nalus_length++] = nalu;
|
h264_header.nalus[h264_header.nalus_length++] = nalu;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -676,12 +678,13 @@ bool RtpDepacketizerH264::ParseFuaNalu(
|
|||||||
parsed_payload->video_header().codec = kVideoCodecH264;
|
parsed_payload->video_header().codec = kVideoCodecH264;
|
||||||
parsed_payload->video_header().simulcastIdx = 0;
|
parsed_payload->video_header().simulcastIdx = 0;
|
||||||
parsed_payload->video_header().is_first_packet_in_frame = first_fragment;
|
parsed_payload->video_header().is_first_packet_in_frame = first_fragment;
|
||||||
RTPVideoHeaderH264* h264 = &parsed_payload->video_header().h264();
|
auto& h264_header = absl::get<RTPVideoHeaderH264>(
|
||||||
h264->packetization_type = kH264FuA;
|
parsed_payload->video_header().video_type_header);
|
||||||
h264->nalu_type = original_nal_type;
|
h264_header.packetization_type = kH264FuA;
|
||||||
|
h264_header.nalu_type = original_nal_type;
|
||||||
if (first_fragment) {
|
if (first_fragment) {
|
||||||
h264->nalus[h264->nalus_length] = nalu;
|
h264_header.nalus[h264_header.nalus_length] = nalu;
|
||||||
h264->nalus_length = 1;
|
h264_header.nalus_length = 1;
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -26,6 +26,12 @@ namespace {
|
|||||||
|
|
||||||
using ::testing::ElementsAreArray;
|
using ::testing::ElementsAreArray;
|
||||||
|
|
||||||
|
struct H264ParsedPayload : public RtpDepacketizer::ParsedPayload {
|
||||||
|
RTPVideoHeaderH264& h264() {
|
||||||
|
return absl::get<RTPVideoHeaderH264>(video.video_type_header);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
|
constexpr RtpPacketToSend::ExtensionManager* kNoExtensions = nullptr;
|
||||||
const size_t kMaxPayloadSize = 1200;
|
const size_t kMaxPayloadSize = 1200;
|
||||||
const size_t kLengthFieldLength = 2;
|
const size_t kLengthFieldLength = 2;
|
||||||
@ -66,7 +72,8 @@ RtpPacketizer* CreateH264Packetizer(H264PacketizationMode mode,
|
|||||||
size_t max_payload_size,
|
size_t max_payload_size,
|
||||||
size_t last_packet_reduction) {
|
size_t last_packet_reduction) {
|
||||||
RTPVideoHeader header;
|
RTPVideoHeader header;
|
||||||
header.h264().packetization_mode = mode;
|
header.video_type_header.emplace<RTPVideoHeaderH264>().packetization_mode =
|
||||||
|
mode;
|
||||||
return RtpPacketizer::Create(kVideoCodecH264, max_payload_size,
|
return RtpPacketizer::Create(kVideoCodecH264, max_payload_size,
|
||||||
last_packet_reduction, &header, kEmptyFrame);
|
last_packet_reduction, &header, kEmptyFrame);
|
||||||
}
|
}
|
||||||
@ -579,7 +586,7 @@ class RtpDepacketizerH264Test : public ::testing::Test {
|
|||||||
RtpDepacketizerH264Test()
|
RtpDepacketizerH264Test()
|
||||||
: depacketizer_(RtpDepacketizer::Create(kVideoCodecH264)) {}
|
: depacketizer_(RtpDepacketizer::Create(kVideoCodecH264)) {}
|
||||||
|
|
||||||
void ExpectPacket(RtpDepacketizer::ParsedPayload* parsed_payload,
|
void ExpectPacket(H264ParsedPayload* parsed_payload,
|
||||||
const uint8_t* data,
|
const uint8_t* data,
|
||||||
size_t length) {
|
size_t length) {
|
||||||
ASSERT_TRUE(parsed_payload != NULL);
|
ASSERT_TRUE(parsed_payload != NULL);
|
||||||
@ -594,29 +601,29 @@ class RtpDepacketizerH264Test : public ::testing::Test {
|
|||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
|
TEST_F(RtpDepacketizerH264Test, TestSingleNalu) {
|
||||||
uint8_t packet[2] = {0x05, 0xFF}; // F=0, NRI=0, Type=5 (IDR).
|
uint8_t packet[2] = {0x05, 0xFF}; // F=0, NRI=0, Type=5 (IDR).
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
|
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
||||||
ExpectPacket(&payload, packet, sizeof(packet));
|
ExpectPacket(&payload, packet, sizeof(packet));
|
||||||
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
||||||
EXPECT_EQ(kH264SingleNalu, payload.video_header().h264().packetization_type);
|
EXPECT_EQ(kH264SingleNalu, payload.h264().packetization_type);
|
||||||
EXPECT_EQ(kIdr, payload.video_header().h264().nalu_type);
|
EXPECT_EQ(kIdr, payload.h264().nalu_type);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
|
TEST_F(RtpDepacketizerH264Test, TestSingleNaluSpsWithResolution) {
|
||||||
uint8_t packet[] = {kSps, 0x7A, 0x00, 0x1F, 0xBC, 0xD9, 0x40, 0x50,
|
uint8_t packet[] = {kSps, 0x7A, 0x00, 0x1F, 0xBC, 0xD9, 0x40, 0x50,
|
||||||
0x05, 0xBA, 0x10, 0x00, 0x00, 0x03, 0x00, 0xC0,
|
0x05, 0xBA, 0x10, 0x00, 0x00, 0x03, 0x00, 0xC0,
|
||||||
0x00, 0x00, 0x03, 0x2A, 0xE0, 0xF1, 0x83, 0x25};
|
0x00, 0x00, 0x03, 0x2A, 0xE0, 0xF1, 0x83, 0x25};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
|
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
||||||
ExpectPacket(&payload, packet, sizeof(packet));
|
ExpectPacket(&payload, packet, sizeof(packet));
|
||||||
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
||||||
EXPECT_EQ(kH264SingleNalu, payload.video_header().h264().packetization_type);
|
EXPECT_EQ(kH264SingleNalu, payload.h264().packetization_type);
|
||||||
EXPECT_EQ(1280u, payload.video_header().width);
|
EXPECT_EQ(1280u, payload.video_header().width);
|
||||||
EXPECT_EQ(720u, payload.video_header().height);
|
EXPECT_EQ(720u, payload.video_header().height);
|
||||||
}
|
}
|
||||||
@ -639,13 +646,13 @@ TEST_F(RtpDepacketizerH264Test, TestStapAKey) {
|
|||||||
0x85, 0xB8, 0x0, 0x4, 0x0, 0x0, 0x13, 0x93, 0x12, 0x0};
|
0x85, 0xB8, 0x0, 0x4, 0x0, 0x0, 0x13, 0x93, 0x12, 0x0};
|
||||||
// clang-format on
|
// clang-format on
|
||||||
|
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
||||||
ExpectPacket(&payload, packet, sizeof(packet));
|
ExpectPacket(&payload, packet, sizeof(packet));
|
||||||
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
||||||
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
|
const RTPVideoHeaderH264& h264 = payload.h264();
|
||||||
EXPECT_EQ(kH264StapA, h264.packetization_type);
|
EXPECT_EQ(kH264StapA, h264.packetization_type);
|
||||||
// NALU type for aggregated packets is the type of the first packet only.
|
// NALU type for aggregated packets is the type of the first packet only.
|
||||||
EXPECT_EQ(kSps, h264.nalu_type);
|
EXPECT_EQ(kSps, h264.nalu_type);
|
||||||
@ -669,14 +676,14 @@ TEST_F(RtpDepacketizerH264Test, TestStapANaluSpsWithResolution) {
|
|||||||
0x00, 0x03, kIdr, 0xFF, 0x00, 0x00, 0x04, kIdr, 0xFF,
|
0x00, 0x03, kIdr, 0xFF, 0x00, 0x00, 0x04, kIdr, 0xFF,
|
||||||
0x00, 0x11};
|
0x00, 0x11};
|
||||||
|
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
|
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
||||||
ExpectPacket(&payload, packet, sizeof(packet));
|
ExpectPacket(&payload, packet, sizeof(packet));
|
||||||
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
||||||
EXPECT_EQ(kH264StapA, payload.video_header().h264().packetization_type);
|
EXPECT_EQ(kH264StapA, payload.h264().packetization_type);
|
||||||
EXPECT_EQ(1280u, payload.video_header().width);
|
EXPECT_EQ(1280u, payload.video_header().width);
|
||||||
EXPECT_EQ(720u, payload.video_header().height);
|
EXPECT_EQ(720u, payload.video_header().height);
|
||||||
}
|
}
|
||||||
@ -693,7 +700,7 @@ TEST_F(RtpDepacketizerH264Test, TestEmptyStapARejected) {
|
|||||||
uint8_t trailing_empty_packet[] = {kStapA, 0x00, 0x03, kIdr,
|
uint8_t trailing_empty_packet[] = {kStapA, 0x00, 0x03, kIdr,
|
||||||
0xFF, 0x00, 0x00, 0x00};
|
0xFF, 0x00, 0x00, 0x00};
|
||||||
|
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
|
|
||||||
EXPECT_FALSE(depacketizer_->Parse(&payload, lone_empty_packet,
|
EXPECT_FALSE(depacketizer_->Parse(&payload, lone_empty_packet,
|
||||||
sizeof(lone_empty_packet)));
|
sizeof(lone_empty_packet)));
|
||||||
@ -732,7 +739,7 @@ TEST_F(RtpDepacketizerH264Test, DepacketizeWithRewriting) {
|
|||||||
out_buffer.AppendData(kHeader, 2);
|
out_buffer.AppendData(kHeader, 2);
|
||||||
out_buffer.AppendData(kIdrTwo);
|
out_buffer.AppendData(kIdrTwo);
|
||||||
|
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_TRUE(
|
EXPECT_TRUE(
|
||||||
depacketizer_->Parse(&payload, in_buffer.data(), in_buffer.size()));
|
depacketizer_->Parse(&payload, in_buffer.data(), in_buffer.size()));
|
||||||
|
|
||||||
@ -779,7 +786,7 @@ TEST_F(RtpDepacketizerH264Test, DepacketizeWithDoubleRewriting) {
|
|||||||
out_buffer.AppendData(kHeader, 2);
|
out_buffer.AppendData(kHeader, 2);
|
||||||
out_buffer.AppendData(kIdrTwo);
|
out_buffer.AppendData(kIdrTwo);
|
||||||
|
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_TRUE(
|
EXPECT_TRUE(
|
||||||
depacketizer_->Parse(&payload, in_buffer.data(), in_buffer.size()));
|
depacketizer_->Parse(&payload, in_buffer.data(), in_buffer.size()));
|
||||||
|
|
||||||
@ -796,16 +803,16 @@ TEST_F(RtpDepacketizerH264Test, TestStapADelta) {
|
|||||||
// Length, nal header, payload.
|
// Length, nal header, payload.
|
||||||
0, 0x02, kSlice, 0xFF, 0, 0x03, kSlice, 0xFF, 0x00, 0,
|
0, 0x02, kSlice, 0xFF, 0, 0x03, kSlice, 0xFF, 0x00, 0,
|
||||||
0x04, kSlice, 0xFF, 0x00, 0x11};
|
0x04, kSlice, 0xFF, 0x00, 0x11};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
|
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, packet, sizeof(packet)));
|
||||||
ExpectPacket(&payload, packet, sizeof(packet));
|
ExpectPacket(&payload, packet, sizeof(packet));
|
||||||
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
|
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
||||||
EXPECT_EQ(kH264StapA, payload.video_header().h264().packetization_type);
|
EXPECT_EQ(kH264StapA, payload.h264().packetization_type);
|
||||||
// NALU type for aggregated packets is the type of the first packet only.
|
// NALU type for aggregated packets is the type of the first packet only.
|
||||||
EXPECT_EQ(kSlice, payload.video_header().h264().nalu_type);
|
EXPECT_EQ(kSlice, payload.h264().nalu_type);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestFuA) {
|
TEST_F(RtpDepacketizerH264Test, TestFuA) {
|
||||||
@ -833,7 +840,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
|
|||||||
};
|
};
|
||||||
const uint8_t kExpected3[] = {0x03};
|
const uint8_t kExpected3[] = {0x03};
|
||||||
|
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
|
|
||||||
// We expect that the first packet is one byte shorter since the FU-A header
|
// We expect that the first packet is one byte shorter since the FU-A header
|
||||||
// has been replaced by the original nal header.
|
// has been replaced by the original nal header.
|
||||||
@ -842,7 +849,7 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
|
|||||||
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_TRUE(payload.video_header().is_first_packet_in_frame);
|
||||||
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
|
const RTPVideoHeaderH264& h264 = payload.h264();
|
||||||
EXPECT_EQ(kH264FuA, h264.packetization_type);
|
EXPECT_EQ(kH264FuA, h264.packetization_type);
|
||||||
EXPECT_EQ(kIdr, h264.nalu_type);
|
EXPECT_EQ(kIdr, h264.nalu_type);
|
||||||
ASSERT_EQ(1u, h264.nalus_length);
|
ASSERT_EQ(1u, h264.nalus_length);
|
||||||
@ -852,28 +859,28 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
|
|||||||
|
|
||||||
// Following packets will be 2 bytes shorter since they will only be appended
|
// Following packets will be 2 bytes shorter since they will only be appended
|
||||||
// onto the first packet.
|
// onto the first packet.
|
||||||
payload = RtpDepacketizer::ParsedPayload();
|
payload = H264ParsedPayload();
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, packet2, sizeof(packet2)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, packet2, sizeof(packet2)));
|
||||||
ExpectPacket(&payload, kExpected2, sizeof(kExpected2));
|
ExpectPacket(&payload, kExpected2, sizeof(kExpected2));
|
||||||
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
|
||||||
{
|
{
|
||||||
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
|
const RTPVideoHeaderH264& h264 = payload.h264();
|
||||||
EXPECT_EQ(kH264FuA, h264.packetization_type);
|
EXPECT_EQ(kH264FuA, h264.packetization_type);
|
||||||
EXPECT_EQ(kIdr, h264.nalu_type);
|
EXPECT_EQ(kIdr, h264.nalu_type);
|
||||||
// NALU info is only expected for the first FU-A packet.
|
// NALU info is only expected for the first FU-A packet.
|
||||||
EXPECT_EQ(0u, h264.nalus_length);
|
EXPECT_EQ(0u, h264.nalus_length);
|
||||||
}
|
}
|
||||||
|
|
||||||
payload = RtpDepacketizer::ParsedPayload();
|
payload = H264ParsedPayload();
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, packet3, sizeof(packet3)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, packet3, sizeof(packet3)));
|
||||||
ExpectPacket(&payload, kExpected3, sizeof(kExpected3));
|
ExpectPacket(&payload, kExpected3, sizeof(kExpected3));
|
||||||
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
EXPECT_EQ(kVideoFrameKey, payload.frame_type);
|
||||||
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
EXPECT_EQ(kVideoCodecH264, payload.video_header().codec);
|
||||||
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
|
EXPECT_FALSE(payload.video_header().is_first_packet_in_frame);
|
||||||
{
|
{
|
||||||
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
|
const RTPVideoHeaderH264& h264 = payload.h264();
|
||||||
EXPECT_EQ(kH264FuA, h264.packetization_type);
|
EXPECT_EQ(kH264FuA, h264.packetization_type);
|
||||||
EXPECT_EQ(kIdr, h264.nalu_type);
|
EXPECT_EQ(kIdr, h264.nalu_type);
|
||||||
// NALU info is only expected for the first FU-A packet.
|
// NALU info is only expected for the first FU-A packet.
|
||||||
@ -884,37 +891,37 @@ TEST_F(RtpDepacketizerH264Test, TestFuA) {
|
|||||||
TEST_F(RtpDepacketizerH264Test, TestEmptyPayload) {
|
TEST_F(RtpDepacketizerH264Test, TestEmptyPayload) {
|
||||||
// Using a wild pointer to crash on accesses from inside the depacketizer.
|
// Using a wild pointer to crash on accesses from inside the depacketizer.
|
||||||
uint8_t* garbage_ptr = reinterpret_cast<uint8_t*>(0x4711);
|
uint8_t* garbage_ptr = reinterpret_cast<uint8_t*>(0x4711);
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_FALSE(depacketizer_->Parse(&payload, garbage_ptr, 0));
|
EXPECT_FALSE(depacketizer_->Parse(&payload, garbage_ptr, 0));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestTruncatedFuaNalu) {
|
TEST_F(RtpDepacketizerH264Test, TestTruncatedFuaNalu) {
|
||||||
const uint8_t kPayload[] = {0x9c};
|
const uint8_t kPayload[] = {0x9c};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestTruncatedSingleStapANalu) {
|
TEST_F(RtpDepacketizerH264Test, TestTruncatedSingleStapANalu) {
|
||||||
const uint8_t kPayload[] = {0xd8, 0x27};
|
const uint8_t kPayload[] = {0xd8, 0x27};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestStapAPacketWithTruncatedNalUnits) {
|
TEST_F(RtpDepacketizerH264Test, TestStapAPacketWithTruncatedNalUnits) {
|
||||||
const uint8_t kPayload[] = {0x58, 0xCB, 0xED, 0xDF};
|
const uint8_t kPayload[] = {0x58, 0xCB, 0xED, 0xDF};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestTruncationJustAfterSingleStapANalu) {
|
TEST_F(RtpDepacketizerH264Test, TestTruncationJustAfterSingleStapANalu) {
|
||||||
const uint8_t kPayload[] = {0x38, 0x27, 0x27};
|
const uint8_t kPayload[] = {0x38, 0x27, 0x27};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
EXPECT_FALSE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(RtpDepacketizerH264Test, TestShortSpsPacket) {
|
TEST_F(RtpDepacketizerH264Test, TestShortSpsPacket) {
|
||||||
const uint8_t kPayload[] = {0x27, 0x80, 0x00};
|
const uint8_t kPayload[] = {0x27, 0x80, 0x00};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
EXPECT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
EXPECT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -923,9 +930,9 @@ TEST_F(RtpDepacketizerH264Test, TestSeiPacket) {
|
|||||||
kSei, // F=0, NRI=0, Type=6.
|
kSei, // F=0, NRI=0, Type=6.
|
||||||
0x03, 0x03, 0x03, 0x03 // Payload.
|
0x03, 0x03, 0x03, 0x03 // Payload.
|
||||||
};
|
};
|
||||||
RtpDepacketizer::ParsedPayload payload;
|
H264ParsedPayload payload;
|
||||||
ASSERT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
ASSERT_TRUE(depacketizer_->Parse(&payload, kPayload, sizeof(kPayload)));
|
||||||
const RTPVideoHeaderH264& h264 = payload.video_header().h264();
|
const RTPVideoHeaderH264& h264 = payload.h264();
|
||||||
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
|
EXPECT_EQ(kVideoFrameDelta, payload.frame_type);
|
||||||
EXPECT_EQ(kH264SingleNalu, h264.packetization_type);
|
EXPECT_EQ(kH264SingleNalu, h264.packetization_type);
|
||||||
EXPECT_EQ(kSei, h264.nalu_type);
|
EXPECT_EQ(kSei, h264.nalu_type);
|
||||||
|
@ -1102,7 +1102,6 @@ TEST_P(RtpSenderTest, NoFlexfecForTimingFrames) {
|
|||||||
EXPECT_EQ(0, rtp_sender_->RegisterPayload(payload_name, kPayloadType, 90000,
|
EXPECT_EQ(0, rtp_sender_->RegisterPayload(payload_name, kPayloadType, 90000,
|
||||||
0, 1500));
|
0, 1500));
|
||||||
RTPVideoHeader video_header;
|
RTPVideoHeader video_header;
|
||||||
memset(&video_header, 0, sizeof(RTPVideoHeader));
|
|
||||||
video_header.video_timing.flags = VideoSendTiming::kTriggeredByTimer;
|
video_header.video_timing.flags = VideoSendTiming::kTriggeredByTimer;
|
||||||
EXPECT_TRUE(rtp_sender_->SendOutgoingData(
|
EXPECT_TRUE(rtp_sender_->SendOutgoingData(
|
||||||
kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayloadData,
|
kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayloadData,
|
||||||
@ -1810,8 +1809,9 @@ TEST_P(RtpSenderVideoTest, RetransmissionTypesGeneric) {
|
|||||||
|
|
||||||
TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
|
TEST_P(RtpSenderVideoTest, RetransmissionTypesH264) {
|
||||||
RTPVideoHeader header;
|
RTPVideoHeader header;
|
||||||
|
header.video_type_header.emplace<RTPVideoHeaderH264>().packetization_mode =
|
||||||
|
H264PacketizationMode::NonInterleaved;
|
||||||
header.codec = kVideoCodecH264;
|
header.codec = kVideoCodecH264;
|
||||||
header.h264().packetization_mode = H264PacketizationMode::NonInterleaved;
|
|
||||||
|
|
||||||
EXPECT_EQ(kDontRetransmit,
|
EXPECT_EQ(kDontRetransmit,
|
||||||
rtp_sender_video_->GetStorageType(
|
rtp_sender_video_->GetStorageType(
|
||||||
|
@ -55,20 +55,6 @@ struct RTPVideoHeader {
|
|||||||
|
|
||||||
return absl::get<RTPVideoHeaderVP9>(video_type_header);
|
return absl::get<RTPVideoHeaderVP9>(video_type_header);
|
||||||
}
|
}
|
||||||
// TODO(philipel): Remove when downstream projects have been updated.
|
|
||||||
RTPVideoHeaderH264& h264() {
|
|
||||||
if (!absl::holds_alternative<RTPVideoHeaderH264>(video_type_header))
|
|
||||||
video_type_header.emplace<RTPVideoHeaderH264>();
|
|
||||||
|
|
||||||
return absl::get<RTPVideoHeaderH264>(video_type_header);
|
|
||||||
}
|
|
||||||
// TODO(philipel): Remove when downstream projects have been updated.
|
|
||||||
const RTPVideoHeaderH264& h264() const {
|
|
||||||
if (!absl::holds_alternative<RTPVideoHeaderH264>(video_type_header))
|
|
||||||
video_type_header.emplace<RTPVideoHeaderH264>();
|
|
||||||
|
|
||||||
return absl::get<RTPVideoHeaderH264>(video_type_header);
|
|
||||||
}
|
|
||||||
|
|
||||||
uint16_t width;
|
uint16_t width;
|
||||||
uint16_t height;
|
uint16_t height;
|
||||||
|
@ -36,14 +36,15 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
|
|||||||
const uint8_t* data = packet->dataPtr;
|
const uint8_t* data = packet->dataPtr;
|
||||||
const size_t data_size = packet->sizeBytes;
|
const size_t data_size = packet->sizeBytes;
|
||||||
const RTPVideoHeader& video_header = packet->video_header;
|
const RTPVideoHeader& video_header = packet->video_header;
|
||||||
RTPVideoHeaderH264* codec_header = &packet->video_header.h264();
|
auto& h264_header =
|
||||||
|
absl::get<RTPVideoHeaderH264>(packet->video_header.video_type_header);
|
||||||
|
|
||||||
bool append_sps_pps = false;
|
bool append_sps_pps = false;
|
||||||
auto sps = sps_data_.end();
|
auto sps = sps_data_.end();
|
||||||
auto pps = pps_data_.end();
|
auto pps = pps_data_.end();
|
||||||
|
|
||||||
for (size_t i = 0; i < codec_header->nalus_length; ++i) {
|
for (size_t i = 0; i < h264_header.nalus_length; ++i) {
|
||||||
const NaluInfo& nalu = codec_header->nalus[i];
|
const NaluInfo& nalu = h264_header.nalus[i];
|
||||||
switch (nalu.type) {
|
switch (nalu.type) {
|
||||||
case H264::NaluType::kSps: {
|
case H264::NaluType::kSps: {
|
||||||
sps_data_[nalu.sps_id].width = packet->width;
|
sps_data_[nalu.sps_id].width = packet->width;
|
||||||
@ -110,7 +111,7 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
|
|||||||
required_size += pps->second.size + sizeof(start_code_h264);
|
required_size += pps->second.size + sizeof(start_code_h264);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (codec_header->packetization_type == kH264StapA) {
|
if (h264_header.packetization_type == kH264StapA) {
|
||||||
const uint8_t* nalu_ptr = data + 1;
|
const uint8_t* nalu_ptr = data + 1;
|
||||||
while (nalu_ptr < data + data_size) {
|
while (nalu_ptr < data + data_size) {
|
||||||
RTC_DCHECK(video_header.is_first_packet_in_frame);
|
RTC_DCHECK(video_header.is_first_packet_in_frame);
|
||||||
@ -155,9 +156,9 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
|
|||||||
pps_info.type = H264::NaluType::kPps;
|
pps_info.type = H264::NaluType::kPps;
|
||||||
pps_info.sps_id = sps->first;
|
pps_info.sps_id = sps->first;
|
||||||
pps_info.pps_id = pps->first;
|
pps_info.pps_id = pps->first;
|
||||||
if (codec_header->nalus_length + 2 <= kMaxNalusPerPacket) {
|
if (h264_header.nalus_length + 2 <= kMaxNalusPerPacket) {
|
||||||
codec_header->nalus[codec_header->nalus_length++] = sps_info;
|
h264_header.nalus[h264_header.nalus_length++] = sps_info;
|
||||||
codec_header->nalus[codec_header->nalus_length++] = pps_info;
|
h264_header.nalus[h264_header.nalus_length++] = pps_info;
|
||||||
} else {
|
} else {
|
||||||
RTC_LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
|
RTC_LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
|
||||||
"SPS/PPS provided out-of-band.";
|
"SPS/PPS provided out-of-band.";
|
||||||
@ -165,7 +166,7 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Copy the rest of the bitstream and insert start codes.
|
// Copy the rest of the bitstream and insert start codes.
|
||||||
if (codec_header->packetization_type == kH264StapA) {
|
if (h264_header.packetization_type == kH264StapA) {
|
||||||
const uint8_t* nalu_ptr = data + 1;
|
const uint8_t* nalu_ptr = data + 1;
|
||||||
while (nalu_ptr < data + data_size) {
|
while (nalu_ptr < data + data_size) {
|
||||||
memcpy(insert_at, start_code_h264, sizeof(start_code_h264));
|
memcpy(insert_at, start_code_h264, sizeof(start_code_h264));
|
||||||
|
@ -46,21 +46,29 @@ void ExpectSpsPpsIdr(const RTPVideoHeaderH264& codec_header,
|
|||||||
EXPECT_TRUE(contains_idr);
|
EXPECT_TRUE(contains_idr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class H264VcmPacket : public VCMPacket {
|
||||||
|
public:
|
||||||
|
H264VcmPacket() {
|
||||||
|
codec = kVideoCodecH264;
|
||||||
|
video_header.is_first_packet_in_frame = false;
|
||||||
|
auto& type_header =
|
||||||
|
video_header.video_type_header.emplace<RTPVideoHeaderH264>();
|
||||||
|
type_header.nalus_length = 0;
|
||||||
|
type_header.packetization_type = kH264SingleNalu;
|
||||||
|
}
|
||||||
|
|
||||||
|
RTPVideoHeaderH264& h264() {
|
||||||
|
return absl::get<RTPVideoHeaderH264>(video_header.video_type_header);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
class TestH264SpsPpsTracker : public ::testing::Test {
|
class TestH264SpsPpsTracker : public ::testing::Test {
|
||||||
public:
|
public:
|
||||||
VCMPacket GetDefaultPacket() {
|
void AddSps(H264VcmPacket* packet,
|
||||||
VCMPacket packet;
|
uint8_t sps_id,
|
||||||
packet.codec = kVideoCodecH264;
|
std::vector<uint8_t>* data) {
|
||||||
packet.video_header.h264().nalus_length = 0;
|
|
||||||
packet.video_header.is_first_packet_in_frame = false;
|
|
||||||
packet.video_header.h264().packetization_type = kH264SingleNalu;
|
|
||||||
|
|
||||||
return packet;
|
|
||||||
}
|
|
||||||
|
|
||||||
void AddSps(VCMPacket* packet, uint8_t sps_id, std::vector<uint8_t>* data) {
|
|
||||||
NaluInfo info;
|
NaluInfo info;
|
||||||
info.type = H264::NaluType::kSps;
|
info.type = H264::NaluType::kSps;
|
||||||
info.sps_id = sps_id;
|
info.sps_id = sps_id;
|
||||||
@ -68,11 +76,10 @@ class TestH264SpsPpsTracker : public ::testing::Test {
|
|||||||
data->push_back(H264::NaluType::kSps);
|
data->push_back(H264::NaluType::kSps);
|
||||||
data->push_back(sps_id); // The sps data, just a single byte.
|
data->push_back(sps_id); // The sps data, just a single byte.
|
||||||
|
|
||||||
packet->video_header.h264()
|
packet->h264().nalus[packet->h264().nalus_length++] = info;
|
||||||
.nalus[packet->video_header.h264().nalus_length++] = info;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void AddPps(VCMPacket* packet,
|
void AddPps(H264VcmPacket* packet,
|
||||||
uint8_t sps_id,
|
uint8_t sps_id,
|
||||||
uint8_t pps_id,
|
uint8_t pps_id,
|
||||||
std::vector<uint8_t>* data) {
|
std::vector<uint8_t>* data) {
|
||||||
@ -83,18 +90,16 @@ class TestH264SpsPpsTracker : public ::testing::Test {
|
|||||||
data->push_back(H264::NaluType::kPps);
|
data->push_back(H264::NaluType::kPps);
|
||||||
data->push_back(pps_id); // The pps data, just a single byte.
|
data->push_back(pps_id); // The pps data, just a single byte.
|
||||||
|
|
||||||
packet->video_header.h264()
|
packet->h264().nalus[packet->h264().nalus_length++] = info;
|
||||||
.nalus[packet->video_header.h264().nalus_length++] = info;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void AddIdr(VCMPacket* packet, int pps_id) {
|
void AddIdr(H264VcmPacket* packet, int pps_id) {
|
||||||
NaluInfo info;
|
NaluInfo info;
|
||||||
info.type = H264::NaluType::kIdr;
|
info.type = H264::NaluType::kIdr;
|
||||||
info.sps_id = -1;
|
info.sps_id = -1;
|
||||||
info.pps_id = pps_id;
|
info.pps_id = pps_id;
|
||||||
|
|
||||||
packet->video_header.h264()
|
packet->h264().nalus[packet->h264().nalus_length++] = info;
|
||||||
.nalus[packet->video_header.h264().nalus_length++] = info;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
@ -103,8 +108,8 @@ class TestH264SpsPpsTracker : public ::testing::Test {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, NoNalus) {
|
TEST_F(TestH264SpsPpsTracker, NoNalus) {
|
||||||
uint8_t data[] = {1, 2, 3};
|
uint8_t data[] = {1, 2, 3};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.h264().packetization_type = kH264FuA;
|
packet.h264().packetization_type = kH264FuA;
|
||||||
packet.dataPtr = data;
|
packet.dataPtr = data;
|
||||||
packet.sizeBytes = sizeof(data);
|
packet.sizeBytes = sizeof(data);
|
||||||
|
|
||||||
@ -115,8 +120,8 @@ TEST_F(TestH264SpsPpsTracker, NoNalus) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, FuAFirstPacket) {
|
TEST_F(TestH264SpsPpsTracker, FuAFirstPacket) {
|
||||||
uint8_t data[] = {1, 2, 3};
|
uint8_t data[] = {1, 2, 3};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.h264().packetization_type = kH264FuA;
|
packet.h264().packetization_type = kH264FuA;
|
||||||
packet.video_header.is_first_packet_in_frame = true;
|
packet.video_header.is_first_packet_in_frame = true;
|
||||||
packet.dataPtr = data;
|
packet.dataPtr = data;
|
||||||
packet.sizeBytes = sizeof(data);
|
packet.sizeBytes = sizeof(data);
|
||||||
@ -131,8 +136,8 @@ TEST_F(TestH264SpsPpsTracker, FuAFirstPacket) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, StapAIncorrectSegmentLength) {
|
TEST_F(TestH264SpsPpsTracker, StapAIncorrectSegmentLength) {
|
||||||
uint8_t data[] = {0, 0, 2, 0};
|
uint8_t data[] = {0, 0, 2, 0};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.h264().packetization_type = kH264StapA;
|
packet.h264().packetization_type = kH264StapA;
|
||||||
packet.video_header.is_first_packet_in_frame = true;
|
packet.video_header.is_first_packet_in_frame = true;
|
||||||
packet.dataPtr = data;
|
packet.dataPtr = data;
|
||||||
packet.sizeBytes = sizeof(data);
|
packet.sizeBytes = sizeof(data);
|
||||||
@ -142,7 +147,7 @@ TEST_F(TestH264SpsPpsTracker, StapAIncorrectSegmentLength) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, NoNalusFirstPacket) {
|
TEST_F(TestH264SpsPpsTracker, NoNalusFirstPacket) {
|
||||||
uint8_t data[] = {1, 2, 3};
|
uint8_t data[] = {1, 2, 3};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.is_first_packet_in_frame = true;
|
packet.video_header.is_first_packet_in_frame = true;
|
||||||
packet.dataPtr = data;
|
packet.dataPtr = data;
|
||||||
packet.sizeBytes = sizeof(data);
|
packet.sizeBytes = sizeof(data);
|
||||||
@ -157,8 +162,8 @@ TEST_F(TestH264SpsPpsTracker, NoNalusFirstPacket) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, IdrNoSpsPpsInserted) {
|
TEST_F(TestH264SpsPpsTracker, IdrNoSpsPpsInserted) {
|
||||||
std::vector<uint8_t> data = {1, 2, 3};
|
std::vector<uint8_t> data = {1, 2, 3};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.h264().packetization_type = kH264FuA;
|
packet.h264().packetization_type = kH264FuA;
|
||||||
|
|
||||||
AddIdr(&packet, 0);
|
AddIdr(&packet, 0);
|
||||||
packet.dataPtr = data.data();
|
packet.dataPtr = data.data();
|
||||||
@ -171,7 +176,7 @@ TEST_F(TestH264SpsPpsTracker, IdrNoSpsPpsInserted) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsPpsInserted) {
|
TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsPpsInserted) {
|
||||||
std::vector<uint8_t> data = {1, 2, 3};
|
std::vector<uint8_t> data = {1, 2, 3};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.is_first_packet_in_frame = true;
|
packet.video_header.is_first_packet_in_frame = true;
|
||||||
|
|
||||||
AddIdr(&packet, 0);
|
AddIdr(&packet, 0);
|
||||||
@ -184,7 +189,7 @@ TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsPpsInserted) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoPpsInserted) {
|
TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoPpsInserted) {
|
||||||
std::vector<uint8_t> data = {1, 2, 3};
|
std::vector<uint8_t> data = {1, 2, 3};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.is_first_packet_in_frame = true;
|
packet.video_header.is_first_packet_in_frame = true;
|
||||||
|
|
||||||
AddSps(&packet, 0, &data);
|
AddSps(&packet, 0, &data);
|
||||||
@ -198,7 +203,7 @@ TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoPpsInserted) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsInserted) {
|
TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsInserted) {
|
||||||
std::vector<uint8_t> data = {1, 2, 3};
|
std::vector<uint8_t> data = {1, 2, 3};
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.is_first_packet_in_frame = true;
|
packet.video_header.is_first_packet_in_frame = true;
|
||||||
|
|
||||||
AddPps(&packet, 0, 0, &data);
|
AddPps(&packet, 0, 0, &data);
|
||||||
@ -212,7 +217,7 @@ TEST_F(TestH264SpsPpsTracker, IdrFirstPacketNoSpsInserted) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, SpsPpsPacketThenIdrFirstPacket) {
|
TEST_F(TestH264SpsPpsTracker, SpsPpsPacketThenIdrFirstPacket) {
|
||||||
std::vector<uint8_t> data;
|
std::vector<uint8_t> data;
|
||||||
VCMPacket sps_pps_packet = GetDefaultPacket();
|
H264VcmPacket sps_pps_packet;
|
||||||
|
|
||||||
// Insert SPS/PPS
|
// Insert SPS/PPS
|
||||||
AddSps(&sps_pps_packet, 0, &data);
|
AddSps(&sps_pps_packet, 0, &data);
|
||||||
@ -225,7 +230,7 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsPacketThenIdrFirstPacket) {
|
|||||||
data.clear();
|
data.clear();
|
||||||
|
|
||||||
// Insert first packet of the IDR
|
// Insert first packet of the IDR
|
||||||
VCMPacket idr_packet = GetDefaultPacket();
|
H264VcmPacket idr_packet;
|
||||||
idr_packet.video_header.is_first_packet_in_frame = true;
|
idr_packet.video_header.is_first_packet_in_frame = true;
|
||||||
AddIdr(&idr_packet, 1);
|
AddIdr(&idr_packet, 1);
|
||||||
data.insert(data.end(), {1, 2, 3});
|
data.insert(data.end(), {1, 2, 3});
|
||||||
@ -243,8 +248,8 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsPacketThenIdrFirstPacket) {
|
|||||||
|
|
||||||
TEST_F(TestH264SpsPpsTracker, SpsPpsIdrInStapA) {
|
TEST_F(TestH264SpsPpsTracker, SpsPpsIdrInStapA) {
|
||||||
std::vector<uint8_t> data;
|
std::vector<uint8_t> data;
|
||||||
VCMPacket packet = GetDefaultPacket();
|
H264VcmPacket packet;
|
||||||
packet.video_header.h264().packetization_type = kH264StapA;
|
packet.h264().packetization_type = kH264StapA;
|
||||||
packet.video_header.is_first_packet_in_frame = true; // Always true for StapA
|
packet.video_header.is_first_packet_in_frame = true; // Always true for StapA
|
||||||
|
|
||||||
data.insert(data.end(), {0}); // First byte is ignored
|
data.insert(data.end(), {0}); // First byte is ignored
|
||||||
@ -284,18 +289,18 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBand) {
|
|||||||
tracker_.InsertSpsPpsNalus(sps, pps);
|
tracker_.InsertSpsPpsNalus(sps, pps);
|
||||||
|
|
||||||
// Insert first packet of the IDR.
|
// Insert first packet of the IDR.
|
||||||
VCMPacket idr_packet = GetDefaultPacket();
|
H264VcmPacket idr_packet;
|
||||||
idr_packet.video_header.is_first_packet_in_frame = true;
|
idr_packet.video_header.is_first_packet_in_frame = true;
|
||||||
AddIdr(&idr_packet, 0);
|
AddIdr(&idr_packet, 0);
|
||||||
idr_packet.dataPtr = kData;
|
idr_packet.dataPtr = kData;
|
||||||
idr_packet.sizeBytes = sizeof(kData);
|
idr_packet.sizeBytes = sizeof(kData);
|
||||||
EXPECT_EQ(1u, idr_packet.video_header.h264().nalus_length);
|
EXPECT_EQ(1u, idr_packet.h264().nalus_length);
|
||||||
EXPECT_EQ(H264SpsPpsTracker::kInsert,
|
EXPECT_EQ(H264SpsPpsTracker::kInsert,
|
||||||
tracker_.CopyAndFixBitstream(&idr_packet));
|
tracker_.CopyAndFixBitstream(&idr_packet));
|
||||||
EXPECT_EQ(3u, idr_packet.video_header.h264().nalus_length);
|
EXPECT_EQ(3u, idr_packet.h264().nalus_length);
|
||||||
EXPECT_EQ(320, idr_packet.width);
|
EXPECT_EQ(320, idr_packet.width);
|
||||||
EXPECT_EQ(240, idr_packet.height);
|
EXPECT_EQ(240, idr_packet.height);
|
||||||
ExpectSpsPpsIdr(idr_packet.video_header.h264(), 0, 0);
|
ExpectSpsPpsIdr(idr_packet.h264(), 0, 0);
|
||||||
|
|
||||||
if (idr_packet.dataPtr != kData) {
|
if (idr_packet.dataPtr != kData) {
|
||||||
// In case CopyAndFixBitStream() prepends SPS/PPS nalus to the packet, it
|
// In case CopyAndFixBitStream() prepends SPS/PPS nalus to the packet, it
|
||||||
@ -317,7 +322,7 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBandWrongNaluHeader) {
|
|||||||
tracker_.InsertSpsPpsNalus(sps, pps);
|
tracker_.InsertSpsPpsNalus(sps, pps);
|
||||||
|
|
||||||
// Insert first packet of the IDR.
|
// Insert first packet of the IDR.
|
||||||
VCMPacket idr_packet = GetDefaultPacket();
|
H264VcmPacket idr_packet;
|
||||||
idr_packet.video_header.is_first_packet_in_frame = true;
|
idr_packet.video_header.is_first_packet_in_frame = true;
|
||||||
AddIdr(&idr_packet, 0);
|
AddIdr(&idr_packet, 0);
|
||||||
idr_packet.dataPtr = kData;
|
idr_packet.dataPtr = kData;
|
||||||
@ -336,7 +341,7 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBandIncompleteNalu) {
|
|||||||
tracker_.InsertSpsPpsNalus(sps, pps);
|
tracker_.InsertSpsPpsNalus(sps, pps);
|
||||||
|
|
||||||
// Insert first packet of the IDR.
|
// Insert first packet of the IDR.
|
||||||
VCMPacket idr_packet = GetDefaultPacket();
|
H264VcmPacket idr_packet;
|
||||||
idr_packet.video_header.is_first_packet_in_frame = true;
|
idr_packet.video_header.is_first_packet_in_frame = true;
|
||||||
AddIdr(&idr_packet, 0);
|
AddIdr(&idr_packet, 0);
|
||||||
idr_packet.dataPtr = kData;
|
idr_packet.dataPtr = kData;
|
||||||
@ -350,7 +355,7 @@ TEST_F(TestH264SpsPpsTracker, SaveRestoreWidthHeight) {
|
|||||||
|
|
||||||
// Insert an SPS/PPS packet with width/height and make sure
|
// Insert an SPS/PPS packet with width/height and make sure
|
||||||
// that information is set on the first IDR packet.
|
// that information is set on the first IDR packet.
|
||||||
VCMPacket sps_pps_packet = GetDefaultPacket();
|
H264VcmPacket sps_pps_packet;
|
||||||
AddSps(&sps_pps_packet, 0, &data);
|
AddSps(&sps_pps_packet, 0, &data);
|
||||||
AddPps(&sps_pps_packet, 0, 1, &data);
|
AddPps(&sps_pps_packet, 0, 1, &data);
|
||||||
sps_pps_packet.dataPtr = data.data();
|
sps_pps_packet.dataPtr = data.data();
|
||||||
@ -361,7 +366,7 @@ TEST_F(TestH264SpsPpsTracker, SaveRestoreWidthHeight) {
|
|||||||
tracker_.CopyAndFixBitstream(&sps_pps_packet));
|
tracker_.CopyAndFixBitstream(&sps_pps_packet));
|
||||||
delete[] sps_pps_packet.dataPtr;
|
delete[] sps_pps_packet.dataPtr;
|
||||||
|
|
||||||
VCMPacket idr_packet = GetDefaultPacket();
|
H264VcmPacket idr_packet;
|
||||||
idr_packet.video_header.is_first_packet_in_frame = true;
|
idr_packet.video_header.is_first_packet_in_frame = true;
|
||||||
AddIdr(&idr_packet, 1);
|
AddIdr(&idr_packet, 1);
|
||||||
data.insert(data.end(), {1, 2, 3});
|
data.insert(data.end(), {1, 2, 3});
|
||||||
|
@ -1155,17 +1155,19 @@ TEST_F(TestBasicJitterBuffer, H264InsertStartCode) {
|
|||||||
TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
|
TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
|
||||||
jitter_buffer_->SetDecodeErrorMode(kNoErrors);
|
jitter_buffer_->SetDecodeErrorMode(kNoErrors);
|
||||||
|
|
||||||
|
auto& h264_header =
|
||||||
|
packet_->video_header.video_type_header.emplace<RTPVideoHeaderH264>();
|
||||||
packet_->timestamp = timestamp_;
|
packet_->timestamp = timestamp_;
|
||||||
packet_->frameType = kVideoFrameKey;
|
packet_->frameType = kVideoFrameKey;
|
||||||
packet_->is_first_packet_in_frame = true;
|
packet_->is_first_packet_in_frame = true;
|
||||||
packet_->markerBit = true;
|
packet_->markerBit = true;
|
||||||
packet_->codec = kVideoCodecH264;
|
packet_->codec = kVideoCodecH264;
|
||||||
packet_->video_header.codec = kVideoCodecH264;
|
packet_->video_header.codec = kVideoCodecH264;
|
||||||
packet_->video_header.h264().nalu_type = H264::NaluType::kIdr;
|
h264_header.nalu_type = H264::NaluType::kIdr;
|
||||||
packet_->video_header.h264().nalus[0].type = H264::NaluType::kIdr;
|
h264_header.nalus[0].type = H264::NaluType::kIdr;
|
||||||
packet_->video_header.h264().nalus[0].sps_id = -1;
|
h264_header.nalus[0].sps_id = -1;
|
||||||
packet_->video_header.h264().nalus[0].pps_id = 0;
|
h264_header.nalus[0].pps_id = 0;
|
||||||
packet_->video_header.h264().nalus_length = 1;
|
h264_header.nalus_length = 1;
|
||||||
bool retransmitted = false;
|
bool retransmitted = false;
|
||||||
EXPECT_EQ(kCompleteSession,
|
EXPECT_EQ(kCompleteSession,
|
||||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||||
@ -1181,14 +1183,14 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
|
|||||||
packet_->markerBit = false;
|
packet_->markerBit = false;
|
||||||
packet_->codec = kVideoCodecH264;
|
packet_->codec = kVideoCodecH264;
|
||||||
packet_->video_header.codec = kVideoCodecH264;
|
packet_->video_header.codec = kVideoCodecH264;
|
||||||
packet_->video_header.h264().nalu_type = H264::NaluType::kStapA;
|
h264_header.nalu_type = H264::NaluType::kStapA;
|
||||||
packet_->video_header.h264().nalus[0].type = H264::NaluType::kSps;
|
h264_header.nalus[0].type = H264::NaluType::kSps;
|
||||||
packet_->video_header.h264().nalus[0].sps_id = 0;
|
h264_header.nalus[0].sps_id = 0;
|
||||||
packet_->video_header.h264().nalus[0].pps_id = -1;
|
h264_header.nalus[0].pps_id = -1;
|
||||||
packet_->video_header.h264().nalus[1].type = H264::NaluType::kPps;
|
h264_header.nalus[1].type = H264::NaluType::kPps;
|
||||||
packet_->video_header.h264().nalus[1].sps_id = 0;
|
h264_header.nalus[1].sps_id = 0;
|
||||||
packet_->video_header.h264().nalus[1].pps_id = 0;
|
h264_header.nalus[1].pps_id = 0;
|
||||||
packet_->video_header.h264().nalus_length = 2;
|
h264_header.nalus_length = 2;
|
||||||
// Not complete since the marker bit hasn't been received.
|
// Not complete since the marker bit hasn't been received.
|
||||||
EXPECT_EQ(kIncomplete,
|
EXPECT_EQ(kIncomplete,
|
||||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||||
@ -1200,11 +1202,11 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
|
|||||||
packet_->markerBit = true;
|
packet_->markerBit = true;
|
||||||
packet_->codec = kVideoCodecH264;
|
packet_->codec = kVideoCodecH264;
|
||||||
packet_->video_header.codec = kVideoCodecH264;
|
packet_->video_header.codec = kVideoCodecH264;
|
||||||
packet_->video_header.h264().nalu_type = H264::NaluType::kIdr;
|
h264_header.nalu_type = H264::NaluType::kIdr;
|
||||||
packet_->video_header.h264().nalus[0].type = H264::NaluType::kIdr;
|
h264_header.nalus[0].type = H264::NaluType::kIdr;
|
||||||
packet_->video_header.h264().nalus[0].sps_id = -1;
|
h264_header.nalus[0].sps_id = -1;
|
||||||
packet_->video_header.h264().nalus[0].pps_id = 0;
|
h264_header.nalus[0].pps_id = 0;
|
||||||
packet_->video_header.h264().nalus_length = 1;
|
h264_header.nalus_length = 1;
|
||||||
// Complete and decodable since the pps and sps are received in the first
|
// Complete and decodable since the pps and sps are received in the first
|
||||||
// packet of this frame.
|
// packet of this frame.
|
||||||
EXPECT_EQ(kCompleteSession,
|
EXPECT_EQ(kCompleteSession,
|
||||||
@ -1222,11 +1224,11 @@ TEST_F(TestBasicJitterBuffer, SpsAndPpsHandling) {
|
|||||||
packet_->markerBit = true;
|
packet_->markerBit = true;
|
||||||
packet_->codec = kVideoCodecH264;
|
packet_->codec = kVideoCodecH264;
|
||||||
packet_->video_header.codec = kVideoCodecH264;
|
packet_->video_header.codec = kVideoCodecH264;
|
||||||
packet_->video_header.h264().nalu_type = H264::NaluType::kSlice;
|
h264_header.nalu_type = H264::NaluType::kSlice;
|
||||||
packet_->video_header.h264().nalus[0].type = H264::NaluType::kSlice;
|
h264_header.nalus[0].type = H264::NaluType::kSlice;
|
||||||
packet_->video_header.h264().nalus[0].sps_id = -1;
|
h264_header.nalus[0].sps_id = -1;
|
||||||
packet_->video_header.h264().nalus[0].pps_id = 0;
|
h264_header.nalus[0].pps_id = 0;
|
||||||
packet_->video_header.h264().nalus_length = 1;
|
h264_header.nalus_length = 1;
|
||||||
// Complete and decodable since sps, pps and key frame has been received.
|
// Complete and decodable since sps, pps and key frame has been received.
|
||||||
EXPECT_EQ(kCompleteSession,
|
EXPECT_EQ(kCompleteSession,
|
||||||
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
jitter_buffer_->InsertPacket(*packet_, &retransmitted));
|
||||||
|
@ -303,18 +303,17 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
if (is_h264 && !is_h264_keyframe) {
|
if (is_h264 && !is_h264_keyframe) {
|
||||||
const RTPVideoHeaderH264& header =
|
const auto* h264_header = absl::get_if<RTPVideoHeaderH264>(
|
||||||
data_buffer_[start_index].video_header.h264();
|
&data_buffer_[start_index].video_header.video_type_header);
|
||||||
|
if (!h264_header || h264_header->nalus_length >= kMaxNalusPerPacket)
|
||||||
if (header.nalus_length >= kMaxNalusPerPacket)
|
|
||||||
return found_frames;
|
return found_frames;
|
||||||
|
|
||||||
for (size_t j = 0; j < header.nalus_length; ++j) {
|
for (size_t j = 0; j < h264_header->nalus_length; ++j) {
|
||||||
if (header.nalus[j].type == H264::NaluType::kSps) {
|
if (h264_header->nalus[j].type == H264::NaluType::kSps) {
|
||||||
has_h264_sps = true;
|
has_h264_sps = true;
|
||||||
} else if (header.nalus[j].type == H264::NaluType::kPps) {
|
} else if (h264_header->nalus[j].type == H264::NaluType::kPps) {
|
||||||
has_h264_pps = true;
|
has_h264_pps = true;
|
||||||
} else if (header.nalus[j].type == H264::NaluType::kIdr) {
|
} else if (h264_header->nalus[j].type == H264::NaluType::kIdr) {
|
||||||
has_h264_idr = true;
|
has_h264_idr = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -110,8 +110,10 @@ std::vector<NaluInfo> VCMSessionInfo::GetNaluInfos() const {
|
|||||||
return std::vector<NaluInfo>();
|
return std::vector<NaluInfo>();
|
||||||
std::vector<NaluInfo> nalu_infos;
|
std::vector<NaluInfo> nalu_infos;
|
||||||
for (const VCMPacket& packet : packets_) {
|
for (const VCMPacket& packet : packets_) {
|
||||||
for (size_t i = 0; i < packet.video_header.h264().nalus_length; ++i) {
|
const auto& h264 =
|
||||||
nalu_infos.push_back(packet.video_header.h264().nalus[i]);
|
absl::get<RTPVideoHeaderH264>(packet.video_header.video_type_header);
|
||||||
|
for (size_t i = 0; i < h264.nalus_length; ++i) {
|
||||||
|
nalu_infos.push_back(h264.nalus[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nalu_infos;
|
return nalu_infos;
|
||||||
@@ -175,8 +177,9 @@ size_t VCMSessionInfo::InsertBuffer(uint8_t* frame_buffer,
   // header supplied by the H264 depacketizer.
   const size_t kH264NALHeaderLengthInBytes = 1;
   const size_t kLengthFieldLength = 2;
-  if (packet.video_header.codec == kVideoCodecH264 &&
-      packet.video_header.h264().packetization_type == kH264StapA) {
+  const auto* h264 =
+      absl::get_if<RTPVideoHeaderH264>(&packet.video_header.video_type_header);
+  if (h264 && h264->packetization_type == kH264StapA) {
     size_t required_length = 0;
     const uint8_t* nalu_ptr = packet_buffer + kH264NALHeaderLengthInBytes;
     while (nalu_ptr < packet_buffer + packet.sizeBytes) {
@@ -508,17 +508,19 @@ class TestPacketBufferH264 : public TestPacketBuffer {
               uint8_t* data = nullptr) {  // data pointer
     VCMPacket packet;
     packet.codec = kVideoCodecH264;
+    auto& h264_header =
+        packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
     packet.seqNum = seq_num;
     packet.timestamp = timestamp;
     if (keyframe == kKeyFrame) {
       if (sps_pps_idr_is_keyframe_) {
-        packet.video_header.h264().nalus[0].type = H264::NaluType::kSps;
-        packet.video_header.h264().nalus[1].type = H264::NaluType::kPps;
-        packet.video_header.h264().nalus[2].type = H264::NaluType::kIdr;
-        packet.video_header.h264().nalus_length = 3;
+        h264_header.nalus[0].type = H264::NaluType::kSps;
+        h264_header.nalus[1].type = H264::NaluType::kPps;
+        h264_header.nalus[2].type = H264::NaluType::kIdr;
+        h264_header.nalus_length = 3;
       } else {
-        packet.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
-        packet.video_header.h264().nalus_length = 1;
+        h264_header.nalus[0].type = H264::NaluType::kIdr;
+        h264_header.nalus_length = 1;
       }
     }
     packet.is_first_packet_in_frame = first == kFirst;
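The write-side pattern used throughout these test hunks: emplace<RTPVideoHeaderH264>() switches the variant to the H.264 alternative and returns a reference to the freshly constructed header, which is then filled in place. A minimal sketch under the same assumptions; MakeH264IdrPacket is a hypothetical helper, not part of this change:

// Sketch: build a single-NALU IDR packet the way the updated tests do.
VCMPacket MakeH264IdrPacket(uint16_t seq_num) {
  VCMPacket packet;
  packet.codec = kVideoCodecH264;
  // emplace() destroys whatever alternative was active and returns a
  // reference to the new RTPVideoHeaderH264, so no separate get<>() is needed.
  auto& h264_header =
      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
  h264_header.nalus[0].type = H264::NaluType::kIdr;
  h264_header.nalus_length = 1;
  packet.seqNum = seq_num;
  return packet;
}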
@@ -592,12 +594,14 @@ TEST_P(TestPacketBufferH264Parameterized, GetBitstreamBufferPadding) {
       new uint8_t[sizeof(data_data) + EncodedImage::kBufferPaddingBytesH264]);

   VCMPacket packet;
-  packet.video_header.h264().nalus_length = 1;
-  packet.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
+  auto& h264_header =
+      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus_length = 1;
+  h264_header.nalus[0].type = H264::NaluType::kIdr;
+  h264_header.packetization_type = kH264SingleNalu;
   packet.seqNum = seq_num;
   packet.codec = kVideoCodecH264;
   packet.insertStartCode = true;
-  packet.video_header.h264().packetization_type = kH264SingleNalu;
   packet.dataPtr = data;
   packet.sizeBytes = sizeof(data_data);
   packet.is_first_packet_in_frame = true;
@@ -755,7 +759,9 @@ TEST_F(TestPacketBuffer, IncomingCodecChange) {
   EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));

   packet.codec = kVideoCodecH264;
-  packet.video_header.h264().nalus_length = 1;
+  auto& h264_header =
+      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus_length = 1;
   packet.timestamp = 3;
   packet.seqNum = 3;
   EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
@@ -778,7 +784,9 @@ TEST_F(TestPacketBuffer, TooManyNalusInPacket) {
   packet.frameType = kVideoFrameKey;
   packet.is_first_packet_in_frame = true;
   packet.markerBit = true;
-  packet.video_header.h264().nalus_length = kMaxNalusPerPacket;
+  auto& h264_header =
+      packet.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus_length = kMaxNalusPerPacket;
   packet.sizeBytes = 0;
   packet.dataPtr = nullptr;
   EXPECT_TRUE(packet_buffer_->InsertPacket(&packet));
@@ -873,9 +881,10 @@ class TestPacketBufferH264IdrIsKeyframe
 };

 TEST_F(TestPacketBufferH264IdrIsKeyframe, IdrIsKeyframe) {
-  packet_.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
-  packet_.video_header.h264().nalus_length = 1;
+  auto& h264_header =
+      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus[0].type = H264::NaluType::kIdr;
+  h264_header.nalus_length = 1;
   packet_buffer_->InsertPacket(&packet_);

   ASSERT_EQ(1u, frames_from_callback_.size());
@@ -883,10 +892,12 @@ TEST_F(TestPacketBufferH264IdrIsKeyframe, IdrIsKeyframe) {
 }

 TEST_F(TestPacketBufferH264IdrIsKeyframe, SpsPpsIdrIsKeyframe) {
-  packet_.video_header.h264().nalus[0].type = H264::NaluType::kSps;
-  packet_.video_header.h264().nalus[1].type = H264::NaluType::kPps;
-  packet_.video_header.h264().nalus[2].type = H264::NaluType::kIdr;
-  packet_.video_header.h264().nalus_length = 3;
+  auto& h264_header =
+      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus[0].type = H264::NaluType::kSps;
+  h264_header.nalus[1].type = H264::NaluType::kPps;
+  h264_header.nalus[2].type = H264::NaluType::kIdr;
+  h264_header.nalus_length = 3;

   packet_buffer_->InsertPacket(&packet_);

@@ -902,8 +913,10 @@ class TestPacketBufferH264SpsPpsIdrIsKeyframe
 };

 TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, IdrIsNotKeyframe) {
-  packet_.video_header.h264().nalus[0].type = H264::NaluType::kIdr;
-  packet_.video_header.h264().nalus_length = 1;
+  auto& h264_header =
+      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus[0].type = H264::NaluType::kIdr;
+  h264_header.nalus_length = 1;

   packet_buffer_->InsertPacket(&packet_);

@@ -912,9 +925,11 @@ TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, IdrIsNotKeyframe) {
 }

 TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIsNotKeyframe) {
-  packet_.video_header.h264().nalus[0].type = H264::NaluType::kSps;
-  packet_.video_header.h264().nalus[1].type = H264::NaluType::kPps;
-  packet_.video_header.h264().nalus_length = 2;
+  auto& h264_header =
+      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus[0].type = H264::NaluType::kSps;
+  h264_header.nalus[1].type = H264::NaluType::kPps;
+  h264_header.nalus_length = 2;

   packet_buffer_->InsertPacket(&packet_);

@@ -923,10 +938,12 @@ TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIsNotKeyframe) {
 }

 TEST_F(TestPacketBufferH264SpsPpsIdrIsKeyframe, SpsPpsIdrIsKeyframe) {
-  packet_.video_header.h264().nalus[0].type = H264::NaluType::kSps;
-  packet_.video_header.h264().nalus[1].type = H264::NaluType::kPps;
-  packet_.video_header.h264().nalus[2].type = H264::NaluType::kIdr;
-  packet_.video_header.h264().nalus_length = 3;
+  auto& h264_header =
+      packet_.video_header.video_type_header.emplace<RTPVideoHeaderH264>();
+  h264_header.nalus[0].type = H264::NaluType::kSps;
+  h264_header.nalus[1].type = H264::NaluType::kPps;
+  h264_header.nalus[2].type = H264::NaluType::kIdr;
+  h264_header.nalus_length = 3;

   packet_buffer_->InsertPacket(&packet_);

@@ -98,8 +98,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFrames) {
       0, receiver_->RegisterPacketRequestCallback(&packet_request_callback_));
   const size_t kPaddingSize = 220;
   const uint8_t payload[kPaddingSize] = {0};
-  WebRtcRTPHeader header;
-  memset(&header, 0, sizeof(header));
+  WebRtcRTPHeader header = {};
   header.frameType = kEmptyFrame;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
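The memset() calls are dropped in this and the following hunks because WebRtcRTPHeader now (indirectly) contains an absl::variant, which is not trivially copyable; zero-filling it with memset() is undefined behavior, while "= {}" value-initializes every member. A standalone illustration with simplified stand-in types (FakeHeader is hypothetical, not WebRTC's type):

#include "absl/types/variant.h"

// Simplified stand-in for a header struct that gained a non-trivial member.
struct FakeHeader {
  int padding_length = 0;
  absl::variant<absl::monostate, int> video_type_header;
};

int main() {
  FakeHeader header = {};  // value-initialization: safe for all members
  // memset(&header, 0, sizeof(header));  // would corrupt the variant's state
  return absl::holds_alternative<absl::monostate>(header.video_type_header)
             ? 0
             : 1;
}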
@@ -122,8 +121,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyFramesWithLosses) {
   const size_t kFrameSize = 1200;
   const size_t kPaddingSize = 220;
   const uint8_t payload[kFrameSize] = {0};
-  WebRtcRTPHeader header;
-  memset(&header, 0, sizeof(header));
+  WebRtcRTPHeader header = {};
   header.frameType = kEmptyFrame;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
@@ -173,8 +171,7 @@ TEST_F(TestVideoReceiver, PaddingOnlyAndVideo) {
   const size_t kFrameSize = 1200;
   const size_t kPaddingSize = 220;
   const uint8_t payload[kFrameSize] = {0};
-  WebRtcRTPHeader header;
-  memset(&header, 0, sizeof(header));
+  WebRtcRTPHeader header = {};
   header.frameType = kEmptyFrame;
   header.video_header().is_first_packet_in_frame = false;
   header.header.markerBit = false;
@@ -134,9 +134,9 @@ class RtpVideoStreamReceiverTest : public testing::Test {
   }

   WebRtcRTPHeader GetDefaultPacket() {
-    WebRtcRTPHeader packet;
-    memset(&packet, 0, sizeof(packet));
+    WebRtcRTPHeader packet = {};
     packet.video_header().codec = kVideoCodecH264;
+    packet.video_header().video_type_header.emplace<RTPVideoHeaderH264>();
     return packet;
   }

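GetDefaultPacket() now emplaces the RTPVideoHeaderH264 alternative up front, which is what lets the AddSps()/AddPps()/AddIdr() helpers in the following hunks call absl::get<RTPVideoHeaderH264>() without risking absl::bad_variant_access. A minimal sketch of a helper written against that invariant; AppendNalu is hypothetical, not part of this change:

#include "absl/types/variant.h"

// Sketch: assumes the packet came from a GetDefaultPacket()-style factory that
// already emplaced RTPVideoHeaderH264, so absl::get<>() below cannot fail.
void AppendNalu(WebRtcRTPHeader* packet, const NaluInfo& info) {
  auto& h264 =
      absl::get<RTPVideoHeaderH264>(packet->video_header().video_type_header);
  h264.nalus[h264.nalus_length++] = info;
}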
@@ -151,9 +151,9 @@ class RtpVideoStreamReceiverTest : public testing::Test {
     info.pps_id = -1;
     data->push_back(H264::NaluType::kSps);
     data->push_back(sps_id);
-    packet->video_header()
-        .h264()
-        .nalus[packet->video_header().h264().nalus_length++] = info;
+    auto& h264 =
+        absl::get<RTPVideoHeaderH264>(packet->video_header().video_type_header);
+    h264.nalus[h264.nalus_length++] = info;
   }

   void AddPps(WebRtcRTPHeader* packet,
@@ -166,9 +166,9 @@ class RtpVideoStreamReceiverTest : public testing::Test {
     info.pps_id = pps_id;
     data->push_back(H264::NaluType::kPps);
     data->push_back(pps_id);
-    packet->video_header()
-        .h264()
-        .nalus[packet->video_header().h264().nalus_length++] = info;
+    auto& h264 =
+        absl::get<RTPVideoHeaderH264>(packet->video_header().video_type_header);
+    h264.nalus[h264.nalus_length++] = info;
   }

   void AddIdr(WebRtcRTPHeader* packet, int pps_id) {
@@ -176,9 +176,9 @@ class RtpVideoStreamReceiverTest : public testing::Test {
     info.type = H264::NaluType::kIdr;
     info.sps_id = -1;
     info.pps_id = pps_id;
-    packet->video_header()
-        .h264()
-        .nalus[packet->video_header().h264().nalus_length++] = info;
+    auto& h264 =
+        absl::get<RTPVideoHeaderH264>(packet->video_header().video_type_header);
+    h264.nalus[h264.nalus_length++] = info;
   }

  protected:
@@ -202,9 +202,8 @@ class RtpVideoStreamReceiverTest : public testing::Test {
 };

 TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrame) {
-  WebRtcRTPHeader rtp_header;
+  WebRtcRTPHeader rtp_header = {};
   const std::vector<uint8_t> data({1, 2, 3, 4});
-  memset(&rtp_header, 0, sizeof(rtp_header));
   rtp_header.header.sequenceNumber = 1;
   rtp_header.header.markerBit = 1;
   rtp_header.video_header().is_first_packet_in_frame = true;
@@ -260,9 +259,8 @@ TEST_F(RtpVideoStreamReceiverTest,
 }

 TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
-  WebRtcRTPHeader rtp_header;
+  WebRtcRTPHeader rtp_header = {};
   const std::vector<uint8_t> data({1, 2, 3, 4});
-  memset(&rtp_header, 0, sizeof(rtp_header));
   rtp_header.header.sequenceNumber = 1;
   rtp_header.header.markerBit = 1;
   rtp_header.video_header().is_first_packet_in_frame = true;
@@ -409,9 +407,8 @@ TEST_F(RtpVideoStreamReceiverTest, PaddingInMediaStream) {
 }

 TEST_F(RtpVideoStreamReceiverTest, RequestKeyframeIfFirstFrameIsDelta) {
-  WebRtcRTPHeader rtp_header;
+  WebRtcRTPHeader rtp_header = {};
   const std::vector<uint8_t> data({1, 2, 3, 4});
-  memset(&rtp_header, 0, sizeof(rtp_header));
   rtp_header.header.sequenceNumber = 1;
   rtp_header.header.markerBit = 1;
   rtp_header.video_header().is_first_packet_in_frame = true;