Move SPS/PPS/IDR requirement from RtpFrameObject to PacketBuffer.

BUG=webrtc:8423

Change-Id: I0f0d59461afead700c20c9a2ed9b2bc991590b4a
Reviewed-on: https://webrtc-review.googlesource.com/15101
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Rasmus Brandt <brandtr@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20559}
Author: Rasmus Brandt
Authored: 2017-11-02 14:28:06 +01:00
Committed by: Commit Bot
Parent: cdfbcd4068
Commit: 88f080ae9a

9 changed files with 185 additions and 106 deletions
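For orientation, the keyframe rule that this CL relocates into PacketBuffer::FindFrames() boils down to the helper below. This is a condensed sketch written for this summary rather than code from the CL; the boolean names mirror the flags used in the diff.

// Sketch only: an H.264 frame counts as a keyframe if it contains an IDR NAL
// unit, plus SPS and PPS NAL units when the "WebRTC-SpsPpsIdrIsH264Keyframe"
// field trial is enabled.
bool IsH264Keyframe(bool has_sps, bool has_pps, bool has_idr,
                    bool sps_pps_idr_is_h264_keyframe) {
  if (sps_pps_idr_is_h264_keyframe)
    return has_idr && has_sps && has_pps;
  return has_idr;
}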


@@ -89,6 +89,7 @@ rtc_static_library("video_coding") {
   }
   deps = [
+    ":codec_globals_headers",
     ":video_coding_utility",
     ":webrtc_h264",
     ":webrtc_i420",


@@ -10,13 +10,9 @@
 #include "modules/video_coding/frame_object.h"

-#include <sstream>
-
 #include "common_video/h264/h264_common.h"
 #include "modules/video_coding/packet_buffer.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/logging.h"
-#include "system_wrappers/include/field_trial.h"

 namespace webrtc {
 namespace video_coding {
@@ -54,6 +50,7 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   _payloadType = first_packet->payloadType;
   _timeStamp = first_packet->timestamp;
   ntp_time_ms_ = first_packet->ntp_time_ms_;
+  _frameType = first_packet->frameType;

   // Setting frame's playout delays to the same values
   // as of the first packet's.
@@ -73,58 +70,6 @@ RtpFrameObject::RtpFrameObject(PacketBuffer* packet_buffer,
   _buffer = new uint8_t[_size];
   _length = frame_size;

-  // For H264 frames we can't determine the frame type by just looking at the
-  // first packet. Instead we consider the frame to be a keyframe if it contains
-  // an IDR, and SPS/PPS if the field trial is set.
-  if (codec_type_ == kVideoCodecH264) {
-    _frameType = kVideoFrameDelta;
-    frame_type_ = kVideoFrameDelta;
-
-    bool contains_sps = false;
-    bool contains_pps = false;
-    bool contains_idr = false;
-    for (uint16_t seq_num = first_seq_num;
-         seq_num != static_cast<uint16_t>(last_seq_num + 1) &&
-         _frameType == kVideoFrameDelta;
-         ++seq_num) {
-      VCMPacket* packet = packet_buffer_->GetPacket(seq_num);
-      RTC_CHECK(packet);
-      const RTPVideoHeaderH264& header = packet->video_header.codecHeader.H264;
-      for (size_t i = 0; i < header.nalus_length; ++i) {
-        if (header.nalus[i].type == H264::NaluType::kSps) {
-          contains_sps = true;
-        } else if (header.nalus[i].type == H264::NaluType::kPps) {
-          contains_pps = true;
-        } else if (header.nalus[i].type == H264::NaluType::kIdr) {
-          contains_idr = true;
-        }
-      }
-    }
-    const bool sps_pps_idr_is_keyframe =
-        field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe");
-    if ((sps_pps_idr_is_keyframe && contains_idr && contains_sps &&
-         contains_pps) ||
-        (!sps_pps_idr_is_keyframe && contains_idr)) {
-      _frameType = kVideoFrameKey;
-      frame_type_ = kVideoFrameKey;
-    }
-    if (contains_idr && (!contains_sps || !contains_pps)) {
-      std::stringstream ss;
-      ss << "Received H.264-IDR frame "
-         << "(SPS: " << contains_sps << ", PPS: " << contains_pps << "). ";
-      if (sps_pps_idr_is_keyframe) {
-        ss << "Treating as delta frame since WebRTC-SpsPpsIdrIsH264Keyframe is "
-              "enabled.";
-      } else {
-        ss << "Treating as key frame since WebRTC-SpsPpsIdrIsH264Keyframe is "
-              "disabled.";
-      }
-      LOG(LS_WARNING) << ss.str();
-    }
-  } else {
-    _frameType = first_packet->frameType;
-    frame_type_ = first_packet->frameType;
-  }
   bool bitstream_copied = GetBitstream(_buffer);
   RTC_DCHECK(bitstream_copied);
   _encodedWidth = first_packet->width;


@@ -16,6 +16,7 @@
 #include "common_video/h264/h264_common.h"
 #include "common_video/h264/pps_parser.h"
 #include "common_video/h264/sps_parser.h"
+#include "modules/video_coding/codecs/h264/include/h264_globals.h"
 #include "modules/video_coding/frame_object.h"
 #include "modules/video_coding/packet_buffer.h"
 #include "rtc_base/checks.h"
@@ -35,14 +36,14 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
   const uint8_t* data = packet->dataPtr;
   const size_t data_size = packet->sizeBytes;
   const RTPVideoHeader& video_header = packet->video_header;
-  const RTPVideoHeaderH264& codec_header = video_header.codecHeader.H264;
+  RTPVideoHeaderH264* codec_header = &packet->video_header.codecHeader.H264;

   bool append_sps_pps = false;
   auto sps = sps_data_.end();
   auto pps = pps_data_.end();

-  for (size_t i = 0; i < codec_header.nalus_length; ++i) {
-    const NaluInfo& nalu = codec_header.nalus[i];
+  for (size_t i = 0; i < codec_header->nalus_length; ++i) {
+    const NaluInfo& nalu = codec_header->nalus[i];
     switch (nalu.type) {
       case H264::NaluType::kSps: {
         sps_data_[nalu.sps_id].width = packet->width;
@@ -109,7 +110,7 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
     required_size += pps->second.size + sizeof(start_code_h264);
   }

-  if (codec_header.packetization_type == kH264StapA) {
+  if (codec_header->packetization_type == kH264StapA) {
     const uint8_t* nalu_ptr = data + 1;
     while (nalu_ptr < data + data_size) {
       RTC_DCHECK(video_header.is_first_packet_in_frame);
@@ -144,10 +145,27 @@ H264SpsPpsTracker::PacketAction H264SpsPpsTracker::CopyAndFixBitstream(
     insert_at += sizeof(start_code_h264);
     memcpy(insert_at, pps->second.data.get(), pps->second.size);
     insert_at += pps->second.size;
+
+    // Update codec header to reflect the newly added SPS and PPS.
+    NaluInfo sps_info;
+    sps_info.type = H264::NaluType::kSps;
+    sps_info.sps_id = sps->first;
+    sps_info.pps_id = -1;
+    NaluInfo pps_info;
+    pps_info.type = H264::NaluType::kPps;
+    pps_info.sps_id = sps->first;
+    pps_info.pps_id = pps->first;
+    if (codec_header->nalus_length + 2 <= kMaxNalusPerPacket) {
+      codec_header->nalus[codec_header->nalus_length++] = sps_info;
+      codec_header->nalus[codec_header->nalus_length++] = pps_info;
+    } else {
+      LOG(LS_WARNING) << "Not enough space in H.264 codec header to insert "
+                         "SPS/PPS provided out-of-band.";
+    }
   }

   // Copy the rest of the bitstream and insert start codes.
-  if (codec_header.packetization_type == kH264StapA) {
+  if (codec_header->packetization_type == kH264StapA) {
     const uint8_t* nalu_ptr = data + 1;
     while (nalu_ptr < data + data_size) {
       memcpy(insert_at, start_code_h264, sizeof(start_code_h264));


@@ -21,6 +21,31 @@ namespace video_coding {
 namespace {
 const uint8_t start_code[] = {0, 0, 0, 1};

+void ExpectSpsPpsIdr(const RTPVideoHeaderH264& codec_header,
+                     uint8_t sps_id,
+                     uint8_t pps_id) {
+  bool contains_sps = false;
+  bool contains_pps = false;
+  bool contains_idr = false;
+  for (const auto& nalu : codec_header.nalus) {
+    if (nalu.type == H264::NaluType::kSps) {
+      EXPECT_EQ(sps_id, nalu.sps_id);
+      contains_sps = true;
+    } else if (nalu.type == H264::NaluType::kPps) {
+      EXPECT_EQ(sps_id, nalu.sps_id);
+      EXPECT_EQ(pps_id, nalu.pps_id);
+      contains_pps = true;
+    } else if (nalu.type == H264::NaluType::kIdr) {
+      EXPECT_EQ(pps_id, nalu.pps_id);
+      contains_idr = true;
+    }
+  }
+  EXPECT_TRUE(contains_sps);
+  EXPECT_TRUE(contains_pps);
+  EXPECT_TRUE(contains_idr);
+}
+
 }  // namespace

 class TestH264SpsPpsTracker : public ::testing::Test {
@@ -264,10 +289,14 @@ TEST_F(TestH264SpsPpsTracker, SpsPpsOutOfBand) {
   AddIdr(&idr_packet, 0);
   idr_packet.dataPtr = kData;
   idr_packet.sizeBytes = sizeof(kData);

+  EXPECT_EQ(1u, idr_packet.video_header.codecHeader.H264.nalus_length);
   EXPECT_EQ(H264SpsPpsTracker::kInsert,
             tracker_.CopyAndFixBitstream(&idr_packet));
+  EXPECT_EQ(3u, idr_packet.video_header.codecHeader.H264.nalus_length);

   EXPECT_EQ(320, idr_packet.width);
   EXPECT_EQ(240, idr_packet.height);
+  ExpectSpsPpsIdr(idr_packet.video_header.codecHeader.H264, 0, 0);
+
   if (idr_packet.dataPtr != kData) {
     // In case CopyAndFixBitStream() prepends SPS/PPS nalus to the packet, it
     // uses new uint8_t[] to allocate memory. Caller of CopyAndFixBitStream()


@@ -12,6 +12,7 @@
 #include <algorithm>
 #include <limits>
+#include <sstream>
 #include <utility>

 #include "common_video/h264/h264_common.h"
@@ -20,6 +21,7 @@
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "system_wrappers/include/clock.h"
+#include "system_wrappers/include/field_trial.h"

 namespace webrtc {
 namespace video_coding {
@@ -45,7 +47,9 @@ PacketBuffer::PacketBuffer(Clock* clock,
       is_cleared_to_first_seq_num_(false),
       data_buffer_(start_buffer_size),
       sequence_buffer_(start_buffer_size),
-      received_frame_callback_(received_frame_callback) {
+      received_frame_callback_(received_frame_callback),
+      sps_pps_idr_is_h264_keyframe_(
+          field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe")) {
   RTC_DCHECK_LE(start_buffer_size, max_buffer_size);
   // Buffer size must always be a power of 2.
   RTC_DCHECK((start_buffer_size & (start_buffer_size - 1)) == 0);
@@ -269,11 +273,15 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
       // the |frame_begin| flag is set.
       int start_index = index;
       size_t tested_packets = 0;
-      bool is_h264 = data_buffer_[start_index].codec == kVideoCodecH264;
-      bool is_h264_keyframe = false;
       int64_t frame_timestamp = data_buffer_[start_index].timestamp;

+      // Identify H.264 keyframes by means of SPS, PPS, and IDR.
+      bool is_h264 = data_buffer_[start_index].codec == kVideoCodecH264;
+      bool has_h264_sps = false;
+      bool has_h264_pps = false;
+      bool has_h264_idr = false;
+      bool is_h264_keyframe = false;
+
       while (true) {
         ++tested_packets;
         frame_size += data_buffer_[start_index].sizeBytes;
@@ -287,12 +295,20 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
         if (is_h264 && !is_h264_keyframe) {
           const RTPVideoHeaderH264& header =
               data_buffer_[start_index].video_header.codecHeader.H264;
-          for (size_t i = 0; i < header.nalus_length; ++i) {
-            if (header.nalus[i].type == H264::NaluType::kIdr) {
-              is_h264_keyframe = true;
-              break;
+          for (size_t j = 0; j < header.nalus_length; ++j) {
+            if (header.nalus[j].type == H264::NaluType::kSps) {
+              has_h264_sps = true;
+            } else if (header.nalus[j].type == H264::NaluType::kPps) {
+              has_h264_pps = true;
+            } else if (header.nalus[j].type == H264::NaluType::kIdr) {
+              has_h264_idr = true;
             }
           }
+          if ((sps_pps_idr_is_h264_keyframe_ && has_h264_idr && has_h264_sps &&
+               has_h264_pps) ||
+              (!sps_pps_idr_is_h264_keyframe_ && has_h264_idr)) {
+            is_h264_keyframe = true;
+          }
         }

         if (tested_packets == size_)
@@ -315,18 +331,45 @@ std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
         --start_seq_num;
       }

-      // If this is H264 but not a keyframe, make sure there are no gaps in the
-      // packet sequence numbers up until this point.
-      if (is_h264 && !is_h264_keyframe &&
-          missing_packets_.upper_bound(start_seq_num) !=
-              missing_packets_.begin()) {
-        uint16_t stop_index = (index + 1) % size_;
-        while (start_index != stop_index) {
-          sequence_buffer_[start_index].frame_created = false;
-          start_index = (start_index + 1) % size_;
+      if (is_h264) {
+        // Warn if this is an unsafe frame.
+        if (has_h264_idr && (!has_h264_sps || !has_h264_pps)) {
+          std::stringstream ss;
+          ss << "Received H.264-IDR frame "
+             << "(SPS: " << has_h264_sps << ", PPS: " << has_h264_pps << "). ";
+          if (sps_pps_idr_is_h264_keyframe_) {
+            ss << "Treating as delta frame since "
+                  "WebRTC-SpsPpsIdrIsH264Keyframe is enabled.";
+          } else {
+            ss << "Treating as key frame since "
+                  "WebRTC-SpsPpsIdrIsH264Keyframe is disabled.";
+          }
+          LOG(LS_WARNING) << ss.str();
         }

-        return found_frames;
+        // Now that we have decided whether to treat this frame as a key frame
+        // or delta frame in the frame buffer, we update the field that
+        // determines if the RtpFrameObject is a key frame or delta frame.
+        const size_t first_packet_index = start_seq_num % size_;
+        RTC_CHECK_LT(first_packet_index, size_);
+        if (is_h264_keyframe) {
+          data_buffer_[first_packet_index].frameType = kVideoFrameKey;
+        } else {
+          data_buffer_[first_packet_index].frameType = kVideoFrameDelta;
+        }
+
+        // If this is not a keyframe, make sure there are no gaps in the
+        // packet sequence numbers up until this point.
+        if (!is_h264_keyframe && missing_packets_.upper_bound(start_seq_num) !=
+                                     missing_packets_.begin()) {
+          uint16_t stop_index = (index + 1) % size_;
+          while (start_index != stop_index) {
+            sequence_buffer_[start_index].frame_created = false;
+            start_index = (start_index + 1) % size_;
+          }
+
+          return found_frames;
+        }
       }

       missing_packets_.erase(missing_packets_.begin(),


@@ -160,6 +160,10 @@ class PacketBuffer {
   std::set<uint16_t, DescendingSeqNumComp<uint16_t>> missing_packets_
       RTC_GUARDED_BY(crit_);

+  // Indicates if we should require SPS, PPS, and IDR for a particular
+  // RTP timestamp to treat the corresponding frame as a keyframe.
+  const bool sps_pps_idr_is_h264_keyframe_;
+
   mutable volatile int ref_count_ = 0;
 };
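As the tests further down in this CL do, the flag can be toggled per scope with test::ScopedFieldTrials. Below is a minimal illustrative sketch, not part of the CL; it assumes the usual WebRTC test headers ("test/field_trial.h", "test/gtest.h"), and the test name is made up.

#include "system_wrappers/include/field_trial.h"
#include "test/field_trial.h"
#include "test/gtest.h"

// With the trial enabled, PacketBuffer::FindFrames() only marks an H.264 frame
// as a keyframe if it carries SPS, PPS, and IDR NAL units, so an IDR-only
// frame is handed to the frame buffer as a delta frame.
TEST(SpsPpsIdrIsH264KeyframeSketch, TrialTogglesKeyframeRule) {
  webrtc::test::ScopedFieldTrials trials(
      "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/");
  EXPECT_TRUE(
      webrtc::field_trial::IsEnabled("WebRTC-SpsPpsIdrIsH264Keyframe"));
}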


@@ -27,8 +27,10 @@ namespace video_coding {
 class TestPacketBuffer : public ::testing::Test,
                          public OnReceivedFrameCallback {
  protected:
-  TestPacketBuffer()
-      : rand_(0x7732213),
+  TestPacketBuffer() : TestPacketBuffer("") {}
+  explicit TestPacketBuffer(std::string field_trials)
+      : scoped_field_trials_(field_trials),
+        rand_(0x7732213),
         clock_(new SimulatedClock(0)),
         packet_buffer_(
             PacketBuffer::Create(clock_.get(), kStartSize, kMaxSize, this)) {}
@@ -81,6 +83,8 @@ class TestPacketBuffer : public ::testing::Test,
   static constexpr int kStartSize = 16;
   static constexpr int kMaxSize = 64;

+  const test::ScopedFieldTrials scoped_field_trials_;
+
   Random rand_;
   std::unique_ptr<SimulatedClock> clock_;
   rtc::scoped_refptr<PacketBuffer> packet_buffer_;
@@ -423,15 +427,17 @@ TEST_F(TestPacketBuffer, GetBitstreamOneFrameFullBuffer) {
   EXPECT_EQ(memcmp(result, expected, kStartSize), 0);
 }

-class TestPacketBufferH264 : public TestPacketBuffer,
-                             public ::testing::WithParamInterface<bool> {
+// If |sps_pps_idr_is_keyframe| is true, we require keyframes to contain
+// SPS/PPS/IDR and the keyframes we create as part of the test do contain
+// SPS/PPS/IDR. If |sps_pps_idr_is_keyframe| is false, we only require and
+// create keyframes containing only IDR.
+class TestPacketBufferH264 : public TestPacketBuffer {
  protected:
-  TestPacketBufferH264() : TestPacketBufferH264(GetParam()) {}
   explicit TestPacketBufferH264(bool sps_pps_idr_is_keyframe)
-      : sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe),
-        scoped_field_trials_(sps_pps_idr_is_keyframe_
-                                 ? "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"
-                                 : "") {}
+      : TestPacketBuffer(sps_pps_idr_is_keyframe
+                             ? "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"
+                             : ""),
+        sps_pps_idr_is_keyframe_(sps_pps_idr_is_keyframe) {}

   bool InsertH264(uint16_t seq_num,     // packet sequence number
                   IsKeyFrame keyframe,  // is keyframe
@@ -468,14 +474,22 @@
   }

   const bool sps_pps_idr_is_keyframe_;
-  const test::ScopedFieldTrials scoped_field_trials_;
 };

+// This fixture is used to test the general behaviour of the packet buffer
+// in both configurations.
+class TestPacketBufferH264Parameterized
+    : public ::testing::WithParamInterface<bool>,
+      public TestPacketBufferH264 {
+ protected:
+  TestPacketBufferH264Parameterized() : TestPacketBufferH264(GetParam()) {}
+};
+
 INSTANTIATE_TEST_CASE_P(SpsPpsIdrIsKeyframe,
-                        TestPacketBufferH264,
+                        TestPacketBufferH264Parameterized,
                         ::testing::Values(false, true));

-TEST_P(TestPacketBufferH264, GetBitstreamOneFrameFullBuffer) {
+TEST_P(TestPacketBufferH264Parameterized, GetBitstreamOneFrameFullBuffer) {
   uint8_t* data_arr[kStartSize];
   uint8_t expected[kStartSize];
   uint8_t result[kStartSize];
@@ -501,7 +515,7 @@ TEST_P(TestPacketBufferH264, GetBitstreamOneFrameFullBuffer) {
   EXPECT_EQ(memcmp(result, expected, kStartSize), 0);
 }

-TEST_P(TestPacketBufferH264, GetBitstreamBufferPadding) {
+TEST_P(TestPacketBufferH264Parameterized, GetBitstreamBufferPadding) {
   uint16_t seq_num = Rand();
   uint8_t data_data[] = "some plain old data";
   uint8_t* data = new uint8_t[sizeof(data_data)];
@@ -661,7 +675,7 @@ TEST_F(TestPacketBuffer, PacketTimestamps) {
   EXPECT_FALSE(packet_keyframe_ms);
 }

-TEST_P(TestPacketBufferH264, OneFrameFillBuffer) {
+TEST_P(TestPacketBufferH264Parameterized, OneFrameFillBuffer) {
   InsertH264(0, kKeyFrame, kFirst, kNotLast, 1000);
   for (int i = 1; i < kStartSize - 1; ++i)
     InsertH264(i, kKeyFrame, kNotFirst, kNotLast, 1000);
@@ -671,7 +685,7 @@
   CheckFrame(0);
 }

-TEST_P(TestPacketBufferH264, CreateFramesAfterFilledBuffer) {
+TEST_P(TestPacketBufferH264Parameterized, CreateFramesAfterFilledBuffer) {
   InsertH264(kStartSize - 2, kKeyFrame, kFirst, kLast, 0);
   ASSERT_EQ(1UL, frames_from_callback_.size());
   frames_from_callback_.clear();
@@ -688,7 +702,7 @@
   CheckFrame(kStartSize);
 }

-TEST_P(TestPacketBufferH264, OneFrameMaxSeqNum) {
+TEST_P(TestPacketBufferH264Parameterized, OneFrameMaxSeqNum) {
   InsertH264(65534, kKeyFrame, kFirst, kNotLast, 1000);
   InsertH264(65535, kKeyFrame, kNotFirst, kLast, 1000);
@@ -696,7 +710,7 @@
   CheckFrame(65534);
 }

-TEST_P(TestPacketBufferH264, ClearMissingPacketsOnKeyframe) {
+TEST_P(TestPacketBufferH264Parameterized, ClearMissingPacketsOnKeyframe) {
   InsertH264(0, kKeyFrame, kFirst, kLast, 1000);
   InsertH264(2, kKeyFrame, kFirst, kLast, 3000);
   InsertH264(3, kDeltaFrame, kFirst, kNotLast, 4000);
@@ -713,7 +727,7 @@
   CheckFrame(kStartSize + 1);
 }

-TEST_P(TestPacketBufferH264, FindFramesOnPadding) {
+TEST_P(TestPacketBufferH264Parameterized, FindFramesOnPadding) {
   InsertH264(0, kKeyFrame, kFirst, kLast, 1000);
   InsertH264(2, kDeltaFrame, kFirst, kLast, 1000);


@@ -190,7 +190,6 @@ TEST_F(FullStackTest, ForemanCifPlr5H264) {
   foreman_cif.call.send_side_bwe = true;
   foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
                        "H264", 1, 0, 0, false, false, "foreman_cif"};
-  std::string fec_description;
   foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264", 0.0, 0.0,
                           kFullStackTestDurationSecs};
   foreman_cif.pipe.loss_percent = 5;
@@ -198,6 +197,21 @@
   RunTest(foreman_cif);
 }

+TEST_F(FullStackTest, ForemanCifPlr5H264SpsPpsIdrIsKeyframe) {
+  test::ScopedFieldTrials override_field_trials(
+      "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/");
+  VideoQualityTest::Params foreman_cif;
+  foreman_cif.call.send_side_bwe = true;
+  foreman_cif.video = {true, 352, 288, 30, 30000, 500000, 2000000, false,
+                       "H264", 1, 0, 0, false, false, "foreman_cif"};
+  foreman_cif.analyzer = {"foreman_cif_delay_50_0_plr_5_H264_sps_pps_idr", 0.0,
+                          0.0, kFullStackTestDurationSecs};
+  foreman_cif.pipe.loss_percent = 5;
+  foreman_cif.pipe.queue_delay_ms = 50;
+  RunTest(foreman_cif);
+}
+
 // Verify that this is worth the bot time, before enabling.
 TEST_F(FullStackTest, ForemanCifPlr5H264Flexfec) {
   VideoQualityTest::Params foreman_cif;


@@ -35,8 +35,6 @@ namespace webrtc {
 namespace {

-const char kNewJitterBufferFieldTrialEnabled[] =
-    "WebRTC-NewVideoJitterBuffer/Enabled/";
 const uint8_t kH264StartCode[] = {0x00, 0x00, 0x00, 0x01};

 class MockTransport : public Transport {
@@ -120,8 +118,10 @@ MATCHER_P(SamePacketAs, other, "") {
 class RtpVideoStreamReceiverTest : public testing::Test {
  public:
-  RtpVideoStreamReceiverTest()
-      : config_(CreateConfig()),
+  RtpVideoStreamReceiverTest() : RtpVideoStreamReceiverTest("") {}
+  explicit RtpVideoStreamReceiverTest(std::string field_trials)
+      : override_field_trials_(field_trials),
+        config_(CreateConfig()),
         timing_(Clock::GetRealTimeClock()),
         process_thread_(ProcessThread::Create("TestThread")) {}
@@ -189,8 +189,7 @@ class RtpVideoStreamReceiverTest : public testing::Test {
     return config;
   }

-  webrtc::test::ScopedFieldTrials override_field_trials_{
-      kNewJitterBufferFieldTrialEnabled};
+  const webrtc::test::ScopedFieldTrials override_field_trials_;
   VideoReceiveStream::Config config_;
   MockNackSender mock_nack_sender_;
   MockKeyFrameRequestSender mock_key_frame_request_sender_;
@@ -237,7 +236,19 @@ TEST_F(RtpVideoStreamReceiverTest, GenericKeyFrameBitstreamError) {
                                        &rtp_header);
 }

-TEST_F(RtpVideoStreamReceiverTest, InBandSpsPps) {
+class RtpVideoStreamReceiverTestH264
+    : public RtpVideoStreamReceiverTest,
+      public testing::WithParamInterface<std::string> {
+ protected:
+  RtpVideoStreamReceiverTestH264() : RtpVideoStreamReceiverTest(GetParam()) {}
+};
+
+INSTANTIATE_TEST_CASE_P(
+    SpsPpsIdrIsKeyframe,
+    RtpVideoStreamReceiverTestH264,
+    ::testing::Values("", "WebRTC-SpsPpsIdrIsH264Keyframe/Enabled/"));
+
+TEST_P(RtpVideoStreamReceiverTestH264, InBandSpsPps) {
   std::vector<uint8_t> sps_data;
   WebRtcRTPHeader sps_packet = GetDefaultPacket();
   AddSps(&sps_packet, 0, &sps_data);
@@ -279,7 +290,7 @@
                                        idr_data.data(), idr_data.size(), &idr_packet);
 }

-TEST_F(RtpVideoStreamReceiverTest, OutOfBandFmtpSpsPps) {
+TEST_P(RtpVideoStreamReceiverTestH264, OutOfBandFmtpSpsPps) {
   constexpr int kPayloadType = 99;
   VideoCodec codec;
   codec.plType = kPayloadType;