Implement H.264 packetization mode 0.
This approach extends the H.264-specific codec information with a packetization mode enum.

Status: the parameter is in the code; there is no way to set it yet.

Rebase of CL 2009213002.

BUG=600254
Review-Url: https://codereview.webrtc.org/2337453002
Cr-Commit-Position: refs/heads/master@{#15032}
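For illustration, a minimal sketch of how the new parameter is consumed, closely following the h264_encoder_impl_unittest.cc added in this CL; the wrapper function CheckMode0Init() and the 1024-byte max payload size are assumptions for the example, not part of the CL:

// Sketch only: initialize the OpenH264 encoder with packetization mode 0,
// mirroring the pattern used by the new unit test in this CL.
#include <string.h>

#include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h"

namespace webrtc {

int32_t CheckMode0Init() {
  H264EncoderImpl encoder;
  VideoCodec codec_settings;
  // Zero-initialize, as other call sites in the tree do with memset.
  memset(&codec_settings, 0, sizeof(codec_settings));
  codec_settings.codecType = kVideoCodecH264;
  codec_settings.width = 640;
  codec_settings.height = 480;
  codec_settings.maxFramerate = 60;
  codec_settings.targetBitrate = 2000;  // kbits/s, as in the unit test.
  codec_settings.maxBitrate = 4000;
  codec_settings.H264()->frameDroppingOn = true;
  // The new parameter: mode 0 restricts the RTP packetizer to single-NALU
  // packets, so the encoder also has to cap its NAL unit sizes.
  codec_settings.H264()->packetization_mode = kH264PacketizationMode0;
  const size_t kMaxPayloadSize = 1024;  // Assumed value, as in the unit test.
  return encoder.InitEncode(&codec_settings, /*number_of_cores=*/1,
                            kMaxPayloadSize);
}

}  // namespace webrtc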
@@ -1113,6 +1113,9 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
info.codecSpecific.VP9.height[0] = height_;
info.codecSpecific.VP9.gof.CopyGofInfoVP9(gof_);
}
} else if (codecType_ == kVideoCodecH264) {
info.codecSpecific.H264.packetization_mode =
webrtc::kH264PacketizationMode1;
}
picture_id_ = (picture_id_ + 1) & 0x7FFF;
@@ -540,9 +540,18 @@ struct VideoCodecVP9 {
};

// H264 specific.
enum H264PacketizationMode {
// Because VideoCodecH264 was initialized in multiple places using memset,
// we let 0 have the meaning of "not set".
kH264PacketizationModeNotSet = 0,
kH264PacketizationMode0, // Only single NALU allowed
kH264PacketizationMode1 // Non-interleaved - STAP-A, FU-A is allowed
};

struct VideoCodecH264 {
bool frameDroppingOn;
int keyFrameInterval;
H264PacketizationMode packetization_mode;
// These are NULL/0 if not externally negotiated.
const uint8_t* spsData;
size_t spsLen;
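Since a zero value doubles as "not set" (to stay compatible with memset-style initialization), consumers are expected to validate the field before relying on it; the encoder change later in this CL does so with an RTC_DCHECK. A minimal sketch of such a guard, where the helper name is hypothetical and not part of the CL:

// Hypothetical helper: fall back to the current default (mode 1) when the
// field was left at its zero, "not set" value by memset initialization.
H264PacketizationMode EffectivePacketizationMode(const VideoCodecH264& h264) {
  return h264.packetization_mode == kH264PacketizationModeNotSet
             ? kH264PacketizationMode1
             : h264.packetization_mode;
}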
@@ -542,6 +542,10 @@ if (rtc_include_tests) {
[ "video_coding/codecs/vp9/vp9_screenshare_layers_unittest.cc" ]
}

if (rtc_use_h264) {
sources += [ "video_coding/codecs/h264/h264_encoder_impl_unittest.cc" ]
}

if (rtc_desktop_capture_supported || is_android) {
deps += [ "desktop_capture" ]
sources += [
@@ -273,14 +273,19 @@ struct NaluInfo {
const size_t kMaxNalusPerPacket = 10;

struct RTPVideoHeaderH264 {
uint8_t nalu_type; // The NAL unit type. If this is a header for a
// The NAL unit type. If this is a header for a
// fragmented packet, it's the NAL unit type of
// the original data. If this is the header for an
// aggregated packet, it's the NAL unit type of
// the first NAL unit in the packet.
uint8_t nalu_type;
// The packetization type of this buffer - single, aggregated or fragmented.
H264PacketizationTypes packetization_type;
NaluInfo nalus[kMaxNalusPerPacket];
size_t nalus_length;
// The packetization mode of this transport. Packetization mode
// determines which packetization types are allowed when packetizing.
H264PacketizationMode packetization_mode;
};

union RTPVideoTypeHeader {
@@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/

#include <utility>

#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"

#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
@@ -22,7 +24,9 @@ RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type,
FrameType frame_type) {
switch (type) {
case kRtpVideoH264:
return new RtpPacketizerH264(frame_type, max_payload_len);
assert(rtp_type_header != NULL);
return new RtpPacketizerH264(max_payload_len,
rtp_type_header->H264.packetization_mode);
case kRtpVideoVp8:
assert(rtp_type_header != NULL);
return new RtpPacketizerVp8(rtp_type_header->VP8, max_payload_len);
@@ -77,9 +77,10 @@ bool ParseStapAStartOffsets(const uint8_t* nalu_ptr,

} // namespace

RtpPacketizerH264::RtpPacketizerH264(FrameType frame_type,
size_t max_payload_len)
: max_payload_len_(max_payload_len) {}
RtpPacketizerH264::RtpPacketizerH264(size_t max_payload_len,
H264PacketizationMode packetization_mode)
: max_payload_len_(max_payload_len),
packetization_mode_(packetization_mode) {}

RtpPacketizerH264::~RtpPacketizerH264() {
}
@@ -162,6 +163,11 @@ void RtpPacketizerH264::SetPayloadData(

void RtpPacketizerH264::GeneratePackets() {
for (size_t i = 0; i < input_fragments_.size();) {
if (packetization_mode_ == kH264PacketizationMode0) {
PacketizeSingleNalu(i);
++i;
} else {
RTC_CHECK_EQ(packetization_mode_, kH264PacketizationMode1);
if (input_fragments_[i].length > max_payload_len_) {
PacketizeFuA(i);
++i;
@@ -170,6 +176,7 @@ void RtpPacketizerH264::GeneratePackets() {
}
}
}
}

void RtpPacketizerH264::PacketizeFuA(size_t fragment_index) {
// Fragment payload into packets (FU-A).
@@ -229,6 +236,16 @@ size_t RtpPacketizerH264::PacketizeStapA(size_t fragment_index) {
return fragment_index;
}

void RtpPacketizerH264::PacketizeSingleNalu(size_t fragment_index) {
// Add a single NALU to the queue, no aggregation.
size_t payload_size_left = max_payload_len_;
const Fragment* fragment = &input_fragments_[fragment_index];
RTC_CHECK_GE(payload_size_left, fragment->length);
RTC_CHECK_GT(fragment->length, 0u);
packets_.push(PacketUnit(*fragment, true /* first */, true /* last */,
false /* aggregated */, fragment->buffer[0]));
}

bool RtpPacketizerH264::NextPacket(uint8_t* buffer,
size_t* bytes_to_send,
bool* last_packet) {
@@ -249,9 +266,11 @@ bool RtpPacketizerH264::NextPacket(uint8_t* buffer,
input_fragments_.pop_front();
RTC_CHECK_LE(*bytes_to_send, max_payload_len_);
} else if (packet.aggregated) {
RTC_CHECK_EQ(packetization_mode_, kH264PacketizationMode1);
NextAggregatePacket(buffer, bytes_to_send);
RTC_CHECK_LE(*bytes_to_send, max_payload_len_);
} else {
RTC_CHECK_EQ(packetization_mode_, kH264PacketizationMode1);
NextFragmentPacket(buffer, bytes_to_send);
RTC_CHECK_LE(*bytes_to_send, max_payload_len_);
}
@@ -12,6 +12,7 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_

#include <deque>
#include <memory>
#include <queue>
#include <string>

@@ -25,7 +26,8 @@ class RtpPacketizerH264 : public RtpPacketizer {
public:
// Initialize with payload from encoder.
// The payload_data must be exactly one encoded H264 frame.
RtpPacketizerH264(FrameType frame_type, size_t max_payload_len);
RtpPacketizerH264(size_t max_payload_len,
H264PacketizationMode packetization_mode);

virtual ~RtpPacketizerH264();

@@ -89,10 +91,12 @@ class RtpPacketizerH264 : public RtpPacketizer {
void GeneratePackets();
void PacketizeFuA(size_t fragment_index);
size_t PacketizeStapA(size_t fragment_index);
void PacketizeSingleNalu(size_t fragment_index);
void NextAggregatePacket(uint8_t* buffer, size_t* bytes_to_send);
void NextFragmentPacket(uint8_t* buffer, size_t* bytes_to_send);

const size_t max_payload_len_;
const H264PacketizationMode packetization_mode_;
std::deque<Fragment> input_fragments_;
std::queue<PacketUnit> packets_;
@@ -43,6 +43,14 @@ enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
// Bit masks for FU (A and B) headers.
enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };

RtpPacketizer* CreateH264Packetizer(H264PacketizationMode mode,
size_t max_payload_size) {
RTPVideoTypeHeader type_header;
type_header.H264.packetization_mode = mode;
return RtpPacketizer::Create(kRtpVideoH264, max_payload_size, &type_header,
kEmptyFrame);
}

void VerifyFua(size_t fua_index,
const uint8_t* expected_payload,
int offset,
@@ -84,8 +92,8 @@ void TestFua(size_t frame_size,
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = frame_size;
std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
kRtpVideoH264, max_payload_size, NULL, kEmptyFrame));
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode1, max_payload_size));
packetizer->SetPayloadData(frame.get(), frame_size, &fragmentation);

std::unique_ptr<uint8_t[]> packet(new uint8_t[max_payload_size]);
@@ -152,14 +160,19 @@ void VerifySingleNaluPayload(const RTPFragmentationHeader& fragmentation,
}
} // namespace

TEST(RtpPacketizerH264Test, TestSingleNalu) {
// Tests that should work with both packetization mode 0 and
// packetization mode 1.
class RtpPacketizerH264ModeTest
: public ::testing::TestWithParam<H264PacketizationMode> {};

TEST_P(RtpPacketizerH264ModeTest, TestSingleNalu) {
const uint8_t frame[2] = {0x05, 0xFF}; // F=0, NRI=0, Type=5.
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = sizeof(frame);
std::unique_ptr<RtpPacketizer> packetizer(
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
CreateH264Packetizer(GetParam(), kMaxPayloadSize));
packetizer->SetPayloadData(frame, sizeof(frame), &fragmentation);
uint8_t packet[kMaxPayloadSize] = {0};
size_t length = 0;
@@ -167,12 +180,12 @@ TEST(RtpPacketizerH264Test, TestSingleNalu) {
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
EXPECT_EQ(2u, length);
EXPECT_TRUE(last);
VerifySingleNaluPayload(
fragmentation, 0, frame, sizeof(frame), packet, length);
VerifySingleNaluPayload(fragmentation, 0, frame, sizeof(frame), packet,
length);
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}

TEST(RtpPacketizerH264Test, TestSingleNaluTwoPackets) {
TEST_P(RtpPacketizerH264ModeTest, TestSingleNaluTwoPackets) {
const size_t kFrameSize = kMaxPayloadSize + 100;
uint8_t frame[kFrameSize] = {0};
for (size_t i = 0; i < kFrameSize; ++i)
@@ -188,7 +201,7 @@ TEST(RtpPacketizerH264Test, TestSingleNaluTwoPackets) {
frame[fragmentation.fragmentationOffset[1]] = 0x01;

std::unique_ptr<RtpPacketizer> packetizer(
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
CreateH264Packetizer(GetParam(), kMaxPayloadSize));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

uint8_t packet[kMaxPayloadSize] = {0};
@@ -206,6 +219,11 @@ TEST(RtpPacketizerH264Test, TestSingleNaluTwoPackets) {
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}

INSTANTIATE_TEST_CASE_P(PacketMode,
RtpPacketizerH264ModeTest,
::testing::Values(kH264PacketizationMode0,
kH264PacketizationMode1));

TEST(RtpPacketizerH264Test, TestStapA) {
const size_t kFrameSize =
kMaxPayloadSize - 3 * kLengthFieldLength - kNalHeaderSize;
@@ -225,7 +243,7 @@ TEST(RtpPacketizerH264Test, TestStapA) {
fragmentation.fragmentationLength[2] =
kNalHeaderSize + kFrameSize - kPayloadOffset;
std::unique_ptr<RtpPacketizer> packetizer(
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
CreateH264Packetizer(kH264PacketizationMode1, kMaxPayloadSize));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

uint8_t packet[kMaxPayloadSize] = {0};
@@ -242,6 +260,39 @@ TEST(RtpPacketizerH264Test, TestStapA) {
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}

TEST(RtpPacketizerH264Test, TestMode0HasNoStapA) {
// This is the same setup as for the TestStapA test.
const size_t kFrameSize =
kMaxPayloadSize - 3 * kLengthFieldLength - kNalHeaderSize;
uint8_t frame[kFrameSize] = {0x07, 0xFF, // F=0, NRI=0, Type=7 (SPS).
0x08, 0xFF, // F=0, NRI=0, Type=8 (PPS).
0x05}; // F=0, NRI=0, Type=5 (IDR).
const size_t kPayloadOffset = 5;
for (size_t i = 0; i < kFrameSize - kPayloadOffset; ++i)
frame[i + kPayloadOffset] = i;
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(3);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = 2;
fragmentation.fragmentationOffset[1] = 2;
fragmentation.fragmentationLength[1] = 2;
fragmentation.fragmentationOffset[2] = 4;
fragmentation.fragmentationLength[2] =
kNalHeaderSize + kFrameSize - kPayloadOffset;
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode0, kMaxPayloadSize));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

uint8_t packet[kMaxPayloadSize] = {0};
size_t length = 0;
bool last = false;
// The three fragments should be returned as three packets.
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}

TEST(RtpPacketizerH264Test, TestTooSmallForStapAHeaders) {
const size_t kFrameSize = kMaxPayloadSize - 1;
uint8_t frame[kFrameSize] = {0x07, 0xFF, // F=0, NRI=0, Type=7.
@@ -260,7 +311,7 @@ TEST(RtpPacketizerH264Test, TestTooSmallForStapAHeaders) {
fragmentation.fragmentationLength[2] =
kNalHeaderSize + kFrameSize - kPayloadOffset;
std::unique_ptr<RtpPacketizer> packetizer(
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
CreateH264Packetizer(kH264PacketizationMode1, kMaxPayloadSize));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

uint8_t packet[kMaxPayloadSize] = {0};
@@ -308,7 +359,7 @@ TEST(RtpPacketizerH264Test, TestMixedStapA_FUA) {
}
}
std::unique_ptr<RtpPacketizer> packetizer(
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
CreateH264Packetizer(kH264PacketizationMode1, kMaxPayloadSize));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);

// First expecting two FU-A packets.
@@ -381,6 +432,28 @@ TEST(RtpPacketizerH264Test, TestFUABig) {
sizeof(kExpectedPayloadSizes) / sizeof(size_t)));
}

#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)

TEST(RtpPacketizerH264DeathTest, SendOverlongDataInPacketizationMode0) {
const size_t kFrameSize = kMaxPayloadSize + 100;
uint8_t frame[kFrameSize] = {0};
for (size_t i = 0; i < kFrameSize; ++i)
frame[i] = i;
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = kFrameSize;
// Set NAL headers.
frame[fragmentation.fragmentationOffset[0]] = 0x01;

std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode0, kMaxPayloadSize));
EXPECT_DEATH(packetizer->SetPayloadData(frame, kFrameSize, &fragmentation),
"payload_size");
}

#endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)

namespace {
const uint8_t kStartSequence[] = {0x00, 0x00, 0x00, 0x01};
const uint8_t kOriginalSps[] = {kSps, 0x00, 0x00, 0x03, 0x03,
@@ -421,9 +494,8 @@ TEST_F(RtpPacketizerH264TestSpsRewriting, FuASps) {
const size_t kHeaderOverhead = kFuAHeaderSize + 1;

// Set size to fragment SPS into two FU-A packets.
packetizer_.reset(RtpPacketizer::Create(
kRtpVideoH264, sizeof(kOriginalSps) - 2 + kHeaderOverhead, nullptr,
kEmptyFrame));
packetizer_.reset(CreateH264Packetizer(
kH264PacketizationMode1, sizeof(kOriginalSps) - 2 + kHeaderOverhead));

packetizer_->SetPayloadData(in_buffer_.data(), in_buffer_.size(),
&fragmentation_header_);
@@ -459,9 +531,8 @@ TEST_F(RtpPacketizerH264TestSpsRewriting, StapASps) {
sizeof(kIdrTwo) + (kLengthFieldLength * 3);

// Set size to include SPS and the rest of the packets in a Stap-A package.
packetizer_.reset(RtpPacketizer::Create(kRtpVideoH264,
kExpectedTotalSize + kHeaderOverhead,
nullptr, kEmptyFrame));
packetizer_.reset(CreateH264Packetizer(kH264PacketizationMode1,
kExpectedTotalSize + kHeaderOverhead));

packetizer_->SetPayloadData(in_buffer_.data(), in_buffer_.size(),
&fragmentation_header_);
@@ -64,6 +64,7 @@ VideoCodecH264 VideoEncoder::GetDefaultH264Settings() {

h264_settings.frameDroppingOn = true;
h264_settings.keyFrameInterval = 3000;
h264_settings.packetization_mode = kH264PacketizationMode1;
h264_settings.spsData = nullptr;
h264_settings.spsLen = 0;
h264_settings.ppsData = nullptr;
@@ -152,6 +152,15 @@ static void RtpFragmentize(EncodedImage* encoded_image,

H264EncoderImpl::H264EncoderImpl()
: openh264_encoder_(nullptr),
width_(0),
height_(0),
max_frame_rate_(0.0f),
target_bps_(0),
max_bps_(0),
mode_(kRealtimeVideo),
frame_dropping_on_(false),
key_frame_interval_(0),
max_payload_size_(0),
number_of_cores_(0),
encoded_image_callback_(nullptr),
has_reported_init_(false),
@@ -163,7 +172,7 @@ H264EncoderImpl::~H264EncoderImpl() {

int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
int32_t number_of_cores,
size_t /*max_payload_size*/) {
size_t max_payload_size) {
ReportInit();
if (!codec_settings ||
codec_settings->codecType != kVideoCodecH264) {
@@ -210,6 +219,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
mode_ = codec_settings->mode;
frame_dropping_on_ = codec_settings->H264().frameDroppingOn;
key_frame_interval_ = codec_settings->H264().keyFrameInterval;
max_payload_size_ = max_payload_size;

// Codec_settings uses kbits/second; encoder uses bits/second.
max_bps_ = codec_settings->maxBitrate * 1000;
@@ -217,8 +227,12 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
target_bps_ = codec_settings->startBitrate * 1000;
else
target_bps_ = codec_settings->targetBitrate * 1000;
RTC_DCHECK(codec_settings->H264().packetization_mode !=
kH264PacketizationModeNotSet);
packetization_mode_ = codec_settings->H264().packetization_mode;

SEncParamExt encoder_params = CreateEncoderParams();

// Initialize.
if (openh264_encoder_->InitializeExt(&encoder_params) != 0) {
LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
@@ -377,6 +391,7 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
// Deliver encoded image.
CodecSpecificInfo codec_specific;
codec_specific.codecType = kVideoCodecH264;
codec_specific.codecSpecific.H264.packetization_mode = packetization_mode_;
encoded_image_callback_->OnEncodedImage(encoded_image_, &codec_specific,
&frag_header);
@@ -445,19 +460,50 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams() const {
encoder_params.iTargetBitrate;
encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
encoder_params.iMaxBitrate;
LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
<< OPENH264_MINOR;
switch (packetization_mode_) {
case kH264PacketizationMode0:
#if (OPENH264_MAJOR == 1) && (OPENH264_MINOR <= 5)
// Limit the size of packets produced.
encoder_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_DYN_SLICE;
// The slice size is max payload size - room for a NAL header.
// The constant 50 is NAL_HEADER_ADD_0X30BYTES in openh264 source,
// but is not exported.
encoder_params.sSpatialLayers[0]
.sSliceCfg.sSliceArgument.uiSliceSizeConstraint =
static_cast<unsigned int>(max_payload_size_ - 50);
encoder_params.uiMaxNalSize =
static_cast<unsigned int>(max_payload_size_);
#else
// When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto
// design
// it with cpu core number.
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
SM_SIZELIMITED_SLICE;
encoder_params.sSpatialLayers[0]
.sSliceArgument.uiSliceSizeConstraint =
static_cast<unsigned int>(max_payload_size_);
#endif
break;
case kH264PacketizationMode1:
#if (OPENH264_MAJOR == 1) && (OPENH264_MINOR <= 5)
// Slice num according to number of threads.
encoder_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE;
#else
// When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto design
// it with cpu core number.
// When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto
// design it with cpu core number.
// TODO(sprang): Set to 0 when we understand why the rate controller borks
// when uiSliceNum > 1.
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
SM_FIXEDSLCNUM_SLICE;
#endif

break;
default:
RTC_NOTREACHED() << "Illegal packetization mode specified";
}
return encoder_params;
}
@@ -39,7 +39,7 @@ class H264EncoderImpl : public H264Encoder {
// - height
int32_t InitEncode(const VideoCodec* codec_settings,
int32_t number_of_cores,
size_t /*max_payload_size*/) override;
size_t max_payload_size) override;
int32_t Release() override;

int32_t RegisterEncodeCompleteCallback(
@@ -80,7 +80,9 @@ class H264EncoderImpl : public H264Encoder {
// H.264 specifc parameters
bool frame_dropping_on_;
int key_frame_interval_;
H264PacketizationMode packetization_mode_;

size_t max_payload_size_;
int32_t number_of_cores_;

EncodedImage encoded_image_;
@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 *
 */

#include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h"

#include "webrtc/test/gtest.h"

namespace webrtc {

namespace {

const int kMaxPayloadSize = 1024;

void SetDefaultSettings(VideoCodec* codec_settings) {
codec_settings->codecType = kVideoCodecH264;
codec_settings->maxFramerate = 60;
codec_settings->width = 640;
codec_settings->height = 480;
codec_settings->H264()->packetization_mode = kH264PacketizationMode1;
// If frame dropping is false, we get a warning that bitrate can't
// be controlled for RC_QUALITY_MODE; RC_BITRATE_MODE and RC_TIMESTAMP_MODE
codec_settings->H264()->frameDroppingOn = true;
codec_settings->targetBitrate = 2000;
codec_settings->maxBitrate = 4000;
}

TEST(H264EncoderImplTest, CanInitializeWithDefaultParameters) {
H264EncoderImpl encoder;
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, 1, kMaxPayloadSize));
}

TEST(H264EncoderImplTest, CanInitializeWithPacketizationMode0) {
H264EncoderImpl encoder;
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
codec_settings.H264()->packetization_mode = kH264PacketizationMode0;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, 1, kMaxPayloadSize));
}

} // anonymous namespace

} // namespace webrtc
@@ -77,7 +77,9 @@ struct CodecSpecificInfoGeneric {
uint8_t simulcast_idx;
};

struct CodecSpecificInfoH264 {};
struct CodecSpecificInfoH264 {
H264PacketizationMode packetization_mode;
};

union CodecSpecificInfoUnion {
CodecSpecificInfoGeneric generic;
@@ -200,6 +200,7 @@ EncodedImageCallback::Result FakeH264Encoder::OnEncodedImage(
CodecSpecificInfo specifics;
memset(&specifics, 0, sizeof(specifics));
specifics.codecType = kVideoCodecH264;
specifics.codecSpecific.H264.packetization_mode = kH264PacketizationMode1;
return callback_->OnEncodedImage(encoded_image, &specifics, &fragmentation);
}
@@ -143,6 +143,7 @@ if (rtc_include_tests) {
# TODO(pbos): Rename test suite.
rtc_source_set("video_tests") {
testonly = true
defines = []
sources = [
"call_stats_unittest.cc",
"encoder_rtcp_feedback_unittest.cc",
@@ -170,5 +171,8 @@ if (rtc_include_tests) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
if (rtc_use_h264) {
defines += [ "WEBRTC_USE_H264" ]
}
}
}
@@ -130,6 +130,15 @@ class EndToEndTest : public test::CallTest {
Transport* transport);
};

void SetPacketizationMode(H264PacketizationMode mode, VideoEncoder* encoder) {
VideoCodec codec_settings;
codec_settings.codecType = kVideoCodecH264;
codec_settings.H264()->packetization_mode = mode;
// TODO(hta): Determine appropriate value for max packet size.
static const int kMaxPacketSize = 1024;
encoder->InitEncode(&codec_settings, 0, kMaxPacketSize);
}

TEST_F(EndToEndTest, ReceiverCanBeStartedTwice) {
CreateCalls(Call::Config(&event_log_), Call::Config(&event_log_));
@@ -375,7 +384,7 @@ TEST_F(EndToEndTest, SendsAndReceivesVP9VideoRotation90) {
}
#endif // !defined(RTC_DISABLE_VP9)

#if defined(WEBRTC_END_TO_END_H264_TESTS)
#if defined(WEBRTC_USE_H264)

TEST_F(EndToEndTest, SendsAndReceivesH264) {
CodecObserver test(500, kVideoRotation_0, "H264",
@@ -391,7 +400,25 @@ TEST_F(EndToEndTest, SendsAndReceivesH264VideoRotation90) {
RunBaseTest(&test);
}

#endif // defined(WEBRTC_END_TO_END_H264_TESTS)
TEST_F(EndToEndTest, SendsAndReceivesH264PacketizationMode0) {
VideoEncoder* encoder = VideoEncoder::Create(VideoEncoder::kH264);
SetPacketizationMode(kH264PacketizationMode0, encoder);
// The CodecObserver takes ownership of the encoder.
CodecObserver test(500, kVideoRotation_0, "H264", encoder,
H264Decoder::Create());
RunBaseTest(&test);
}

TEST_F(EndToEndTest, SendsAndReceivesH264PacketizationMode1) {
VideoEncoder* encoder = VideoEncoder::Create(VideoEncoder::kH264);
SetPacketizationMode(kH264PacketizationMode1, encoder);
// The CodecObserver takes ownership of the encoder.
CodecObserver test(500, kVideoRotation_0, "H264", encoder,
H264Decoder::Create());
RunBaseTest(&test);
}

#endif // defined(WEBRTC_USE_H264)

TEST_F(EndToEndTest, ReceiverUsesLocalSsrc) {
class SyncRtcpObserver : public test::EndToEndTest {
@@ -75,6 +75,8 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
}
case kVideoCodecH264:
rtp->codec = kRtpVideoH264;
rtp->codecHeader.H264.packetization_mode =
info->codecSpecific.H264.packetization_mode;
return;
case kVideoCodecGeneric:
rtp->codec = kRtpVideoGeneric;
@@ -1970,6 +1970,7 @@ class VideoCodecConfigObserver : public test::SendTest,
num_initializations_(0),
stream_(nullptr) {
memset(&encoder_settings_, 0, sizeof(encoder_settings_));
InitCodecSpecifics();
}

private:
@@ -1993,6 +1994,8 @@ class VideoCodecConfigObserver : public test::SendTest,
}
};

void InitCodecSpecifics();

void ModifyVideoConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
@@ -2057,11 +2060,20 @@ class VideoCodecConfigObserver : public test::SendTest,
VideoEncoderConfig encoder_config_;
};

template <typename T>
void VideoCodecConfigObserver<T>::InitCodecSpecifics() {}

template <>
void VideoCodecConfigObserver<VideoCodecH264>::InitCodecSpecifics() {
encoder_settings_.packetization_mode = kH264PacketizationMode1;
}
template <>
void VideoCodecConfigObserver<VideoCodecH264>::VerifyCodecSpecifics(
const VideoCodec& config) const {
EXPECT_EQ(
0, memcmp(&config.H264(), &encoder_settings_, sizeof(encoder_settings_)));
// Check that packetization mode has propagated.
EXPECT_EQ(kH264PacketizationMode1, config.H264().packetization_mode);
}

template <>