Revert of H.264 packetization mode 0 (try 2) (patchset #27 id:520001 of https://codereview.webrtc.org/2337453002/ )

Reason for revert:
Broke a lot of tests in chromium.webrtc browser_tests. See e.g. https://build.chromium.org/p/chromium.webrtc/builders/Mac%20Tester/builds/62228 and https://build.chromium.org/p/chromium.webrtc/builders/Win8%20Tester/builds/30102.
[ RUN      ] WebRtcVideoQualityBrowserTests/WebRtcVideoQualityBrowserTest.MANUAL_TestVideoQualityH264/1
...
#
# Fatal error in e:\b\c\b\win_builder\src\third_party\webrtc\modules\rtp_rtcp\source\rtp_format_h264.cc, line 170
# last system error: 0
# Check failed: packetization_mode_ == kH264PacketizationMode1 (0 vs. 2)
#
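
The check that fired compares the packetizer's mode against kH264PacketizationMode1 (value 2) and saw 0, i.e. "not set". A minimal sketch of one plausible way to end up there, assuming only the enum values introduced by the reverted CL (see the common-types hunk further down); the struct and main() below are illustrative stand-ins, not WebRTC code:

#include <cassert>
#include <cstring>

// Enum values copied from the reverted change: 0 deliberately means "not set"
// because VideoCodecH264 is zero-initialized with memset in several places.
enum H264PacketizationMode {
  kH264PacketizationModeNotSet = 0,
  kH264PacketizationMode0,  // Only single NALU allowed.
  kH264PacketizationMode1   // Non-interleaved: STAP-A and FU-A allowed.
};

// Hypothetical stand-in for the real VideoCodecH264 settings struct.
struct H264SettingsSketch {
  H264PacketizationMode packetization_mode;
};

int main() {
  H264SettingsSketch settings;
  std::memset(&settings, 0, sizeof(settings));  // Common init pattern in the tree.

  // An encoder path that never overwrites the field hands "not set" (0) to the
  // packetizer, which then fails a check equivalent to the one in the log:
  //   RTC_CHECK_EQ(packetization_mode_, kH264PacketizationMode1)  // (0 vs. 2)
  assert(settings.packetization_mode == kH264PacketizationModeNotSet);
  assert(kH264PacketizationMode1 == 2);
  return 0;
}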

Original issue's description:
> Implement H.264 packetization mode 0.
>
> This approach extends the H.264 specific information with
> a packetization mode enum.
>
> Status: Parameter is in code. No way to set it yet.
>
> Rebase of CL 2009213002
>
> BUG=600254
>
> Committed: https://crrev.com/3bba101f36483b8030a693dfbc93af736d1dba68
> Cr-Commit-Position: refs/heads/master@{#15032}

TBR=hbos@webrtc.org,sprang@webrtc.org,mflodman@webrtc.org,hta@webrtc.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=600254
NOPRESUBMIT=true

Review-Url: https://codereview.webrtc.org/2500743002
Cr-Commit-Position: refs/heads/master@{#15050}
magjed
2016-11-12 08:54:45 -08:00
committed by Commit bot
parent b73a75195e
commit 13ceeeadfc
18 changed files with 45 additions and 315 deletions

View File

@ -1113,9 +1113,6 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
info.codecSpecific.VP9.height[0] = height_;
info.codecSpecific.VP9.gof.CopyGofInfoVP9(gof_);
}
} else if (codecType_ == kVideoCodecH264) {
info.codecSpecific.H264.packetization_mode =
webrtc::kH264PacketizationMode1;
}
picture_id_ = (picture_id_ + 1) & 0x7FFF;

View File

@ -540,18 +540,9 @@ struct VideoCodecVP9 {
};
// H264 specific.
enum H264PacketizationMode {
// Because VideoCodecH264 was initialized in multiple places using memset,
// we let 0 have the meaning of "not set".
kH264PacketizationModeNotSet = 0,
kH264PacketizationMode0, // Only single NALU allowed
kH264PacketizationMode1 // Non-interleaved - STAP-A, FU-A is allowed
};
struct VideoCodecH264 {
bool frameDroppingOn;
int keyFrameInterval;
H264PacketizationMode packetization_mode;
// These are NULL/0 if not externally negotiated.
const uint8_t* spsData;
size_t spsLen;

View File

@ -544,10 +544,6 @@ if (rtc_include_tests) {
[ "video_coding/codecs/vp9/vp9_screenshare_layers_unittest.cc" ]
}
if (rtc_use_h264) {
sources += [ "video_coding/codecs/h264/h264_encoder_impl_unittest.cc" ]
}
if (rtc_desktop_capture_supported || is_android) {
deps += [ "desktop_capture" ]
sources += [

View File

@ -273,19 +273,14 @@ struct NaluInfo {
const size_t kMaxNalusPerPacket = 10;
struct RTPVideoHeaderH264 {
// The NAL unit type. If this is a header for a
// fragmented packet, it's the NAL unit type of
// the original data. If this is the header for an
// aggregated packet, it's the NAL unit type of
// the first NAL unit in the packet.
uint8_t nalu_type;
// The packetization type of this buffer - single, aggregated or fragmented.
uint8_t nalu_type; // The NAL unit type. If this is a header for a
// fragmented packet, it's the NAL unit type of
// the original data. If this is the header for an
// aggregated packet, it's the NAL unit type of
// the first NAL unit in the packet.
H264PacketizationTypes packetization_type;
NaluInfo nalus[kMaxNalusPerPacket];
size_t nalus_length;
// The packetization mode of this transport. Packetization mode
// determines which packetization types are allowed when packetizing.
H264PacketizationMode packetization_mode;
};
union RTPVideoTypeHeader {
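
A hedged aside on the packetization_mode comment removed above (mode determines which packetization types are allowed): the helper below merely restates the mode semantics from the H264PacketizationMode comments in this change. The function itself, the kH264SingleNalu value of H264PacketizationTypes, and the 2016-era header paths are assumptions, not part of the CL:

#include "webrtc/common_types.h"                         // H264PacketizationMode (assumed path).
#include "webrtc/modules/include/module_common_types.h"  // H264PacketizationTypes (assumed path).

namespace webrtc {

// Hypothetical helper: which packetization types a given mode permits.
inline bool IsAllowedInMode(H264PacketizationMode mode,
                            H264PacketizationTypes type) {
  switch (mode) {
    case kH264PacketizationMode0:
      return type == kH264SingleNalu;  // Single NALU packets only.
    case kH264PacketizationMode1:
      return true;                     // Single NALU, STAP-A and FU-A.
    default:
      return false;                    // "Not set" permits nothing.
  }
}

}  // namespace webrtc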

View File

@ -8,8 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include <utility>
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
@ -24,9 +22,7 @@ RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type,
FrameType frame_type) {
switch (type) {
case kRtpVideoH264:
assert(rtp_type_header != NULL);
return new RtpPacketizerH264(max_payload_len,
rtp_type_header->H264.packetization_mode);
return new RtpPacketizerH264(frame_type, max_payload_len);
case kRtpVideoVp8:
assert(rtp_type_header != NULL);
return new RtpPacketizerVp8(rtp_type_header->VP8, max_payload_len);

View File

@ -77,10 +77,9 @@ bool ParseStapAStartOffsets(const uint8_t* nalu_ptr,
} // namespace
RtpPacketizerH264::RtpPacketizerH264(size_t max_payload_len,
H264PacketizationMode packetization_mode)
: max_payload_len_(max_payload_len),
packetization_mode_(packetization_mode) {}
RtpPacketizerH264::RtpPacketizerH264(FrameType frame_type,
size_t max_payload_len)
: max_payload_len_(max_payload_len) {}
RtpPacketizerH264::~RtpPacketizerH264() {
}
@ -163,17 +162,11 @@ void RtpPacketizerH264::SetPayloadData(
void RtpPacketizerH264::GeneratePackets() {
for (size_t i = 0; i < input_fragments_.size();) {
if (packetization_mode_ == kH264PacketizationMode0) {
PacketizeSingleNalu(i);
if (input_fragments_[i].length > max_payload_len_) {
PacketizeFuA(i);
++i;
} else {
RTC_CHECK_EQ(packetization_mode_, kH264PacketizationMode1);
if (input_fragments_[i].length > max_payload_len_) {
PacketizeFuA(i);
++i;
} else {
i = PacketizeStapA(i);
}
i = PacketizeStapA(i);
}
}
}
@ -236,16 +229,6 @@ size_t RtpPacketizerH264::PacketizeStapA(size_t fragment_index) {
return fragment_index;
}
void RtpPacketizerH264::PacketizeSingleNalu(size_t fragment_index) {
// Add a single NALU to the queue, no aggregation.
size_t payload_size_left = max_payload_len_;
const Fragment* fragment = &input_fragments_[fragment_index];
RTC_CHECK_GE(payload_size_left, fragment->length);
RTC_CHECK_GT(fragment->length, 0u);
packets_.push(PacketUnit(*fragment, true /* first */, true /* last */,
false /* aggregated */, fragment->buffer[0]));
}
bool RtpPacketizerH264::NextPacket(uint8_t* buffer,
size_t* bytes_to_send,
bool* last_packet) {
@ -266,11 +249,9 @@ bool RtpPacketizerH264::NextPacket(uint8_t* buffer,
input_fragments_.pop_front();
RTC_CHECK_LE(*bytes_to_send, max_payload_len_);
} else if (packet.aggregated) {
RTC_CHECK_EQ(packetization_mode_, kH264PacketizationMode1);
NextAggregatePacket(buffer, bytes_to_send);
RTC_CHECK_LE(*bytes_to_send, max_payload_len_);
} else {
RTC_CHECK_EQ(packetization_mode_, kH264PacketizationMode1);
NextFragmentPacket(buffer, bytes_to_send);
RTC_CHECK_LE(*bytes_to_send, max_payload_len_);
}

View File

@ -12,7 +12,6 @@
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_H264_H_
#include <deque>
#include <memory>
#include <queue>
#include <string>
@ -26,8 +25,7 @@ class RtpPacketizerH264 : public RtpPacketizer {
public:
// Initialize with payload from encoder.
// The payload_data must be exactly one encoded H264 frame.
RtpPacketizerH264(size_t max_payload_len,
H264PacketizationMode packetization_mode);
RtpPacketizerH264(FrameType frame_type, size_t max_payload_len);
virtual ~RtpPacketizerH264();
@ -91,12 +89,10 @@ class RtpPacketizerH264 : public RtpPacketizer {
void GeneratePackets();
void PacketizeFuA(size_t fragment_index);
size_t PacketizeStapA(size_t fragment_index);
void PacketizeSingleNalu(size_t fragment_index);
void NextAggregatePacket(uint8_t* buffer, size_t* bytes_to_send);
void NextFragmentPacket(uint8_t* buffer, size_t* bytes_to_send);
const size_t max_payload_len_;
const H264PacketizationMode packetization_mode_;
std::deque<Fragment> input_fragments_;
std::queue<PacketUnit> packets_;

View File

@ -43,14 +43,6 @@ enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F };
// Bit masks for FU (A and B) headers.
enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 };
RtpPacketizer* CreateH264Packetizer(H264PacketizationMode mode,
size_t max_payload_size) {
RTPVideoTypeHeader type_header;
type_header.H264.packetization_mode = mode;
return RtpPacketizer::Create(kRtpVideoH264, max_payload_size, &type_header,
kEmptyFrame);
}
void VerifyFua(size_t fua_index,
const uint8_t* expected_payload,
int offset,
@ -92,8 +84,8 @@ void TestFua(size_t frame_size,
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = frame_size;
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode1, max_payload_size));
std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
kRtpVideoH264, max_payload_size, NULL, kEmptyFrame));
packetizer->SetPayloadData(frame.get(), frame_size, &fragmentation);
std::unique_ptr<uint8_t[]> packet(new uint8_t[max_payload_size]);
@ -160,19 +152,14 @@ void VerifySingleNaluPayload(const RTPFragmentationHeader& fragmentation,
}
} // namespace
// Tests that should work with both packetization mode 0 and
// packetization mode 1.
class RtpPacketizerH264ModeTest
: public ::testing::TestWithParam<H264PacketizationMode> {};
TEST_P(RtpPacketizerH264ModeTest, TestSingleNalu) {
TEST(RtpPacketizerH264Test, TestSingleNalu) {
const uint8_t frame[2] = {0x05, 0xFF}; // F=0, NRI=0, Type=5.
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = sizeof(frame);
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(GetParam(), kMaxPayloadSize));
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
packetizer->SetPayloadData(frame, sizeof(frame), &fragmentation);
uint8_t packet[kMaxPayloadSize] = {0};
size_t length = 0;
@ -180,12 +167,12 @@ TEST_P(RtpPacketizerH264ModeTest, TestSingleNalu) {
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
EXPECT_EQ(2u, length);
EXPECT_TRUE(last);
VerifySingleNaluPayload(fragmentation, 0, frame, sizeof(frame), packet,
length);
VerifySingleNaluPayload(
fragmentation, 0, frame, sizeof(frame), packet, length);
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}
TEST_P(RtpPacketizerH264ModeTest, TestSingleNaluTwoPackets) {
TEST(RtpPacketizerH264Test, TestSingleNaluTwoPackets) {
const size_t kFrameSize = kMaxPayloadSize + 100;
uint8_t frame[kFrameSize] = {0};
for (size_t i = 0; i < kFrameSize; ++i)
@ -201,7 +188,7 @@ TEST_P(RtpPacketizerH264ModeTest, TestSingleNaluTwoPackets) {
frame[fragmentation.fragmentationOffset[1]] = 0x01;
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(GetParam(), kMaxPayloadSize));
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
uint8_t packet[kMaxPayloadSize] = {0};
@ -219,11 +206,6 @@ TEST_P(RtpPacketizerH264ModeTest, TestSingleNaluTwoPackets) {
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}
INSTANTIATE_TEST_CASE_P(PacketMode,
RtpPacketizerH264ModeTest,
::testing::Values(kH264PacketizationMode0,
kH264PacketizationMode1));
TEST(RtpPacketizerH264Test, TestStapA) {
const size_t kFrameSize =
kMaxPayloadSize - 3 * kLengthFieldLength - kNalHeaderSize;
@ -243,7 +225,7 @@ TEST(RtpPacketizerH264Test, TestStapA) {
fragmentation.fragmentationLength[2] =
kNalHeaderSize + kFrameSize - kPayloadOffset;
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode1, kMaxPayloadSize));
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
uint8_t packet[kMaxPayloadSize] = {0};
@ -260,39 +242,6 @@ TEST(RtpPacketizerH264Test, TestStapA) {
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}
TEST(RtpPacketizerH264Test, TestMode0HasNoStapA) {
// This is the same setup as for the TestStapA test.
const size_t kFrameSize =
kMaxPayloadSize - 3 * kLengthFieldLength - kNalHeaderSize;
uint8_t frame[kFrameSize] = {0x07, 0xFF, // F=0, NRI=0, Type=7 (SPS).
0x08, 0xFF, // F=0, NRI=0, Type=8 (PPS).
0x05}; // F=0, NRI=0, Type=5 (IDR).
const size_t kPayloadOffset = 5;
for (size_t i = 0; i < kFrameSize - kPayloadOffset; ++i)
frame[i + kPayloadOffset] = i;
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(3);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = 2;
fragmentation.fragmentationOffset[1] = 2;
fragmentation.fragmentationLength[1] = 2;
fragmentation.fragmentationOffset[2] = 4;
fragmentation.fragmentationLength[2] =
kNalHeaderSize + kFrameSize - kPayloadOffset;
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode0, kMaxPayloadSize));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
uint8_t packet[kMaxPayloadSize] = {0};
size_t length = 0;
bool last = false;
// The three fragments should be returned as three packets.
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
ASSERT_TRUE(packetizer->NextPacket(packet, &length, &last));
EXPECT_FALSE(packetizer->NextPacket(packet, &length, &last));
}
TEST(RtpPacketizerH264Test, TestTooSmallForStapAHeaders) {
const size_t kFrameSize = kMaxPayloadSize - 1;
uint8_t frame[kFrameSize] = {0x07, 0xFF, // F=0, NRI=0, Type=7.
@ -311,7 +260,7 @@ TEST(RtpPacketizerH264Test, TestTooSmallForStapAHeaders) {
fragmentation.fragmentationLength[2] =
kNalHeaderSize + kFrameSize - kPayloadOffset;
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode1, kMaxPayloadSize));
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
uint8_t packet[kMaxPayloadSize] = {0};
@ -359,7 +308,7 @@ TEST(RtpPacketizerH264Test, TestMixedStapA_FUA) {
}
}
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode1, kMaxPayloadSize));
RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
// First expecting two FU-A packets.
@ -432,28 +381,6 @@ TEST(RtpPacketizerH264Test, TestFUABig) {
sizeof(kExpectedPayloadSizes) / sizeof(size_t)));
}
#if RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
TEST(RtpPacketizerH264DeathTest, SendOverlongDataInPacketizationMode0) {
const size_t kFrameSize = kMaxPayloadSize + 100;
uint8_t frame[kFrameSize] = {0};
for (size_t i = 0; i < kFrameSize; ++i)
frame[i] = i;
RTPFragmentationHeader fragmentation;
fragmentation.VerifyAndAllocateFragmentationHeader(1);
fragmentation.fragmentationOffset[0] = 0;
fragmentation.fragmentationLength[0] = kFrameSize;
// Set NAL headers.
frame[fragmentation.fragmentationOffset[0]] = 0x01;
std::unique_ptr<RtpPacketizer> packetizer(
CreateH264Packetizer(kH264PacketizationMode0, kMaxPayloadSize));
EXPECT_DEATH(packetizer->SetPayloadData(frame, kFrameSize, &fragmentation),
"payload_size");
}
#endif // RTC_DCHECK_IS_ON && GTEST_HAS_DEATH_TEST && !defined(WEBRTC_ANDROID)
namespace {
const uint8_t kStartSequence[] = {0x00, 0x00, 0x00, 0x01};
const uint8_t kOriginalSps[] = {kSps, 0x00, 0x00, 0x03, 0x03,
@ -494,8 +421,9 @@ TEST_F(RtpPacketizerH264TestSpsRewriting, FuASps) {
const size_t kHeaderOverhead = kFuAHeaderSize + 1;
// Set size to fragment SPS into two FU-A packets.
packetizer_.reset(CreateH264Packetizer(
kH264PacketizationMode1, sizeof(kOriginalSps) - 2 + kHeaderOverhead));
packetizer_.reset(RtpPacketizer::Create(
kRtpVideoH264, sizeof(kOriginalSps) - 2 + kHeaderOverhead, nullptr,
kEmptyFrame));
packetizer_->SetPayloadData(in_buffer_.data(), in_buffer_.size(),
&fragmentation_header_);
@ -531,8 +459,9 @@ TEST_F(RtpPacketizerH264TestSpsRewriting, StapASps) {
sizeof(kIdrTwo) + (kLengthFieldLength * 3);
// Set size to include SPS and the rest of the packets in a Stap-A package.
packetizer_.reset(CreateH264Packetizer(kH264PacketizationMode1,
kExpectedTotalSize + kHeaderOverhead));
packetizer_.reset(RtpPacketizer::Create(kRtpVideoH264,
kExpectedTotalSize + kHeaderOverhead,
nullptr, kEmptyFrame));
packetizer_->SetPayloadData(in_buffer_.data(), in_buffer_.size(),
&fragmentation_header_);

View File

@ -64,7 +64,6 @@ VideoCodecH264 VideoEncoder::GetDefaultH264Settings() {
h264_settings.frameDroppingOn = true;
h264_settings.keyFrameInterval = 3000;
h264_settings.packetization_mode = kH264PacketizationMode1;
h264_settings.spsData = nullptr;
h264_settings.spsLen = 0;
h264_settings.ppsData = nullptr;

View File

@ -152,15 +152,6 @@ static void RtpFragmentize(EncodedImage* encoded_image,
H264EncoderImpl::H264EncoderImpl()
: openh264_encoder_(nullptr),
width_(0),
height_(0),
max_frame_rate_(0.0f),
target_bps_(0),
max_bps_(0),
mode_(kRealtimeVideo),
frame_dropping_on_(false),
key_frame_interval_(0),
max_payload_size_(0),
number_of_cores_(0),
encoded_image_callback_(nullptr),
has_reported_init_(false),
@ -172,7 +163,7 @@ H264EncoderImpl::~H264EncoderImpl() {
int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
int32_t number_of_cores,
size_t max_payload_size) {
size_t /*max_payload_size*/) {
ReportInit();
if (!codec_settings ||
codec_settings->codecType != kVideoCodecH264) {
@ -219,7 +210,6 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
mode_ = codec_settings->mode;
frame_dropping_on_ = codec_settings->H264().frameDroppingOn;
key_frame_interval_ = codec_settings->H264().keyFrameInterval;
max_payload_size_ = max_payload_size;
// Codec_settings uses kbits/second; encoder uses bits/second.
max_bps_ = codec_settings->maxBitrate * 1000;
@ -227,12 +217,8 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
target_bps_ = codec_settings->startBitrate * 1000;
else
target_bps_ = codec_settings->targetBitrate * 1000;
RTC_DCHECK(codec_settings->H264().packetization_mode !=
kH264PacketizationModeNotSet);
packetization_mode_ = codec_settings->H264().packetization_mode;
SEncParamExt encoder_params = CreateEncoderParams();
// Initialize.
if (openh264_encoder_->InitializeExt(&encoder_params) != 0) {
LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
@ -391,7 +377,6 @@ int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
// Deliver encoded image.
CodecSpecificInfo codec_specific;
codec_specific.codecType = kVideoCodecH264;
codec_specific.codecSpecific.H264.packetization_mode = packetization_mode_;
encoded_image_callback_->OnEncodedImage(encoded_image_, &codec_specific,
&frag_header);
@ -460,50 +445,19 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams() const {
encoder_params.iTargetBitrate;
encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
encoder_params.iMaxBitrate;
LOG(INFO) << "OpenH264 version is " << OPENH264_MAJOR << "."
<< OPENH264_MINOR;
switch (packetization_mode_) {
case kH264PacketizationMode0:
#if (OPENH264_MAJOR == 1) && (OPENH264_MINOR <= 5)
// Limit the size of packets produced.
encoder_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_DYN_SLICE;
// The slice size is max payload size - room for a NAL header.
// The constant 50 is NAL_HEADER_ADD_0X30BYTES in openh264 source,
// but is not exported.
encoder_params.sSpatialLayers[0]
.sSliceCfg.sSliceArgument.uiSliceSizeConstraint =
static_cast<unsigned int>(max_payload_size_ - 50);
encoder_params.uiMaxNalSize =
static_cast<unsigned int>(max_payload_size_);
// Slice num according to number of threads.
encoder_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE;
#else
// When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto
// design
// it with cpu core number.
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
SM_SIZELIMITED_SLICE;
encoder_params.sSpatialLayers[0]
.sSliceArgument.uiSliceSizeConstraint =
static_cast<unsigned int>(max_payload_size_);
// When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto design
// it with cpu core number.
// TODO(sprang): Set to 0 when we understand why the rate controller borks
// when uiSliceNum > 1.
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
SM_FIXEDSLCNUM_SLICE;
#endif
break;
case kH264PacketizationMode1:
#if (OPENH264_MAJOR == 1) && (OPENH264_MINOR <= 5)
// Slice num according to number of threads.
encoder_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE;
#else
// When uiSliceMode = SM_FIXEDSLCNUM_SLICE, uiSliceNum = 0 means auto
// design it with cpu core number.
// TODO(sprang): Set to 0 when we understand why the rate controller borks
// when uiSliceNum > 1.
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
SM_FIXEDSLCNUM_SLICE;
#endif
break;
default:
RTC_NOTREACHED() << "Illegal packetization mode specified";
}
return encoder_params;
}

View File

@ -39,7 +39,7 @@ class H264EncoderImpl : public H264Encoder {
// - height
int32_t InitEncode(const VideoCodec* codec_settings,
int32_t number_of_cores,
size_t max_payload_size) override;
size_t /*max_payload_size*/) override;
int32_t Release() override;
int32_t RegisterEncodeCompleteCallback(
@ -80,9 +80,7 @@ class H264EncoderImpl : public H264Encoder {
// H.264 specifc parameters
bool frame_dropping_on_;
int key_frame_interval_;
H264PacketizationMode packetization_mode_;
size_t max_payload_size_;
int32_t number_of_cores_;
EncodedImage encoded_image_;

View File

@ -1,54 +0,0 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace {
const int kMaxPayloadSize = 1024;
void SetDefaultSettings(VideoCodec* codec_settings) {
codec_settings->codecType = kVideoCodecH264;
codec_settings->maxFramerate = 60;
codec_settings->width = 640;
codec_settings->height = 480;
codec_settings->H264()->packetization_mode = kH264PacketizationMode1;
// If frame dropping is false, we get a warning that bitrate can't
// be controlled for RC_QUALITY_MODE; RC_BITRATE_MODE and RC_TIMESTAMP_MODE
codec_settings->H264()->frameDroppingOn = true;
codec_settings->targetBitrate = 2000;
codec_settings->maxBitrate = 4000;
}
TEST(H264EncoderImplTest, CanInitializeWithDefaultParameters) {
H264EncoderImpl encoder;
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, 1, kMaxPayloadSize));
}
TEST(H264EncoderImplTest, CanInitializeWithPacketizationMode0) {
H264EncoderImpl encoder;
VideoCodec codec_settings;
SetDefaultSettings(&codec_settings);
codec_settings.H264()->packetization_mode = kH264PacketizationMode0;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder.InitEncode(&codec_settings, 1, kMaxPayloadSize));
}
} // anonymous namespace
} // namespace webrtc

View File

@ -77,9 +77,7 @@ struct CodecSpecificInfoGeneric {
uint8_t simulcast_idx;
};
struct CodecSpecificInfoH264 {
H264PacketizationMode packetization_mode;
};
struct CodecSpecificInfoH264 {};
union CodecSpecificInfoUnion {
CodecSpecificInfoGeneric generic;

View File

@ -200,7 +200,6 @@ EncodedImageCallback::Result FakeH264Encoder::OnEncodedImage(
CodecSpecificInfo specifics;
memset(&specifics, 0, sizeof(specifics));
specifics.codecType = kVideoCodecH264;
specifics.codecSpecific.H264.packetization_mode = kH264PacketizationMode1;
return callback_->OnEncodedImage(encoded_image, &specifics, &fragmentation);
}

View File

@ -143,7 +143,6 @@ if (rtc_include_tests) {
# TODO(pbos): Rename test suite.
rtc_source_set("video_tests") {
testonly = true
defines = []
sources = [
"call_stats_unittest.cc",
"encoder_rtcp_feedback_unittest.cc",
@ -171,8 +170,5 @@ if (rtc_include_tests) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
if (rtc_use_h264) {
defines += [ "WEBRTC_USE_H264" ]
}
}
}

View File

@ -130,15 +130,6 @@ class EndToEndTest : public test::CallTest {
Transport* transport);
};
void SetPacketizationMode(H264PacketizationMode mode, VideoEncoder* encoder) {
VideoCodec codec_settings;
codec_settings.codecType = kVideoCodecH264;
codec_settings.H264()->packetization_mode = mode;
// TODO(hta): Determine appropriate value for max packet size.
static const int kMaxPacketSize = 1024;
encoder->InitEncode(&codec_settings, 0, kMaxPacketSize);
}
TEST_F(EndToEndTest, ReceiverCanBeStartedTwice) {
CreateCalls(Call::Config(&event_log_), Call::Config(&event_log_));
@ -384,7 +375,7 @@ TEST_F(EndToEndTest, SendsAndReceivesVP9VideoRotation90) {
}
#endif // !defined(RTC_DISABLE_VP9)
#if defined(WEBRTC_USE_H264)
#if defined(WEBRTC_END_TO_END_H264_TESTS)
TEST_F(EndToEndTest, SendsAndReceivesH264) {
CodecObserver test(500, kVideoRotation_0, "H264",
@ -400,25 +391,7 @@ TEST_F(EndToEndTest, SendsAndReceivesH264VideoRotation90) {
RunBaseTest(&test);
}
TEST_F(EndToEndTest, SendsAndReceivesH264PacketizationMode0) {
VideoEncoder* encoder = VideoEncoder::Create(VideoEncoder::kH264);
SetPacketizationMode(kH264PacketizationMode0, encoder);
// The CodecObserver takes ownership of the encoder.
CodecObserver test(500, kVideoRotation_0, "H264", encoder,
H264Decoder::Create());
RunBaseTest(&test);
}
TEST_F(EndToEndTest, SendsAndReceivesH264PacketizationMode1) {
VideoEncoder* encoder = VideoEncoder::Create(VideoEncoder::kH264);
SetPacketizationMode(kH264PacketizationMode1, encoder);
// The CodecObserver takes ownership of the encoder.
CodecObserver test(500, kVideoRotation_0, "H264", encoder,
H264Decoder::Create());
RunBaseTest(&test);
}
#endif // defined(WEBRTC_USE_H264)
#endif // defined(WEBRTC_END_TO_END_H264_TESTS)
TEST_F(EndToEndTest, ReceiverUsesLocalSsrc) {
class SyncRtcpObserver : public test::EndToEndTest {

View File

@ -75,8 +75,6 @@ void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
}
case kVideoCodecH264:
rtp->codec = kRtpVideoH264;
rtp->codecHeader.H264.packetization_mode =
info->codecSpecific.H264.packetization_mode;
return;
case kVideoCodecGeneric:
rtp->codec = kRtpVideoGeneric;

View File

@ -1970,7 +1970,6 @@ class VideoCodecConfigObserver : public test::SendTest,
num_initializations_(0),
stream_(nullptr) {
memset(&encoder_settings_, 0, sizeof(encoder_settings_));
InitCodecSpecifics();
}
private:
@ -1994,8 +1993,6 @@ class VideoCodecConfigObserver : public test::SendTest,
}
};
void InitCodecSpecifics();
void ModifyVideoConfigs(
VideoSendStream::Config* send_config,
std::vector<VideoReceiveStream::Config>* receive_configs,
@ -2060,20 +2057,11 @@ class VideoCodecConfigObserver : public test::SendTest,
VideoEncoderConfig encoder_config_;
};
template <typename T>
void VideoCodecConfigObserver<T>::InitCodecSpecifics() {}
template <>
void VideoCodecConfigObserver<VideoCodecH264>::InitCodecSpecifics() {
encoder_settings_.packetization_mode = kH264PacketizationMode1;
}
template <>
void VideoCodecConfigObserver<VideoCodecH264>::VerifyCodecSpecifics(
const VideoCodec& config) const {
EXPECT_EQ(
0, memcmp(&config.H264(), &encoder_settings_, sizeof(encoder_settings_)));
// Check that packetization mode has propagated.
EXPECT_EQ(kH264PacketizationMode1, config.H264().packetization_mode);
}
template <>