Do not use internal source in H.264 bitstream rewriting tests.
Bug: None
Change-Id: Ice1ffb4371ade57bd642f5fe86d6432f2c175d71
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/179281
Reviewed-by: Niels Moller <nisse@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Mirta Dvornicic <mirtad@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31730}
commit 97910da4e1
parent c6801d4522
committed by Commit Bot
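In practice the change swaps one injection pattern for another in the two affected tests. A minimal sketch of the before/after call sequence, assuming the VideoStreamEncoderTest fixture shown in the diff below (not compilable on its own; sps stands in for the tests' optimal_sps/original_sps arrays):

// Old pattern (removed): pushes a finished EncodedImage straight into the
// encoder callback, which only works when the encoder factory is configured
// with an internal source.
EncodedImage image(sps, sizeof(sps), sizeof(sps));
image._frameType = VideoFrameType::kVideoFrameKey;
fake_encoder_.InjectEncodedImage(image, &codec_specific_info, &fragmentation);
EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs));

// New pattern: park the canned bitstream in the fake encoder and drive a
// normal capture/encode cycle; EncodeHook() substitutes the payload.
fake_encoder_.SetEncodedImageData(
    EncodedImageBuffer::Create(sps, sizeof(sps)));
video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
WaitForEncodedFrame(1);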
@@ -707,6 +707,7 @@ class VideoStreamEncoderTest : public ::testing::Test {
     if (payload_name == "VP9") {
       VideoCodecVP9 vp9_settings = VideoEncoder::GetDefaultVp9Settings();
       vp9_settings.numberOfSpatialLayers = num_spatial_layers;
+      vp9_settings.automaticResizeOn = num_spatial_layers <= 1;
       video_encoder_config.encoder_specific_settings =
           new rtc::RefCountedObject<
               VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
@@ -943,12 +944,10 @@ class VideoStreamEncoderTest : public ::testing::Test {
       encoded_image_callback_->OnEncodedImage(image, nullptr, nullptr);
     }
 
-    void InjectEncodedImage(const EncodedImage& image,
-                            const CodecSpecificInfo* codec_specific_info,
-                            const RTPFragmentationHeader* fragmentation) {
+    void SetEncodedImageData(
+        rtc::scoped_refptr<EncodedImageBufferInterface> encoded_image_data) {
       MutexLock lock(&local_mutex_);
-      encoded_image_callback_->OnEncodedImage(image, codec_specific_info,
-                                              fragmentation);
+      encoded_image_data_ = encoded_image_data;
     }
 
     void ExpectNullFrame() {
@@ -1005,6 +1004,27 @@ class VideoStreamEncoderTest : public ::testing::Test {
       return result;
     }
 
+    std::unique_ptr<RTPFragmentationHeader> EncodeHook(
+        EncodedImage* encoded_image,
+        CodecSpecificInfo* codec_specific) override {
+      {
+        MutexLock lock(&mutex_);
+        codec_specific->codecType = config_.codecType;
+      }
+      MutexLock lock(&local_mutex_);
+      if (encoded_image_data_) {
+        encoded_image->SetEncodedData(encoded_image_data_);
+        if (codec_specific->codecType == kVideoCodecH264) {
+          auto fragmentation = std::make_unique<RTPFragmentationHeader>();
+          fragmentation->VerifyAndAllocateFragmentationHeader(1);
+          fragmentation->fragmentationOffset[0] = 4;
+          fragmentation->fragmentationLength[0] = encoded_image->size() - 4;
+          return fragmentation;
+        }
+      }
+      return nullptr;
+    }
+
     int32_t InitEncode(const VideoCodec* config,
                        const Settings& settings) override {
       int res = FakeEncoder::InitEncode(config, settings);
@@ -1073,6 +1093,8 @@ class VideoStreamEncoderTest : public ::testing::Test {
     bool quality_scaling_ RTC_GUARDED_BY(local_mutex_) = true;
     int requested_resolution_alignment_ RTC_GUARDED_BY(local_mutex_) = 1;
     bool is_hardware_accelerated_ RTC_GUARDED_BY(local_mutex_) = false;
+    rtc::scoped_refptr<EncodedImageBufferInterface> encoded_image_data_
+        RTC_GUARDED_BY(local_mutex_);
     std::unique_ptr<Vp8FrameBufferController> frame_buffer_controller_
         RTC_GUARDED_BY(local_mutex_);
     absl::optional<bool>
@@ -5404,23 +5426,22 @@ TEST_F(VideoStreamEncoderTest, AdjustsTimestampInternalSource) {
 }
 
 TEST_F(VideoStreamEncoderTest, DoesNotRewriteH264BitstreamWithOptimalSps) {
-  // Configure internal source factory and setup test again.
-  encoder_factory_.SetHasInternalSource(true);
+  // SPS contains VUI with restrictions on the maximum number of reordered
+  // pictures, there is no need to rewrite the bitstream to enable faster
+  // decoding.
   ResetEncoder("H264", 1, 1, 1, false);
 
-  EncodedImage image(optimal_sps, sizeof(optimal_sps), sizeof(optimal_sps));
-  image._frameType = VideoFrameType::kVideoFrameKey;
-
-  CodecSpecificInfo codec_specific_info;
-  codec_specific_info.codecType = kVideoCodecH264;
-
-  RTPFragmentationHeader fragmentation;
-  fragmentation.VerifyAndAllocateFragmentationHeader(1);
-  fragmentation.fragmentationOffset[0] = 4;
-  fragmentation.fragmentationLength[0] = sizeof(optimal_sps) - 4;
-
-  fake_encoder_.InjectEncodedImage(image, &codec_specific_info, &fragmentation);
-  EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs));
+  video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+      DataRate::BitsPerSec(kTargetBitrateBps),
+      DataRate::BitsPerSec(kTargetBitrateBps),
+      DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+  video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  fake_encoder_.SetEncodedImageData(
+      EncodedImageBuffer::Create(optimal_sps, sizeof(optimal_sps)));
+
+  video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+  WaitForEncodedFrame(1);
 
   EXPECT_THAT(sink_.GetLastEncodedImageData(),
               testing::ElementsAreArray(optimal_sps));
@@ -5433,27 +5454,25 @@ TEST_F(VideoStreamEncoderTest, DoesNotRewriteH264BitstreamWithOptimalSps) {
 }
 
 TEST_F(VideoStreamEncoderTest, RewritesH264BitstreamWithNonOptimalSps) {
+  // SPS does not contain VUI, the bitstream will be rewritten with added
+  // VUI with restrictions on the maximum number of reordered pictures to
+  // enable faster decoding.
   uint8_t original_sps[] = {0,    0,    0,    1,    H264::NaluType::kSps,
                             0x00, 0x00, 0x03, 0x03, 0xF4,
                             0x05, 0x03, 0xC7, 0xC0};
-
-  // Configure internal source factory and setup test again.
-  encoder_factory_.SetHasInternalSource(true);
   ResetEncoder("H264", 1, 1, 1, false);
 
-  EncodedImage image(original_sps, sizeof(original_sps), sizeof(original_sps));
-  image._frameType = VideoFrameType::kVideoFrameKey;
-
-  CodecSpecificInfo codec_specific_info;
-  codec_specific_info.codecType = kVideoCodecH264;
-
-  RTPFragmentationHeader fragmentation;
-  fragmentation.VerifyAndAllocateFragmentationHeader(1);
-  fragmentation.fragmentationOffset[0] = 4;
-  fragmentation.fragmentationLength[0] = sizeof(original_sps) - 4;
-
-  fake_encoder_.InjectEncodedImage(image, &codec_specific_info, &fragmentation);
-  EXPECT_TRUE(sink_.WaitForFrame(kDefaultTimeoutMs));
+  video_stream_encoder_->OnBitrateUpdatedAndWaitForManagedResources(
+      DataRate::BitsPerSec(kTargetBitrateBps),
+      DataRate::BitsPerSec(kTargetBitrateBps),
+      DataRate::BitsPerSec(kTargetBitrateBps), 0, 0, 0);
+  video_stream_encoder_->WaitUntilTaskQueueIsIdle();
+
+  fake_encoder_.SetEncodedImageData(
+      EncodedImageBuffer::Create(original_sps, sizeof(original_sps)));
+
+  video_source_.IncomingCapturedFrame(CreateFrame(1, nullptr));
+  WaitForEncodedFrame(1);
 
   EXPECT_THAT(sink_.GetLastEncodedImageData(),
               testing::ElementsAreArray(optimal_sps));
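For context on the constants hard-coded in EncodeHook() above: each injected buffer is a single Annex-B NAL unit, so the one fragment simply skips the 4-byte start code (00 00 00 01) and covers the SPS payload that may be rewritten. A self-contained sketch of that layout; Fragment and DescribeSingleNalu are made-up illustration helpers, not WebRTC APIs:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Offset/length of the single fragment that EncodeHook() reports for an
// Annex-B buffer that begins with a 4-byte start code.
struct Fragment {
  size_t offset;
  size_t length;
};

Fragment DescribeSingleNalu(size_t buffer_size) {
  const size_t kStartCodeSize = 4;  // 00 00 00 01
  return {kStartCodeSize, buffer_size - kStartCodeSize};
}

int main() {
  // Same shape as the optimal_sps/original_sps arrays in the tests:
  // start code, then the SPS NAL unit (0x67 is a typical SPS header byte).
  const uint8_t sps[] = {0, 0, 0, 1, 0x67, 0x00, 0x00, 0x03, 0x03, 0xF4};
  const Fragment f = DescribeSingleNalu(sizeof(sps));
  // Matches fragmentationOffset[0] = 4 and
  // fragmentationLength[0] = encoded_image->size() - 4 in EncodeHook().
  std::printf("offset=%zu length=%zu\n", f.offset, f.length);  // offset=4 length=6
  return 0;
}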