Refactor SimulcastTestUtility into SimulcastTestFixture{,Impl}
This will allow exposing the interface to downstream users that want to test VP8 simulcast. No functional changes to the tests themselves are expected. Bug: webrtc:9281 Change-Id: I4128b8f35a4412c5b330cf55c8dc0e173d4570da Reviewed-on: https://webrtc-review.googlesource.com/77361 Commit-Queue: Rasmus Brandt <brandtr@webrtc.org> Reviewed-by: Fredrik Solenberg <solenberg@webrtc.org> Reviewed-by: Magnus Jedvert <magjed@webrtc.org> Reviewed-by: Stefan Holmer <stefan@webrtc.org> Reviewed-by: Erik Språng <sprang@webrtc.org> Cr-Commit-Position: refs/heads/master@{#23469}
This commit is contained in:

committed by
Commit Bot

parent
29921cf097
commit
0cedc054a2
27
api/BUILD.gn
27
api/BUILD.gn
@ -283,6 +283,33 @@ if (rtc_include_tests) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
rtc_source_set("simulcast_test_fixture_api") {
|
||||||
|
visibility = [ "*" ]
|
||||||
|
testonly = true
|
||||||
|
sources = [
|
||||||
|
"test/simulcast_test_fixture.h",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
rtc_source_set("create_simulcast_test_fixture_api") {
|
||||||
|
visibility = [ "*" ]
|
||||||
|
testonly = true
|
||||||
|
sources = [
|
||||||
|
"test/create_simulcast_test_fixture.cc",
|
||||||
|
"test/create_simulcast_test_fixture.h",
|
||||||
|
]
|
||||||
|
deps = [
|
||||||
|
":simulcast_test_fixture_api",
|
||||||
|
"../modules/video_coding:simulcast_test_fixture_impl",
|
||||||
|
"../rtc_base:rtc_base_approved",
|
||||||
|
"video_codecs:video_codecs_api",
|
||||||
|
]
|
||||||
|
if (!build_with_chromium && is_clang) {
|
||||||
|
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
|
||||||
|
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
rtc_source_set("videocodec_test_fixture_api") {
|
rtc_source_set("videocodec_test_fixture_api") {
|
||||||
visibility = [ "*" ]
|
visibility = [ "*" ]
|
||||||
testonly = true
|
testonly = true
|
||||||
|
31
api/test/create_simulcast_test_fixture.cc
Normal file
31
api/test/create_simulcast_test_fixture.cc
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "api/test/create_simulcast_test_fixture.h"
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
|
#include "api/test/simulcast_test_fixture.h"
|
||||||
|
#include "modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.h"
|
||||||
|
#include "rtc_base/ptr_util.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace test {
|
||||||
|
|
||||||
|
std::unique_ptr<SimulcastTestFixture> CreateSimulcastTestFixture(
|
||||||
|
std::unique_ptr<VideoEncoderFactory> encoder_factory,
|
||||||
|
std::unique_ptr<VideoDecoderFactory> decoder_factory) {
|
||||||
|
return rtc::MakeUnique<SimulcastTestFixtureImpl>(std::move(encoder_factory),
|
||||||
|
std::move(decoder_factory));
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace test
|
||||||
|
} // namespace webrtc
|
30
api/test/create_simulcast_test_fixture.h
Normal file
30
api/test/create_simulcast_test_fixture.h
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef API_TEST_CREATE_SIMULCAST_TEST_FIXTURE_H_
|
||||||
|
#define API_TEST_CREATE_SIMULCAST_TEST_FIXTURE_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include "api/test/simulcast_test_fixture.h"
|
||||||
|
#include "api/video_codecs/video_decoder_factory.h"
|
||||||
|
#include "api/video_codecs/video_encoder_factory.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace test {
|
||||||
|
|
||||||
|
std::unique_ptr<SimulcastTestFixture> CreateSimulcastTestFixture(
|
||||||
|
std::unique_ptr<VideoEncoderFactory> encoder_factory,
|
||||||
|
std::unique_ptr<VideoDecoderFactory> decoder_factory);
|
||||||
|
|
||||||
|
} // namespace test
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // API_TEST_CREATE_SIMULCAST_TEST_FIXTURE_H_
|
41
api/test/simulcast_test_fixture.h
Normal file
41
api/test/simulcast_test_fixture.h
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef API_TEST_SIMULCAST_TEST_FIXTURE_H_
|
||||||
|
#define API_TEST_SIMULCAST_TEST_FIXTURE_H_
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace test {
|
||||||
|
|
||||||
|
class SimulcastTestFixture {
|
||||||
|
public:
|
||||||
|
virtual ~SimulcastTestFixture() = default;
|
||||||
|
|
||||||
|
virtual void TestKeyFrameRequestsOnAllStreams() = 0;
|
||||||
|
virtual void TestPaddingAllStreams() = 0;
|
||||||
|
virtual void TestPaddingTwoStreams() = 0;
|
||||||
|
virtual void TestPaddingTwoStreamsOneMaxedOut() = 0;
|
||||||
|
virtual void TestPaddingOneStream() = 0;
|
||||||
|
virtual void TestPaddingOneStreamTwoMaxedOut() = 0;
|
||||||
|
virtual void TestSendAllStreams() = 0;
|
||||||
|
virtual void TestDisablingStreams() = 0;
|
||||||
|
virtual void TestActiveStreams() = 0;
|
||||||
|
virtual void TestSwitchingToOneStream() = 0;
|
||||||
|
virtual void TestSwitchingToOneOddStream() = 0;
|
||||||
|
virtual void TestSwitchingToOneSmallStream() = 0;
|
||||||
|
virtual void TestSpatioTemporalLayers333PatternEncoder() = 0;
|
||||||
|
virtual void TestSpatioTemporalLayers321PatternEncoder() = 0;
|
||||||
|
virtual void TestStrideEncodeDecode() = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace test
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // API_TEST_SIMULCAST_TEST_FIXTURE_H_
|
@ -529,6 +529,7 @@ if (rtc_include_tests) {
|
|||||||
"../rtc_base:rtc_task_queue",
|
"../rtc_base:rtc_task_queue",
|
||||||
"../rtc_base:stringutils",
|
"../rtc_base:stringutils",
|
||||||
"../test:field_trial",
|
"../test:field_trial",
|
||||||
|
"../test:test_common",
|
||||||
]
|
]
|
||||||
sources = [
|
sources = [
|
||||||
"base/codec_unittest.cc",
|
"base/codec_unittest.cc",
|
||||||
@ -608,8 +609,10 @@ if (rtc_include_tests) {
|
|||||||
":rtc_media_base",
|
":rtc_media_base",
|
||||||
":rtc_media_tests_utils",
|
":rtc_media_tests_utils",
|
||||||
":rtc_software_fallback_wrappers",
|
":rtc_software_fallback_wrappers",
|
||||||
|
"../api:create_simulcast_test_fixture_api",
|
||||||
"../api:libjingle_peerconnection_api",
|
"../api:libjingle_peerconnection_api",
|
||||||
"../api:mock_video_codec_factory",
|
"../api:mock_video_codec_factory",
|
||||||
|
"../api:simulcast_test_fixture_api",
|
||||||
"../api/audio_codecs:builtin_audio_decoder_factory",
|
"../api/audio_codecs:builtin_audio_decoder_factory",
|
||||||
"../api/audio_codecs:builtin_audio_encoder_factory",
|
"../api/audio_codecs:builtin_audio_encoder_factory",
|
||||||
"../api/video:video_bitrate_allocation",
|
"../api/video:video_bitrate_allocation",
|
||||||
@ -624,8 +627,7 @@ if (rtc_include_tests) {
|
|||||||
"../logging:rtc_event_log_impl_base",
|
"../logging:rtc_event_log_impl_base",
|
||||||
"../modules/audio_device:mock_audio_device",
|
"../modules/audio_device:mock_audio_device",
|
||||||
"../modules/audio_processing:audio_processing",
|
"../modules/audio_processing:audio_processing",
|
||||||
"../modules/video_coding:simulcast_test_utility",
|
"../modules/video_coding:simulcast_test_fixture_impl",
|
||||||
"../modules/video_coding:video_coding_utility",
|
|
||||||
"../modules/video_coding:webrtc_vp8_helpers",
|
"../modules/video_coding:webrtc_vp8_helpers",
|
||||||
"../p2p:p2p_test_utils",
|
"../p2p:p2p_test_utils",
|
||||||
"../rtc_base:rtc_base",
|
"../rtc_base:rtc_base",
|
||||||
|
@ -12,90 +12,134 @@
|
|||||||
#include <memory>
|
#include <memory>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
|
#include "api/test/create_simulcast_test_fixture.h"
|
||||||
|
#include "api/test/simulcast_test_fixture.h"
|
||||||
#include "api/video_codecs/sdp_video_format.h"
|
#include "api/video_codecs/sdp_video_format.h"
|
||||||
#include "api/video_codecs/video_encoder_factory.h"
|
#include "api/video_codecs/video_encoder_factory.h"
|
||||||
#include "common_video/include/video_frame_buffer.h"
|
#include "common_video/include/video_frame_buffer.h"
|
||||||
#include "media/engine/internalencoderfactory.h"
|
#include "media/engine/internalencoderfactory.h"
|
||||||
#include "media/engine/simulcast_encoder_adapter.h"
|
#include "media/engine/simulcast_encoder_adapter.h"
|
||||||
#include "modules/video_coding/codecs/vp8/simulcast_test_utility.h"
|
#include "modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.h"
|
||||||
#include "modules/video_coding/include/video_codec_interface.h"
|
#include "modules/video_coding/include/video_codec_interface.h"
|
||||||
#include "rtc_base/ptr_util.h"
|
#include "rtc_base/ptr_util.h"
|
||||||
|
#include "test/function_video_decoder_factory.h"
|
||||||
|
#include "test/function_video_encoder_factory.h"
|
||||||
#include "test/gmock.h"
|
#include "test/gmock.h"
|
||||||
|
#include "test/gtest.h"
|
||||||
|
|
||||||
|
using ::testing::_;
|
||||||
|
using ::testing::Return;
|
||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
namespace testing {
|
namespace test {
|
||||||
|
|
||||||
class TestSimulcastEncoderAdapter : public TestVp8Simulcast {
|
namespace {
|
||||||
public:
|
|
||||||
TestSimulcastEncoderAdapter() : factory_(new InternalEncoderFactory()) {}
|
|
||||||
|
|
||||||
protected:
|
constexpr int kDefaultWidth = 1280;
|
||||||
std::unique_ptr<VP8Encoder> CreateEncoder() override {
|
constexpr int kDefaultHeight = 720;
|
||||||
return rtc::MakeUnique<SimulcastEncoderAdapter>(factory_.get(),
|
|
||||||
SdpVideoFormat("VP8"));
|
|
||||||
}
|
|
||||||
std::unique_ptr<VP8Decoder> CreateDecoder() override {
|
|
||||||
return VP8Decoder::Create();
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture(
|
||||||
std::unique_ptr<VideoEncoderFactory> factory_;
|
VideoEncoderFactory* internal_encoder_factory) {
|
||||||
};
|
std::unique_ptr<VideoEncoderFactory> encoder_factory =
|
||||||
|
rtc::MakeUnique<FunctionVideoEncoderFactory>(
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestKeyFrameRequestsOnAllStreams) {
|
[internal_encoder_factory]() {
|
||||||
TestVp8Simulcast::TestKeyFrameRequestsOnAllStreams();
|
return rtc::MakeUnique<SimulcastEncoderAdapter>(
|
||||||
|
internal_encoder_factory,
|
||||||
|
SdpVideoFormat(cricket::kVp8CodecName));
|
||||||
|
});
|
||||||
|
std::unique_ptr<VideoDecoderFactory> decoder_factory =
|
||||||
|
rtc::MakeUnique<FunctionVideoDecoderFactory>(
|
||||||
|
[]() { return VP8Decoder::Create(); });
|
||||||
|
return CreateSimulcastTestFixture(std::move(encoder_factory),
|
||||||
|
std::move(decoder_factory));
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestPaddingAllStreams) {
|
} // namespace
|
||||||
TestVp8Simulcast::TestPaddingAllStreams();
|
|
||||||
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestKeyFrameRequestsOnAllStreams) {
|
||||||
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestKeyFrameRequestsOnAllStreams();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestPaddingTwoStreams) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingAllStreams) {
|
||||||
TestVp8Simulcast::TestPaddingTwoStreams();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestPaddingAllStreams();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestPaddingTwoStreamsOneMaxedOut) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingTwoStreams) {
|
||||||
TestVp8Simulcast::TestPaddingTwoStreamsOneMaxedOut();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestPaddingTwoStreams();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestPaddingOneStream) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingTwoStreamsOneMaxedOut) {
|
||||||
TestVp8Simulcast::TestPaddingOneStream();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestPaddingTwoStreamsOneMaxedOut();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestPaddingOneStreamTwoMaxedOut) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingOneStream) {
|
||||||
TestVp8Simulcast::TestPaddingOneStreamTwoMaxedOut();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestPaddingOneStream();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestSendAllStreams) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestPaddingOneStreamTwoMaxedOut) {
|
||||||
TestVp8Simulcast::TestSendAllStreams();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestPaddingOneStreamTwoMaxedOut();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestDisablingStreams) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestSendAllStreams) {
|
||||||
TestVp8Simulcast::TestDisablingStreams();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestSendAllStreams();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestActiveStreams) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestDisablingStreams) {
|
||||||
TestVp8Simulcast::TestActiveStreams();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestDisablingStreams();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestSwitchingToOneStream) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestActiveStreams) {
|
||||||
TestVp8Simulcast::TestSwitchingToOneStream();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestActiveStreams();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestSwitchingToOneOddStream) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestSwitchingToOneStream) {
|
||||||
TestVp8Simulcast::TestSwitchingToOneOddStream();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestSwitchingToOneStream();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestStrideEncodeDecode) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestSwitchingToOneOddStream) {
|
||||||
TestVp8Simulcast::TestStrideEncodeDecode();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestSwitchingToOneOddStream();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestSaptioTemporalLayers333PatternEncoder) {
|
TEST(SimulcastEncoderAdapterSimulcastTest, TestStrideEncodeDecode) {
|
||||||
TestVp8Simulcast::TestSaptioTemporalLayers333PatternEncoder();
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestStrideEncodeDecode();
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapter, TestSpatioTemporalLayers321PatternEncoder) {
|
TEST(SimulcastEncoderAdapterSimulcastTest,
|
||||||
TestVp8Simulcast::TestSpatioTemporalLayers321PatternEncoder();
|
TestSpatioTemporalLayers333PatternEncoder) {
|
||||||
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestSpatioTemporalLayers333PatternEncoder();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(SimulcastEncoderAdapterSimulcastTest,
|
||||||
|
TestSpatioTemporalLayers321PatternEncoder) {
|
||||||
|
InternalEncoderFactory internal_encoder_factory;
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture(&internal_encoder_factory);
|
||||||
|
fixture->TestSpatioTemporalLayers321PatternEncoder();
|
||||||
}
|
}
|
||||||
|
|
||||||
class MockVideoEncoder;
|
class MockVideoEncoder;
|
||||||
@ -312,7 +356,7 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
|
|||||||
}
|
}
|
||||||
|
|
||||||
void SetupCodec() {
|
void SetupCodec() {
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
rate_allocator_.reset(new SimulcastRateAllocator(codec_));
|
rate_allocator_.reset(new SimulcastRateAllocator(codec_));
|
||||||
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
||||||
@ -460,7 +504,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, EncodedCallbackForDifferentEncoders) {
|
|||||||
// with the lowest stream.
|
// with the lowest stream.
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
|
TEST_F(TestSimulcastEncoderAdapterFake, ReusesEncodersInOrder) {
|
||||||
// Set up common settings for three streams.
|
// Set up common settings for three streams.
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
rate_allocator_.reset(new SimulcastRateAllocator(codec_));
|
rate_allocator_.reset(new SimulcastRateAllocator(codec_));
|
||||||
adapter_->RegisterEncodeCompleteCallback(this);
|
adapter_->RegisterEncodeCompleteCallback(this);
|
||||||
@ -658,7 +702,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, ReinitDoesNotReorderFrameSimulcastIdx) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake, SupportsNativeHandleForSingleStreams) {
|
TEST_F(TestSimulcastEncoderAdapterFake, SupportsNativeHandleForSingleStreams) {
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
codec_.numberOfSimulcastStreams = 1;
|
codec_.numberOfSimulcastStreams = 1;
|
||||||
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
||||||
@ -671,7 +715,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, SupportsNativeHandleForSingleStreams) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake, SetRatesUnderMinBitrate) {
|
TEST_F(TestSimulcastEncoderAdapterFake, SetRatesUnderMinBitrate) {
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
codec_.minBitrate = 50;
|
codec_.minBitrate = 50;
|
||||||
codec_.numberOfSimulcastStreams = 1;
|
codec_.numberOfSimulcastStreams = 1;
|
||||||
@ -700,7 +744,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, SetRatesUnderMinBitrate) {
|
|||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake, SupportsImplementationName) {
|
TEST_F(TestSimulcastEncoderAdapterFake, SupportsImplementationName) {
|
||||||
EXPECT_STREQ("SimulcastEncoderAdapter", adapter_->ImplementationName());
|
EXPECT_STREQ("SimulcastEncoderAdapter", adapter_->ImplementationName());
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
std::vector<const char*> encoder_names;
|
std::vector<const char*> encoder_names;
|
||||||
encoder_names.push_back("codec1");
|
encoder_names.push_back("codec1");
|
||||||
@ -722,7 +766,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, SupportsImplementationName) {
|
|||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake,
|
TEST_F(TestSimulcastEncoderAdapterFake,
|
||||||
SupportsNativeHandleForMultipleStreams) {
|
SupportsNativeHandleForMultipleStreams) {
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
codec_.numberOfSimulcastStreams = 3;
|
codec_.numberOfSimulcastStreams = 3;
|
||||||
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
||||||
@ -759,7 +803,7 @@ class FakeNativeBuffer : public VideoFrameBuffer {
|
|||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake,
|
TEST_F(TestSimulcastEncoderAdapterFake,
|
||||||
NativeHandleForwardingForMultipleStreams) {
|
NativeHandleForwardingForMultipleStreams) {
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
codec_.numberOfSimulcastStreams = 3;
|
codec_.numberOfSimulcastStreams = 3;
|
||||||
// High start bitrate, so all streams are enabled.
|
// High start bitrate, so all streams are enabled.
|
||||||
@ -783,7 +827,7 @@ TEST_F(TestSimulcastEncoderAdapterFake,
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
|
TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
codec_.numberOfSimulcastStreams = 3;
|
codec_.numberOfSimulcastStreams = 3;
|
||||||
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
EXPECT_EQ(0, adapter_->InitEncode(&codec_, 1, 1200));
|
||||||
@ -804,7 +848,7 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(TestSimulcastEncoderAdapterFake, TestInitFailureCleansUpEncoders) {
|
TEST_F(TestSimulcastEncoderAdapterFake, TestInitFailureCleansUpEncoders) {
|
||||||
TestVp8Simulcast::DefaultSettings(
|
SimulcastTestFixtureImpl::DefaultSettings(
|
||||||
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
&codec_, static_cast<const int*>(kTestTemporalLayerProfile));
|
||||||
codec_.numberOfSimulcastStreams = 3;
|
codec_.numberOfSimulcastStreams = 3;
|
||||||
helper_->factory()->set_init_encode_return_value(
|
helper_->factory()->set_init_encode_return_value(
|
||||||
@ -814,5 +858,5 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestInitFailureCleansUpEncoders) {
|
|||||||
EXPECT_TRUE(helper_->factory()->encoders().empty());
|
EXPECT_TRUE(helper_->factory()->encoders().empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace testing
|
} // namespace test
|
||||||
} // namespace webrtc
|
} // namespace webrtc
|
||||||
|
@ -549,10 +549,11 @@ if (rtc_include_tests) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
rtc_source_set("simulcast_test_utility") {
|
rtc_source_set("simulcast_test_fixture_impl") {
|
||||||
testonly = true
|
testonly = true
|
||||||
sources = [
|
sources = [
|
||||||
"codecs/vp8/simulcast_test_utility.h",
|
"codecs/vp8/simulcast_test_fixture_impl.cc",
|
||||||
|
"codecs/vp8/simulcast_test_fixture_impl.h",
|
||||||
]
|
]
|
||||||
|
|
||||||
if (!build_with_chromium && is_clang) {
|
if (!build_with_chromium && is_clang) {
|
||||||
@ -565,8 +566,11 @@ if (rtc_include_tests) {
|
|||||||
":video_codec_interface",
|
":video_codec_interface",
|
||||||
":video_coding",
|
":video_coding",
|
||||||
":webrtc_vp8_helpers",
|
":webrtc_vp8_helpers",
|
||||||
|
"../../:webrtc_common",
|
||||||
|
"../../api:simulcast_test_fixture_api",
|
||||||
"../../api/video:video_frame",
|
"../../api/video:video_frame",
|
||||||
"../../api/video:video_frame_i420",
|
"../../api/video:video_frame_i420",
|
||||||
|
"../../api/video_codecs:video_codecs_api",
|
||||||
"../../common_video:common_video",
|
"../../common_video:common_video",
|
||||||
"../../rtc_base:checks",
|
"../../rtc_base:checks",
|
||||||
"../../rtc_base:rtc_base_approved",
|
"../../rtc_base:rtc_base_approved",
|
||||||
@ -767,8 +771,8 @@ if (rtc_include_tests) {
|
|||||||
"codecs/test/videocodec_test_stats_impl_unittest.cc",
|
"codecs/test/videocodec_test_stats_impl_unittest.cc",
|
||||||
"codecs/test/videoprocessor_unittest.cc",
|
"codecs/test/videoprocessor_unittest.cc",
|
||||||
"codecs/vp8/default_temporal_layers_unittest.cc",
|
"codecs/vp8/default_temporal_layers_unittest.cc",
|
||||||
|
"codecs/vp8/libvpx_vp8_simulcast_test.cc",
|
||||||
"codecs/vp8/screenshare_layers_unittest.cc",
|
"codecs/vp8/screenshare_layers_unittest.cc",
|
||||||
"codecs/vp8/simulcast_unittest.cc",
|
|
||||||
"codecs/vp9/svc_config_unittest.cc",
|
"codecs/vp9/svc_config_unittest.cc",
|
||||||
"codecs/vp9/svc_rate_allocator_unittest.cc",
|
"codecs/vp9/svc_rate_allocator_unittest.cc",
|
||||||
"decoding_state_unittest.cc",
|
"decoding_state_unittest.cc",
|
||||||
@ -809,7 +813,6 @@ if (rtc_include_tests) {
|
|||||||
":codec_globals_headers",
|
":codec_globals_headers",
|
||||||
":encoded_frame",
|
":encoded_frame",
|
||||||
":mock_headers",
|
":mock_headers",
|
||||||
":simulcast_test_utility",
|
|
||||||
":video_codec_interface",
|
":video_codec_interface",
|
||||||
":video_codecs_test_framework",
|
":video_codecs_test_framework",
|
||||||
":video_coding",
|
":video_coding",
|
||||||
@ -823,6 +826,8 @@ if (rtc_include_tests) {
|
|||||||
"..:module_api",
|
"..:module_api",
|
||||||
"../..:webrtc_common",
|
"../..:webrtc_common",
|
||||||
"../../:typedefs",
|
"../../:typedefs",
|
||||||
|
"../../api:create_simulcast_test_fixture_api",
|
||||||
|
"../../api:simulcast_test_fixture_api",
|
||||||
"../../api:videocodec_test_fixture_api",
|
"../../api:videocodec_test_fixture_api",
|
||||||
"../../api/video:video_frame",
|
"../../api/video:video_frame",
|
||||||
"../../api/video:video_frame_i420",
|
"../../api/video:video_frame_i420",
|
||||||
@ -842,6 +847,7 @@ if (rtc_include_tests) {
|
|||||||
"../../system_wrappers:metrics_default",
|
"../../system_wrappers:metrics_default",
|
||||||
"../../test:field_trial",
|
"../../test:field_trial",
|
||||||
"../../test:fileutils",
|
"../../test:fileutils",
|
||||||
|
"../../test:test_common",
|
||||||
"../../test:test_support",
|
"../../test:test_support",
|
||||||
"../../test:video_test_common",
|
"../../test:video_test_common",
|
||||||
"../../test:video_test_support",
|
"../../test:video_test_support",
|
||||||
|
108
modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
Normal file
108
modules/video_coding/codecs/vp8/libvpx_vp8_simulcast_test.cc
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
#include "api/test/create_simulcast_test_fixture.h"
|
||||||
|
#include "api/test/simulcast_test_fixture.h"
|
||||||
|
#include "modules/video_coding/codecs/vp8/include/vp8.h"
|
||||||
|
#include "rtc_base/ptr_util.h"
|
||||||
|
#include "test/function_video_decoder_factory.h"
|
||||||
|
#include "test/function_video_encoder_factory.h"
|
||||||
|
#include "test/gtest.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace test {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
std::unique_ptr<SimulcastTestFixture> CreateSpecificSimulcastTestFixture() {
|
||||||
|
std::unique_ptr<VideoEncoderFactory> encoder_factory =
|
||||||
|
rtc::MakeUnique<FunctionVideoEncoderFactory>(
|
||||||
|
[]() { return VP8Encoder::Create(); });
|
||||||
|
std::unique_ptr<VideoDecoderFactory> decoder_factory =
|
||||||
|
rtc::MakeUnique<FunctionVideoDecoderFactory>(
|
||||||
|
[]() { return VP8Decoder::Create(); });
|
||||||
|
return CreateSimulcastTestFixture(std::move(encoder_factory),
|
||||||
|
std::move(decoder_factory));
|
||||||
|
}
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestKeyFrameRequestsOnAllStreams) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestKeyFrameRequestsOnAllStreams();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestPaddingAllStreams) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestPaddingAllStreams();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestPaddingTwoStreams) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestPaddingTwoStreams();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestPaddingTwoStreamsOneMaxedOut) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestPaddingTwoStreamsOneMaxedOut();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestPaddingOneStream) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestPaddingOneStream();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestPaddingOneStreamTwoMaxedOut) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestPaddingOneStreamTwoMaxedOut();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestSendAllStreams) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestSendAllStreams();
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(LibvpxVp8SimulcastTest, TestDisablingStreams) {
|
||||||
|
auto fixture = CreateSpecificSimulcastTestFixture();
|
||||||
|
fixture->TestDisablingStreams();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Every on/off combination of the simulcast streams' |active| flags.
TEST(LibvpxVp8SimulcastTest, TestActiveStreams) {
  CreateSpecificSimulcastTestFixture()->TestActiveStreams();
}
|
||||||
|
|
||||||
|
// Reconfigure from simulcast down to a single stream and back.
TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneStream) {
  CreateSpecificSimulcastTestFixture()->TestSwitchingToOneStream();
}
|
||||||
|
|
||||||
|
// Same as above but with odd (non-divisible) frame dimensions.
TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneOddStream) {
  CreateSpecificSimulcastTestFixture()->TestSwitchingToOneOddStream();
}
|
||||||
|
|
||||||
|
// Same as above but with a tiny (4x4) resolution.
TEST(LibvpxVp8SimulcastTest, TestSwitchingToOneSmallStream) {
  CreateSpecificSimulcastTestFixture()->TestSwitchingToOneSmallStream();
}
|
||||||
|
|
||||||
|
// 3 temporal layers on all spatial streams: verify layer ids and sync flags.
TEST(LibvpxVp8SimulcastTest, TestSpatioTemporalLayers333PatternEncoder) {
  CreateSpecificSimulcastTestFixture()
      ->TestSpatioTemporalLayers333PatternEncoder();
}
|
||||||
|
|
||||||
|
// Encode/decode round trip with stride padding wider than the image.
TEST(LibvpxVp8SimulcastTest, TestStrideEncodeDecode) {
  CreateSpecificSimulcastTestFixture()->TestStrideEncodeDecode();
}
|
||||||
|
|
||||||
|
} // namespace test
|
||||||
|
} // namespace webrtc
|
modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.cc — new file, 806 lines
@@ -0,0 +1,806 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "modules/video_coding/codecs/vp8/simulcast_test_fixture_impl.h"
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <map>
|
||||||
|
#include <memory>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "api/video_codecs/sdp_video_format.h"
|
||||||
|
#include "common_video/include/video_frame.h"
|
||||||
|
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
||||||
|
#include "modules/video_coding/codecs/vp8/include/vp8.h"
|
||||||
|
#include "modules/video_coding/codecs/vp8/temporal_layers.h"
|
||||||
|
#include "modules/video_coding/include/video_coding_defines.h"
|
||||||
|
#include "rtc_base/checks.h"
|
||||||
|
#include "test/gtest.h"
|
||||||
|
|
||||||
|
using ::testing::_;
|
||||||
|
using ::testing::AllOf;
|
||||||
|
using ::testing::Field;
|
||||||
|
using ::testing::Return;
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace test {
|
||||||
|
|
||||||
|
namespace {

// Input frame dimensions for the default (three-stream) configuration.
const int kDefaultWidth = 1280;
const int kDefaultHeight = 720;
const int kNumberOfSimulcastStreams = 3;
// Solid test color written into every input frame (Y/U/V plane values).
const int kColorY = 66;
const int kColorU = 22;
const int kColorV = 33;
// Per-stream bitrates in kbps (SetRates multiplies by 1000), lowest
// resolution stream first.
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
// Three temporal layers on each of the three spatial streams.
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
|
||||||
|
|
||||||
|
// Copies the three expected per-stream values into |expected_values|
// (which must have room for at least three elements).
template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
  const T values[] = {value0, value1, value2};
  std::copy(values, values + 3, expected_values);
}
|
||||||
|
|
||||||
|
// Indices of the planes of an I420 frame, plus the plane count.
enum PlaneType {
  kYPlane = 0,
  kUPlane = 1,
  kVPlane = 2,
  kNumOfPlanes = 3,
};
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Captures encoder output for inspection by the tests: keeps a private copy
// of the most recent base-layer (simulcastIdx == 0) key frame and delta
// frame, and records the last temporal-layer id and layer-sync flag seen for
// each simulcast stream.
// NOTE(review): owns raw new[] buffers and has no deleted copy operations —
// copying an instance would double-delete; presumably never copied in tests.
class SimulcastTestFixtureImpl::Vp8TestEncodedImageCallback
    : public EncodedImageCallback {
 public:
  Vp8TestEncodedImageCallback() : picture_id_(-1) {
    // memset with -1 sets every byte to 0xFF, which reads back as -1 for the
    // int elements: "no frame seen yet".
    memset(temporal_layer_, -1, sizeof(temporal_layer_));
    memset(layer_sync_, false, sizeof(layer_sync_));
  }

  ~Vp8TestEncodedImageCallback() {
    // Buffers were allocated with new[] in OnEncodedImage (or are null).
    delete[] encoded_key_frame_._buffer;
    delete[] encoded_frame_._buffer;
  }

  virtual Result OnEncodedImage(const EncodedImage& encoded_image,
                                const CodecSpecificInfo* codec_specific_info,
                                const RTPFragmentationHeader* fragmentation) {
    // Only store the base layer.
    if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
      if (encoded_image._frameType == kVideoFrameKey) {
        // Replace the previously stored key frame with a deep copy.
        delete[] encoded_key_frame_._buffer;
        encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
        encoded_key_frame_._size = encoded_image._size;
        encoded_key_frame_._length = encoded_image._length;
        encoded_key_frame_._frameType = kVideoFrameKey;
        encoded_key_frame_._completeFrame = encoded_image._completeFrame;
        memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
               encoded_image._length);
      } else {
        // Replace the previously stored delta frame with a deep copy.
        delete[] encoded_frame_._buffer;
        encoded_frame_._buffer = new uint8_t[encoded_image._size];
        encoded_frame_._size = encoded_image._size;
        encoded_frame_._length = encoded_image._length;
        memcpy(encoded_frame_._buffer, encoded_image._buffer,
               encoded_image._length);
      }
    }
    // Record per-stream temporal layer info for every stream (not just the
    // base layer).
    layer_sync_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
        codec_specific_info->codecSpecific.VP8.layerSync;
    temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
        codec_specific_info->codecSpecific.VP8.temporalIdx;
    return Result(Result::OK, encoded_image._timeStamp);
  }
  // Returns the last recorded temporal layer id and sync flag for |stream|.
  // |picture_id| is never updated after construction and stays -1.
  void GetLastEncodedFrameInfo(int* picture_id,
                               int* temporal_layer,
                               bool* layer_sync,
                               int stream) {
    *picture_id = picture_id_;
    *temporal_layer = temporal_layer_[stream];
    *layer_sync = layer_sync_[stream];
  }
  // Shallow-copies the stored frame; the buffer remains owned by this object.
  void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
    *encoded_key_frame = encoded_key_frame_;
  }
  void GetLastEncodedFrame(EncodedImage* encoded_frame) {
    *encoded_frame = encoded_frame_;
  }

 private:
  EncodedImage encoded_key_frame_;
  EncodedImage encoded_frame_;
  int picture_id_;
  int temporal_layer_[kNumberOfSimulcastStreams];
  bool layer_sync_[kNumberOfSimulcastStreams];
};
|
||||||
|
|
||||||
|
// Verifies that decoded frames contain the solid test color (kColorY/U/V,
// within a small tolerance) and counts the number of decoded frames.
class SimulcastTestFixtureImpl::Vp8TestDecodedImageCallback
    : public DecodedImageCallback {
 public:
  Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
  int32_t Decoded(VideoFrame& decoded_image) override {
    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        decoded_image.video_frame_buffer()->ToI420();
    // Only the first row of each plane is inspected.
    for (int i = 0; i < decoded_image.width(); ++i) {
      EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
    }

    // TODO(mikhal): Verify the difference between U,V and the original.
    for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
      EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
      EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
    }
    decoded_frames_++;
    return 0;
  }
  int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
    // The tests never use this overload.
    RTC_NOTREACHED();
    return -1;
  }
  void Decoded(VideoFrame& decoded_image,
               rtc::Optional<int32_t> decode_time_ms,
               rtc::Optional<uint8_t> qp) override {
    // Timing and QP are ignored; forward to the single-argument overload.
    Decoded(decoded_image);
  }
  int DecodedFrames() { return decoded_frames_; }

 private:
  int decoded_frames_;
};
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Fills the visible |width| bytes of each of |height| rows with |value| and
// zeroes the remaining |stride - width| padding bytes of every row, so that
// image content and stride padding are easy to tell apart.
void SetPlane(uint8_t* data, uint8_t value, int width, int height, int stride) {
  uint8_t* row = data;
  for (int y = 0; y < height; ++y, row += stride) {
    std::fill_n(row, width, value);
    std::fill(row + width, row + stride, 0);
  }
}
|
||||||
|
|
||||||
|
// Fills in an I420Buffer from |plane_colors|: one solid value per plane
// (Y, U, V order), with stride padding zeroed by SetPlane.
void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
                 int plane_colors[kNumOfPlanes]) {
  SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
           buffer->height(), buffer->StrideY());

  SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
           buffer->ChromaHeight(), buffer->StrideU());

  SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
           buffer->ChromaHeight(), buffer->StrideV());
}
|
||||||
|
|
||||||
|
void ConfigureStream(int width,
|
||||||
|
int height,
|
||||||
|
int max_bitrate,
|
||||||
|
int min_bitrate,
|
||||||
|
int target_bitrate,
|
||||||
|
SimulcastStream* stream,
|
||||||
|
int num_temporal_layers) {
|
||||||
|
assert(stream);
|
||||||
|
stream->width = width;
|
||||||
|
stream->height = height;
|
||||||
|
stream->maxBitrate = max_bitrate;
|
||||||
|
stream->minBitrate = min_bitrate;
|
||||||
|
stream->targetBitrate = target_bitrate;
|
||||||
|
stream->numberOfTemporalLayers = num_temporal_layers;
|
||||||
|
stream->qpMax = 45;
|
||||||
|
stream->active = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Fills |settings| with the default three-stream VP8 simulcast codec
// configuration (quarter / half / full resolution of 1280x720), using
// |temporal_layer_profile| as the per-stream temporal layer counts.
void SimulcastTestFixtureImpl::DefaultSettings(
    VideoCodec* settings,
    const int* temporal_layer_profile) {
  RTC_CHECK(settings);
  memset(settings, 0, sizeof(VideoCodec));
  settings->codecType = kVideoCodecVP8;
  // 96 to 127 dynamic payload types for video codecs
  settings->plType = 120;
  // Bitrates in kbps.
  settings->startBitrate = 300;
  settings->minBitrate = 30;
  settings->maxBitrate = 0;
  settings->maxFramerate = 30;
  settings->width = kDefaultWidth;
  settings->height = kDefaultHeight;
  settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
  settings->active = true;
  // The three ConfigureStream calls below assume exactly three streams.
  ASSERT_EQ(3, kNumberOfSimulcastStreams);
  settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
                                       kDefaultOutlierFrameSizePercent};
  ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
                  kMinBitrates[0], kTargetBitrates[0],
                  &settings->simulcastStream[0], temporal_layer_profile[0]);
  ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
                  kMinBitrates[1], kTargetBitrates[1],
                  &settings->simulcastStream[1], temporal_layer_profile[1]);
  ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
                  kMinBitrates[2], kTargetBitrates[2],
                  &settings->simulcastStream[2], temporal_layer_profile[2]);
  settings->VP8()->denoisingOn = true;
  settings->VP8()->automaticResizeOn = false;
  settings->VP8()->frameDroppingOn = true;
  settings->VP8()->keyFrameInterval = 3000;
}
|
||||||
|
|
||||||
|
// Creates one VP8 encoder and one VP8 decoder from the given factories and
// initializes them with the default three-stream simulcast settings.
SimulcastTestFixtureImpl::SimulcastTestFixtureImpl(
    std::unique_ptr<VideoEncoderFactory> encoder_factory,
    std::unique_ptr<VideoDecoderFactory> decoder_factory) {
  encoder_ = encoder_factory->CreateVideoEncoder(SdpVideoFormat("VP8"));
  decoder_ = decoder_factory->CreateVideoDecoder(SdpVideoFormat("VP8"));
  SetUpCodec(kDefaultTemporalLayerProfile);
}
|
||||||
|
|
||||||
|
SimulcastTestFixtureImpl::~SimulcastTestFixtureImpl() {
  // Release codec-internal resources before the smart pointers destroy the
  // encoder/decoder objects themselves.
  encoder_->Release();
  decoder_->Release();
}
|
||||||
|
|
||||||
|
// Registers the test callbacks, applies default settings with the given
// temporal layer profile, initializes encoder and decoder, and allocates a
// zero-initialized 1280x720 input frame.
void SimulcastTestFixtureImpl::SetUpCodec(const int* temporal_layer_profile) {
  encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
  decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
  DefaultSettings(&settings_, temporal_layer_profile);
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
  EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
  input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
  input_buffer_->InitializeData();
  input_frame_.reset(
      new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
}
|
||||||
|
|
||||||
|
// (Re)creates the rate allocator from the current |settings_|; must be called
// after every settings change that affects stream configuration.
void SimulcastTestFixtureImpl::SetUpRateAllocator() {
  rate_allocator_.reset(new SimulcastRateAllocator(settings_));
}
|
||||||
|
|
||||||
|
// Pushes a new bitrate (kbps) and framerate to the encoder via the rate
// allocator (which expects bps, hence the * 1000).
void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
  encoder_->SetRateAllocation(
      rate_allocator_->GetAllocation(bitrate_kbps * 1000, fps), fps);
}
|
||||||
|
|
||||||
|
// Applies the given per-stream active flags, then encodes two frames (one
// expected key frame, one delta) and verifies that exactly the active
// streams produce output.
void SimulcastTestFixtureImpl::RunActiveStreamsTest(
    const std::vector<bool> active_streams) {
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  UpdateActiveStreams(active_streams);
  // Set sufficient bitrate for all streams so we can test active without
  // bitrate being an issue.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);

  ExpectStreams(kVideoFrameKey, active_streams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, active_streams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// Writes the |active| flag of each simulcast stream into |settings_| and
// reinitializes the allocator and encoder so the change takes effect.
void SimulcastTestFixtureImpl::UpdateActiveStreams(
    const std::vector<bool> active_streams) {
  ASSERT_EQ(static_cast<int>(active_streams.size()), kNumberOfSimulcastStreams);
  for (size_t i = 0; i < active_streams.size(); ++i) {
    settings_.simulcastStream[i].active = active_streams[i];
  }
  // Re initialize the allocator and encoder with the new settings.
  // TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
  // reconfiguration of the allocator and encoder. When the video bitrate
  // allocator has support for updating active streams without a
  // reinitialization, we can just call that here instead.
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
}
|
||||||
|
|
||||||
|
// Sets gmock expectations on the encode-complete callback: each stream whose
// flag is true must deliver exactly one frame of |frame_type| at its
// configured resolution (quarter / half / full of the default size).
void SimulcastTestFixtureImpl::ExpectStreams(
    FrameType frame_type,
    const std::vector<bool> expected_streams_active) {
  ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
            kNumberOfSimulcastStreams);
  if (expected_streams_active[0]) {
    EXPECT_CALL(
        encoder_callback_,
        OnEncodedImage(
            AllOf(Field(&EncodedImage::_frameType, frame_type),
                  Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
                  Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
            _, _))
        .Times(1)
        .WillRepeatedly(Return(
            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  }
  if (expected_streams_active[1]) {
    EXPECT_CALL(
        encoder_callback_,
        OnEncodedImage(
            AllOf(Field(&EncodedImage::_frameType, frame_type),
                  Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
                  Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
            _, _))
        .Times(1)
        .WillRepeatedly(Return(
            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  }
  if (expected_streams_active[2]) {
    EXPECT_CALL(encoder_callback_,
                OnEncodedImage(
                    AllOf(Field(&EncodedImage::_frameType, frame_type),
                          Field(&EncodedImage::_encodedWidth, kDefaultWidth),
                          Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
                    _, _))
        .Times(1)
        .WillRepeatedly(Return(
            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  }
}
|
||||||
|
|
||||||
|
// Convenience overload: expects the lowest |expected_video_streams| streams
// to produce a frame of |frame_type| and the remaining streams to be silent.
void SimulcastTestFixtureImpl::ExpectStreams(FrameType frame_type,
                                             int expected_video_streams) {
  ASSERT_GE(expected_video_streams, 0);
  ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
  std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
  std::fill_n(expected_streams_active.begin(), expected_video_streams, true);
  ExpectStreams(frame_type, expected_streams_active);
}
|
||||||
|
|
||||||
|
// Checks that the last encoded frame on each of |num_spatial_layers| streams
// carried the expected temporal layer id and layer-sync flag.
void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
    Vp8TestEncodedImageCallback* encoder_callback,
    const int* expected_temporal_idx,
    const bool* expected_layer_sync,
    int num_spatial_layers) {
  int picture_id = -1;
  int temporal_layer = -1;
  bool layer_sync = false;
  for (int i = 0; i < num_spatial_layers; i++) {
    encoder_callback->GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                              &layer_sync, i);
    EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
    EXPECT_EQ(expected_layer_sync[i], layer_sync);
  }
}
|
||||||
|
|
||||||
|
// We currently expect all active streams to generate a key frame even though
// a key frame was only requested for some of them.
void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
  SetRates(kMaxBitrates[2], 30);  // To get all three streams.
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  // First frame is always a key frame on all streams.
  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // No request: all streams deliver delta frames.
  ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // Key frame requested on stream 0 only -> key frames on all streams.
  frame_types[0] = kVideoFrameKey;
  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // Key frame requested on stream 1 only -> key frames on all streams.
  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
  frame_types[1] = kVideoFrameKey;
  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // Key frame requested on stream 2 only -> key frames on all streams.
  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
  frame_types[2] = kVideoFrameKey;
  ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // Back to no request: delta frames again.
  std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
  ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// With bitrate below even the lowest stream's minimum, only the base layer
// is encoded (the encoder never drops the base stream entirely).
void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
  // We should always encode the base layer.
  SetRates(kMinBitrates[0] - 1, 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  ExpectStreams(kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// Bitrate equals the lowest stream's minimum: one stream sent, two padded.
void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
  // We have just enough to get only the first stream and padding for two.
  SetRates(kMinBitrates[0], 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  ExpectStreams(kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// One kbps short of enabling the second stream: the first stream is maxed
// out and the other two are padding.
void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
  // We are just below limit of sending second stream, so we should get
  // the first stream maxed out (at |maxBitrate|), and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  ExpectStreams(kVideoFrameKey, 1);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// Enough bitrate for the first two streams: two sent, one padded.
void SimulcastTestFixtureImpl::TestPaddingOneStream() {
  // We have just enough to send two streams, so padding for one stream.
  SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  ExpectStreams(kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// One kbps short of enabling the third stream: two streams sent at their
// caps, one padded.
void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
  // We are just below limit of sending third stream, so we should get
  // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  ExpectStreams(kVideoFrameKey, 2);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// Exactly enough bitrate to enable all three streams: all are sent.
void SimulcastTestFixtureImpl::TestSendAllStreams() {
  // We have just enough to send all streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  ExpectStreams(kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// Walks the bitrate down (3 -> 2 -> 1 streams) and back up again, verifying
// which streams are sent at each step and that re-enabling a stream
// produces a key frame.
void SimulcastTestFixtureImpl::TestDisablingStreams() {
  // We should get three media streams.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  ExpectStreams(kVideoFrameKey, 3);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  ExpectStreams(kVideoFrameDelta, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  ExpectStreams(kVideoFrameDelta, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // We should only get the first stream and padding for two.
  SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
  ExpectStreams(kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // We don't have enough bitrate for the thumbnail stream, but we should get
  // it anyway with current configuration.
  SetRates(kTargetBitrates[0] - 1, 30);
  ExpectStreams(kVideoFrameDelta, 1);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // We should only get two streams and padding for one.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(kVideoFrameKey, 2);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // We should get all three streams.
  SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
  // We get a key frame because a new stream is being enabled.
  ExpectStreams(kVideoFrameKey, 3);
  input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// Exercises every on/off combination of the three simulcast streams'
// |active| flags, in the same order as the original explicit call list.
void SimulcastTestFixtureImpl::TestActiveStreams() {
  const std::vector<std::vector<bool>> combinations = {
      {true, true, true},     // All streams on.
      {false, false, false},  // All streams off.
      {false, true, true},    // Low stream off.
      {true, false, true},    // Middle stream off.
      {true, true, false},    // High stream off.
      {true, false, false},   // Only low stream turned on.
      {false, true, false},   // Only middle stream turned on.
      {false, false, true},   // Only high stream turned on.
  };
  for (const std::vector<bool>& active : combinations) {
    RunActiveStreamsTest(active);
  }
}
|
||||||
|
|
||||||
|
// Reconfigures the encoder from three-stream simulcast down to a single
// |width| x |height| stream (as done when switching to screenshare), encodes
// and verifies one key frame, then restores the default configuration and
// verifies the base stream comes back.
void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
  // Disable all streams except the last and set the bitrate of the last to
  // 100 kbps. This verifies the way GTP switches to screenshare mode.
  settings_.VP8()->numberOfTemporalLayers = 1;
  settings_.maxBitrate = 100;
  settings_.startBitrate = 100;
  settings_.width = width;
  settings_.height = height;
  // maxBitrate == 0 disables the lower streams; they keep the full
  // resolution and a single temporal layer.
  for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
    settings_.simulcastStream[i].maxBitrate = 0;
    settings_.simulcastStream[i].width = settings_.width;
    settings_.simulcastStream[i].height = settings_.height;
    settings_.simulcastStream[i].numberOfTemporalLayers = 1;
  }
  // Setting input image to new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();

  input_frame_.reset(
      new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));

  // The for loop above did not set the bitrate of the highest layer.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].maxBitrate =
      0;
  // The highest layer has to correspond to the non-simulcast resolution.
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
      settings_.width;
  settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
      settings_.height;
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));

  // Encode one frame and verify.
  SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                     kVideoFrameDelta);
  EXPECT_CALL(
      encoder_callback_,
      OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
                           Field(&EncodedImage::_encodedWidth, width),
                           Field(&EncodedImage::_encodedHeight, height)),
                     _, _))
      .Times(1)
      .WillRepeatedly(Return(
          EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

  // Switch back.
  DefaultSettings(&settings_, kDefaultTemporalLayerProfile);
  // Start at the lowest bitrate for enabling base stream.
  settings_.startBitrate = kMinBitrates[0];
  SetUpRateAllocator();
  EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
  SetRates(settings_.startBitrate, 30);
  ExpectStreams(kVideoFrameKey, 1);
  // Resize |input_frame_| to the new resolution.
  input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
  input_buffer_->InitializeData();
  input_frame_.reset(
      new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
  EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
}
|
||||||
|
|
||||||
|
// Even dimensions (divisible by 4).
void SimulcastTestFixtureImpl::TestSwitchingToOneStream() {
  SwitchingToOneStream(1024, 768);
}
|
||||||
|
|
||||||
|
// Odd dimensions, exercising rounding in the scaled-down streams.
void SimulcastTestFixtureImpl::TestSwitchingToOneOddStream() {
  SwitchingToOneStream(1023, 769);
}
|
||||||
|
|
||||||
|
// Minimal 4x4 resolution.
void SimulcastTestFixtureImpl::TestSwitchingToOneSmallStream() {
  SwitchingToOneStream(4, 4);
}
|
||||||
|
|
||||||
|
// Test the layer pattern and sync flag for various spatial-temporal patterns.
|
||||||
|
// 3-3-3 pattern: 3 temporal layers for all spatial streams, so same
|
||||||
|
// temporal_layer id and layer_sync is expected for all streams.
|
||||||
|
void SimulcastTestFixtureImpl::TestSpatioTemporalLayers333PatternEncoder() {
|
||||||
|
Vp8TestEncodedImageCallback encoder_callback;
|
||||||
|
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
|
||||||
|
SetRates(kMaxBitrates[2], 30); // To get all three streams.
|
||||||
|
|
||||||
|
int expected_temporal_idx[3] = {-1, -1, -1};
|
||||||
|
bool expected_layer_sync[3] = {false, false, false};
|
||||||
|
|
||||||
|
// First frame: #0.
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #1.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #2.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #3.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #4.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #5.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test the layer pattern and sync flag for various spatial-temporal patterns.
|
||||||
|
// 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
|
||||||
|
// 1 temporal layer for highest resolution.
|
||||||
|
// For this profile, we expect the temporal index pattern to be:
|
||||||
|
// 1st stream: 0, 2, 1, 2, ....
|
||||||
|
// 2nd stream: 0, 1, 0, 1, ...
|
||||||
|
// 3rd stream: -1, -1, -1, -1, ....
|
||||||
|
// Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
|
||||||
|
// should always have temporal layer idx set to kNoTemporalIdx = -1.
|
||||||
|
// Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
|
||||||
|
// TODO(marpan): Although this seems safe for now, we should fix this.
|
||||||
|
void SimulcastTestFixtureImpl::TestSpatioTemporalLayers321PatternEncoder() {
|
||||||
|
int temporal_layer_profile[3] = {3, 2, 1};
|
||||||
|
SetUpCodec(temporal_layer_profile);
|
||||||
|
Vp8TestEncodedImageCallback encoder_callback;
|
||||||
|
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
|
||||||
|
SetRates(kMaxBitrates[2], 30); // To get all three streams.
|
||||||
|
|
||||||
|
int expected_temporal_idx[3] = {-1, -1, -1};
|
||||||
|
bool expected_layer_sync[3] = {false, false, false};
|
||||||
|
|
||||||
|
// First frame: #0.
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #1.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #2.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #3.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #4.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
|
||||||
|
// Next frame: #5.
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
|
||||||
|
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
||||||
|
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
||||||
|
}
|
||||||
|
|
||||||
|
void SimulcastTestFixtureImpl::TestStrideEncodeDecode() {
|
||||||
|
Vp8TestEncodedImageCallback encoder_callback;
|
||||||
|
Vp8TestDecodedImageCallback decoder_callback;
|
||||||
|
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
|
||||||
|
decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
|
||||||
|
|
||||||
|
SetRates(kMaxBitrates[2], 30); // To get all three streams.
|
||||||
|
// Setting two (possibly) problematic use cases for stride:
|
||||||
|
// 1. stride > width 2. stride_y != stride_uv/2
|
||||||
|
int stride_y = kDefaultWidth + 20;
|
||||||
|
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
|
||||||
|
input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
|
||||||
|
stride_uv, stride_uv);
|
||||||
|
input_frame_.reset(
|
||||||
|
new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
|
||||||
|
|
||||||
|
// Set color.
|
||||||
|
int plane_offset[kNumOfPlanes];
|
||||||
|
plane_offset[kYPlane] = kColorY;
|
||||||
|
plane_offset[kUPlane] = kColorU;
|
||||||
|
plane_offset[kVPlane] = kColorV;
|
||||||
|
CreateImage(input_buffer_, plane_offset);
|
||||||
|
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
|
||||||
|
// Change color.
|
||||||
|
plane_offset[kYPlane] += 1;
|
||||||
|
plane_offset[kUPlane] += 1;
|
||||||
|
plane_offset[kVPlane] += 1;
|
||||||
|
CreateImage(input_buffer_, plane_offset);
|
||||||
|
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
||||||
|
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
||||||
|
|
||||||
|
EncodedImage encoded_frame;
|
||||||
|
// Only encoding one frame - so will be a key frame.
|
||||||
|
encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
|
||||||
|
EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL, 0));
|
||||||
|
encoder_callback.GetLastEncodedFrame(&encoded_frame);
|
||||||
|
decoder_->Decode(encoded_frame, false, NULL, 0);
|
||||||
|
EXPECT_EQ(2, decoder_callback.DecodedFrames());
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace test
|
||||||
|
} // namespace webrtc
|
@ -0,0 +1,88 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_FIXTURE_IMPL_H_
|
||||||
|
#define MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_FIXTURE_IMPL_H_
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "api/test/simulcast_test_fixture.h"
|
||||||
|
#include "api/video/i420_buffer.h"
|
||||||
|
#include "api/video/video_frame.h"
|
||||||
|
#include "api/video_codecs/video_decoder_factory.h"
|
||||||
|
#include "api/video_codecs/video_encoder_factory.h"
|
||||||
|
#include "common_types.h" // NOLINT(build/include)
|
||||||
|
#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
|
||||||
|
#include "modules/video_coding/include/mock/mock_video_codec_interface.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace test {
|
||||||
|
|
||||||
|
class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
|
||||||
|
public:
|
||||||
|
SimulcastTestFixtureImpl(
|
||||||
|
std::unique_ptr<VideoEncoderFactory> encoder_factory,
|
||||||
|
std::unique_ptr<VideoDecoderFactory> decoder_factory);
|
||||||
|
~SimulcastTestFixtureImpl() final;
|
||||||
|
|
||||||
|
// Implements SimulcastTestFixture.
|
||||||
|
void TestKeyFrameRequestsOnAllStreams() override;
|
||||||
|
void TestPaddingAllStreams() override;
|
||||||
|
void TestPaddingTwoStreams() override;
|
||||||
|
void TestPaddingTwoStreamsOneMaxedOut() override;
|
||||||
|
void TestPaddingOneStream() override;
|
||||||
|
void TestPaddingOneStreamTwoMaxedOut() override;
|
||||||
|
void TestSendAllStreams() override;
|
||||||
|
void TestDisablingStreams() override;
|
||||||
|
void TestActiveStreams() override;
|
||||||
|
void TestSwitchingToOneStream() override;
|
||||||
|
void TestSwitchingToOneOddStream() override;
|
||||||
|
void TestSwitchingToOneSmallStream() override;
|
||||||
|
void TestSpatioTemporalLayers333PatternEncoder() override;
|
||||||
|
void TestSpatioTemporalLayers321PatternEncoder() override;
|
||||||
|
void TestStrideEncodeDecode() override;
|
||||||
|
|
||||||
|
static void DefaultSettings(VideoCodec* settings,
|
||||||
|
const int* temporal_layer_profile);
|
||||||
|
|
||||||
|
private:
|
||||||
|
class Vp8TestEncodedImageCallback;
|
||||||
|
class Vp8TestDecodedImageCallback;
|
||||||
|
|
||||||
|
void SetUpCodec(const int* temporal_layer_profile);
|
||||||
|
void SetUpRateAllocator();
|
||||||
|
void SetRates(uint32_t bitrate_kbps, uint32_t fps);
|
||||||
|
void RunActiveStreamsTest(const std::vector<bool> active_streams);
|
||||||
|
void UpdateActiveStreams(const std::vector<bool> active_streams);
|
||||||
|
void ExpectStreams(FrameType frame_type,
|
||||||
|
const std::vector<bool> expected_streams_active);
|
||||||
|
void ExpectStreams(FrameType frame_type, int expected_video_streams);
|
||||||
|
void VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
||||||
|
Vp8TestEncodedImageCallback* encoder_callback,
|
||||||
|
const int* expected_temporal_idx,
|
||||||
|
const bool* expected_layer_sync,
|
||||||
|
int num_spatial_layers);
|
||||||
|
void SwitchingToOneStream(int width, int height);
|
||||||
|
|
||||||
|
std::unique_ptr<VideoEncoder> encoder_;
|
||||||
|
MockEncodedImageCallback encoder_callback_;
|
||||||
|
std::unique_ptr<VideoDecoder> decoder_;
|
||||||
|
MockDecodedImageCallback decoder_callback_;
|
||||||
|
VideoCodec settings_;
|
||||||
|
rtc::scoped_refptr<I420Buffer> input_buffer_;
|
||||||
|
std::unique_ptr<VideoFrame> input_frame_;
|
||||||
|
std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace test
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_FIXTURE_IMPL_H_
|
@ -1,813 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* Use of this source code is governed by a BSD-style license
|
|
||||||
* that can be found in the LICENSE file in the root of the source
|
|
||||||
* tree. An additional intellectual property rights grant can be found
|
|
||||||
* in the file PATENTS. All contributing project authors may
|
|
||||||
* be found in the AUTHORS file in the root of the source tree.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_UTILITY_H_
|
|
||||||
#define MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_UTILITY_H_
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <map>
|
|
||||||
#include <memory>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#include "api/video/i420_buffer.h"
|
|
||||||
#include "api/video/video_frame.h"
|
|
||||||
#include "common_video/include/video_frame.h"
|
|
||||||
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
|
||||||
#include "modules/video_coding/codecs/vp8/include/vp8.h"
|
|
||||||
#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
|
|
||||||
#include "modules/video_coding/codecs/vp8/temporal_layers.h"
|
|
||||||
#include "modules/video_coding/include/mock/mock_video_codec_interface.h"
|
|
||||||
#include "modules/video_coding/include/video_coding_defines.h"
|
|
||||||
#include "rtc_base/checks.h"
|
|
||||||
#include "test/gtest.h"
|
|
||||||
|
|
||||||
using ::testing::_;
|
|
||||||
using ::testing::AllOf;
|
|
||||||
using ::testing::Field;
|
|
||||||
using ::testing::Return;
|
|
||||||
|
|
||||||
namespace webrtc {
|
|
||||||
namespace testing {
|
|
||||||
|
|
||||||
const int kDefaultWidth = 1280;
|
|
||||||
const int kDefaultHeight = 720;
|
|
||||||
const int kNumberOfSimulcastStreams = 3;
|
|
||||||
const int kColorY = 66;
|
|
||||||
const int kColorU = 22;
|
|
||||||
const int kColorV = 33;
|
|
||||||
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
|
|
||||||
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
|
|
||||||
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
|
|
||||||
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
|
|
||||||
expected_values[0] = value0;
|
|
||||||
expected_values[1] = value1;
|
|
||||||
expected_values[2] = value2;
|
|
||||||
}
|
|
||||||
|
|
||||||
enum PlaneType {
|
|
||||||
kYPlane = 0,
|
|
||||||
kUPlane = 1,
|
|
||||||
kVPlane = 2,
|
|
||||||
kNumOfPlanes = 3,
|
|
||||||
};
|
|
||||||
|
|
||||||
class Vp8TestEncodedImageCallback : public EncodedImageCallback {
|
|
||||||
public:
|
|
||||||
Vp8TestEncodedImageCallback() : picture_id_(-1) {
|
|
||||||
memset(temporal_layer_, -1, sizeof(temporal_layer_));
|
|
||||||
memset(layer_sync_, false, sizeof(layer_sync_));
|
|
||||||
}
|
|
||||||
|
|
||||||
~Vp8TestEncodedImageCallback() {
|
|
||||||
delete[] encoded_key_frame_._buffer;
|
|
||||||
delete[] encoded_frame_._buffer;
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual Result OnEncodedImage(const EncodedImage& encoded_image,
|
|
||||||
const CodecSpecificInfo* codec_specific_info,
|
|
||||||
const RTPFragmentationHeader* fragmentation) {
|
|
||||||
// Only store the base layer.
|
|
||||||
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
|
|
||||||
if (encoded_image._frameType == kVideoFrameKey) {
|
|
||||||
delete[] encoded_key_frame_._buffer;
|
|
||||||
encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
|
|
||||||
encoded_key_frame_._size = encoded_image._size;
|
|
||||||
encoded_key_frame_._length = encoded_image._length;
|
|
||||||
encoded_key_frame_._frameType = kVideoFrameKey;
|
|
||||||
encoded_key_frame_._completeFrame = encoded_image._completeFrame;
|
|
||||||
memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
|
|
||||||
encoded_image._length);
|
|
||||||
} else {
|
|
||||||
delete[] encoded_frame_._buffer;
|
|
||||||
encoded_frame_._buffer = new uint8_t[encoded_image._size];
|
|
||||||
encoded_frame_._size = encoded_image._size;
|
|
||||||
encoded_frame_._length = encoded_image._length;
|
|
||||||
memcpy(encoded_frame_._buffer, encoded_image._buffer,
|
|
||||||
encoded_image._length);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
layer_sync_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
|
|
||||||
codec_specific_info->codecSpecific.VP8.layerSync;
|
|
||||||
temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
|
|
||||||
codec_specific_info->codecSpecific.VP8.temporalIdx;
|
|
||||||
return Result(Result::OK, encoded_image._timeStamp);
|
|
||||||
}
|
|
||||||
void GetLastEncodedFrameInfo(int* picture_id,
|
|
||||||
int* temporal_layer,
|
|
||||||
bool* layer_sync,
|
|
||||||
int stream) {
|
|
||||||
*picture_id = picture_id_;
|
|
||||||
*temporal_layer = temporal_layer_[stream];
|
|
||||||
*layer_sync = layer_sync_[stream];
|
|
||||||
}
|
|
||||||
void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
|
|
||||||
*encoded_key_frame = encoded_key_frame_;
|
|
||||||
}
|
|
||||||
void GetLastEncodedFrame(EncodedImage* encoded_frame) {
|
|
||||||
*encoded_frame = encoded_frame_;
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
EncodedImage encoded_key_frame_;
|
|
||||||
EncodedImage encoded_frame_;
|
|
||||||
int picture_id_;
|
|
||||||
int temporal_layer_[kNumberOfSimulcastStreams];
|
|
||||||
bool layer_sync_[kNumberOfSimulcastStreams];
|
|
||||||
};
|
|
||||||
|
|
||||||
class Vp8TestDecodedImageCallback : public DecodedImageCallback {
|
|
||||||
public:
|
|
||||||
Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
|
|
||||||
int32_t Decoded(VideoFrame& decoded_image) override {
|
|
||||||
rtc::scoped_refptr<I420BufferInterface> i420_buffer =
|
|
||||||
decoded_image.video_frame_buffer()->ToI420();
|
|
||||||
for (int i = 0; i < decoded_image.width(); ++i) {
|
|
||||||
EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(mikhal): Verify the difference between U,V and the original.
|
|
||||||
for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
|
|
||||||
EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
|
|
||||||
EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
|
|
||||||
}
|
|
||||||
decoded_frames_++;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
|
|
||||||
RTC_NOTREACHED();
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
void Decoded(VideoFrame& decoded_image,
|
|
||||||
rtc::Optional<int32_t> decode_time_ms,
|
|
||||||
rtc::Optional<uint8_t> qp) override {
|
|
||||||
Decoded(decoded_image);
|
|
||||||
}
|
|
||||||
int DecodedFrames() { return decoded_frames_; }
|
|
||||||
|
|
||||||
private:
|
|
||||||
int decoded_frames_;
|
|
||||||
};
|
|
||||||
|
|
||||||
class TestVp8Simulcast : public ::testing::Test {
|
|
||||||
public:
|
|
||||||
static void SetPlane(uint8_t* data,
|
|
||||||
uint8_t value,
|
|
||||||
int width,
|
|
||||||
int height,
|
|
||||||
int stride) {
|
|
||||||
for (int i = 0; i < height; i++, data += stride) {
|
|
||||||
// Setting allocated area to zero - setting only image size to
|
|
||||||
// requested values - will make it easier to distinguish between image
|
|
||||||
// size and frame size (accounting for stride).
|
|
||||||
memset(data, value, width);
|
|
||||||
memset(data + width, 0, stride - width);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fills in an I420Buffer from |plane_colors|.
|
|
||||||
static void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
|
|
||||||
int plane_colors[kNumOfPlanes]) {
|
|
||||||
SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
|
|
||||||
buffer->height(), buffer->StrideY());
|
|
||||||
|
|
||||||
SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
|
|
||||||
buffer->ChromaHeight(), buffer->StrideU());
|
|
||||||
|
|
||||||
SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
|
|
||||||
buffer->ChromaHeight(), buffer->StrideV());
|
|
||||||
}
|
|
||||||
|
|
||||||
static void DefaultSettings(VideoCodec* settings,
|
|
||||||
const int* temporal_layer_profile) {
|
|
||||||
RTC_CHECK(settings);
|
|
||||||
memset(settings, 0, sizeof(VideoCodec));
|
|
||||||
settings->codecType = kVideoCodecVP8;
|
|
||||||
// 96 to 127 dynamic payload types for video codecs
|
|
||||||
settings->plType = 120;
|
|
||||||
settings->startBitrate = 300;
|
|
||||||
settings->minBitrate = 30;
|
|
||||||
settings->maxBitrate = 0;
|
|
||||||
settings->maxFramerate = 30;
|
|
||||||
settings->width = kDefaultWidth;
|
|
||||||
settings->height = kDefaultHeight;
|
|
||||||
settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
|
|
||||||
settings->active = true;
|
|
||||||
ASSERT_EQ(3, kNumberOfSimulcastStreams);
|
|
||||||
settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
|
|
||||||
kDefaultOutlierFrameSizePercent};
|
|
||||||
ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
|
|
||||||
kMinBitrates[0], kTargetBitrates[0],
|
|
||||||
&settings->simulcastStream[0], temporal_layer_profile[0]);
|
|
||||||
ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
|
|
||||||
kMinBitrates[1], kTargetBitrates[1],
|
|
||||||
&settings->simulcastStream[1], temporal_layer_profile[1]);
|
|
||||||
ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
|
|
||||||
kMinBitrates[2], kTargetBitrates[2],
|
|
||||||
&settings->simulcastStream[2], temporal_layer_profile[2]);
|
|
||||||
settings->VP8()->denoisingOn = true;
|
|
||||||
settings->VP8()->automaticResizeOn = false;
|
|
||||||
settings->VP8()->frameDroppingOn = true;
|
|
||||||
settings->VP8()->keyFrameInterval = 3000;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ConfigureStream(int width,
|
|
||||||
int height,
|
|
||||||
int max_bitrate,
|
|
||||||
int min_bitrate,
|
|
||||||
int target_bitrate,
|
|
||||||
SimulcastStream* stream,
|
|
||||||
int num_temporal_layers) {
|
|
||||||
assert(stream);
|
|
||||||
stream->width = width;
|
|
||||||
stream->height = height;
|
|
||||||
stream->maxBitrate = max_bitrate;
|
|
||||||
stream->minBitrate = min_bitrate;
|
|
||||||
stream->targetBitrate = target_bitrate;
|
|
||||||
stream->numberOfTemporalLayers = num_temporal_layers;
|
|
||||||
stream->qpMax = 45;
|
|
||||||
stream->active = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected:
|
|
||||||
virtual std::unique_ptr<VP8Encoder> CreateEncoder() = 0;
|
|
||||||
virtual std::unique_ptr<VP8Decoder> CreateDecoder() = 0;
|
|
||||||
|
|
||||||
void SetUp() override {
|
|
||||||
encoder_ = CreateEncoder();
|
|
||||||
decoder_ = CreateDecoder();
|
|
||||||
SetUpCodec(kDefaultTemporalLayerProfile);
|
|
||||||
}
|
|
||||||
|
|
||||||
void TearDown() override {
|
|
||||||
encoder_->Release();
|
|
||||||
decoder_->Release();
|
|
||||||
encoder_.reset();
|
|
||||||
decoder_.reset();
|
|
||||||
}
|
|
||||||
|
|
||||||
void SetUpCodec(const int* temporal_layer_profile) {
|
|
||||||
encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
|
|
||||||
decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
|
|
||||||
DefaultSettings(&settings_, temporal_layer_profile);
|
|
||||||
SetUpRateAllocator();
|
|
||||||
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
|
|
||||||
EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
|
|
||||||
input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
|
|
||||||
input_buffer_->InitializeData();
|
|
||||||
input_frame_.reset(
|
|
||||||
new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
|
|
||||||
}
|
|
||||||
|
|
||||||
void SetUpRateAllocator() {
|
|
||||||
rate_allocator_.reset(new SimulcastRateAllocator(settings_));
|
|
||||||
}
|
|
||||||
|
|
||||||
void SetRates(uint32_t bitrate_kbps, uint32_t fps) {
|
|
||||||
encoder_->SetRateAllocation(
|
|
||||||
rate_allocator_->GetAllocation(bitrate_kbps * 1000, fps), fps);
|
|
||||||
}
|
|
||||||
|
|
||||||
void RunActiveStreamsTest(const std::vector<bool> active_streams) {
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
UpdateActiveStreams(active_streams);
|
|
||||||
// Set sufficient bitrate for all streams so we can test active without
|
|
||||||
// bitrate being an issue.
|
|
||||||
SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameKey, active_streams);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, active_streams);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void UpdateActiveStreams(const std::vector<bool> active_streams) {
|
|
||||||
ASSERT_EQ(static_cast<int>(active_streams.size()),
|
|
||||||
kNumberOfSimulcastStreams);
|
|
||||||
for (size_t i = 0; i < active_streams.size(); ++i) {
|
|
||||||
settings_.simulcastStream[i].active = active_streams[i];
|
|
||||||
}
|
|
||||||
// Re initialize the allocator and encoder with the new settings.
|
|
||||||
// TODO(bugs.webrtc.org/8807): Currently, we do a full "hard"
|
|
||||||
// reconfiguration of the allocator and encoder. When the video bitrate
|
|
||||||
// allocator has support for updating active streams without a
|
|
||||||
// reinitialization, we can just call that here instead.
|
|
||||||
SetUpRateAllocator();
|
|
||||||
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
|
|
||||||
}
|
|
||||||
|
|
||||||
void ExpectStreams(FrameType frame_type,
|
|
||||||
const std::vector<bool> expected_streams_active) {
|
|
||||||
ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
|
|
||||||
kNumberOfSimulcastStreams);
|
|
||||||
if (expected_streams_active[0]) {
|
|
||||||
EXPECT_CALL(
|
|
||||||
encoder_callback_,
|
|
||||||
OnEncodedImage(
|
|
||||||
AllOf(Field(&EncodedImage::_frameType, frame_type),
|
|
||||||
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
|
|
||||||
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
|
|
||||||
_, _))
|
|
||||||
.Times(1)
|
|
||||||
.WillRepeatedly(Return(EncodedImageCallback::Result(
|
|
||||||
EncodedImageCallback::Result::OK, 0)));
|
|
||||||
}
|
|
||||||
if (expected_streams_active[1]) {
|
|
||||||
EXPECT_CALL(
|
|
||||||
encoder_callback_,
|
|
||||||
OnEncodedImage(
|
|
||||||
AllOf(Field(&EncodedImage::_frameType, frame_type),
|
|
||||||
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
|
|
||||||
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
|
|
||||||
_, _))
|
|
||||||
.Times(1)
|
|
||||||
.WillRepeatedly(Return(EncodedImageCallback::Result(
|
|
||||||
EncodedImageCallback::Result::OK, 0)));
|
|
||||||
}
|
|
||||||
if (expected_streams_active[2]) {
|
|
||||||
EXPECT_CALL(
|
|
||||||
encoder_callback_,
|
|
||||||
OnEncodedImage(
|
|
||||||
AllOf(Field(&EncodedImage::_frameType, frame_type),
|
|
||||||
Field(&EncodedImage::_encodedWidth, kDefaultWidth),
|
|
||||||
Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
|
|
||||||
_, _))
|
|
||||||
.Times(1)
|
|
||||||
.WillRepeatedly(Return(EncodedImageCallback::Result(
|
|
||||||
EncodedImageCallback::Result::OK, 0)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ExpectStreams(FrameType frame_type, int expected_video_streams) {
|
|
||||||
ASSERT_GE(expected_video_streams, 0);
|
|
||||||
ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
|
|
||||||
std::vector<bool> expected_streams_active(kNumberOfSimulcastStreams, false);
|
|
||||||
for (int i = 0; i < expected_video_streams; ++i) {
|
|
||||||
expected_streams_active[i] = true;
|
|
||||||
}
|
|
||||||
ExpectStreams(frame_type, expected_streams_active);
|
|
||||||
}
|
|
||||||
|
|
||||||
void VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
Vp8TestEncodedImageCallback* encoder_callback,
|
|
||||||
const int* expected_temporal_idx,
|
|
||||||
const bool* expected_layer_sync,
|
|
||||||
int num_spatial_layers) {
|
|
||||||
int picture_id = -1;
|
|
||||||
int temporal_layer = -1;
|
|
||||||
bool layer_sync = false;
|
|
||||||
for (int i = 0; i < num_spatial_layers; i++) {
|
|
||||||
encoder_callback->GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
|
|
||||||
&layer_sync, i);
|
|
||||||
EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
|
|
||||||
EXPECT_EQ(expected_layer_sync[i], layer_sync);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We currently expect all active streams to generate a key frame even though
|
|
||||||
// a key frame was only requested for some of them.
|
|
||||||
void TestKeyFrameRequestsOnAllStreams() {
|
|
||||||
SetRates(kMaxBitrates[2], 30); // To get all three streams.
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
frame_types[0] = kVideoFrameKey;
|
|
||||||
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
|
|
||||||
frame_types[1] = kVideoFrameKey;
|
|
||||||
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
|
|
||||||
frame_types[2] = kVideoFrameKey;
|
|
||||||
ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestPaddingAllStreams() {
|
|
||||||
// We should always encode the base layer.
|
|
||||||
SetRates(kMinBitrates[0] - 1, 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, 1);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, 1);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestPaddingTwoStreams() {
|
|
||||||
// We have just enough to get only the first stream and padding for two.
|
|
||||||
SetRates(kMinBitrates[0], 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, 1);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, 1);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestPaddingTwoStreamsOneMaxedOut() {
|
|
||||||
// We are just below limit of sending second stream, so we should get
|
|
||||||
// the first stream maxed out (at |maxBitrate|), and padding for two.
|
|
||||||
SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, 1);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, 1);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestPaddingOneStream() {
|
|
||||||
// We have just enough to send two streams, so padding for one stream.
|
|
||||||
SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, 2);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, 2);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestPaddingOneStreamTwoMaxedOut() {
|
|
||||||
// We are just below limit of sending third stream, so we should get
|
|
||||||
// first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
|
|
||||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, 2);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, 2);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestSendAllStreams() {
|
|
||||||
// We have just enough to send all streams.
|
|
||||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, 3);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, 3);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestDisablingStreams() {
|
|
||||||
// We should get three media streams.
|
|
||||||
SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
ExpectStreams(kVideoFrameKey, 3);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
ExpectStreams(kVideoFrameDelta, 3);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
// We should only get two streams and padding for one.
|
|
||||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
|
|
||||||
ExpectStreams(kVideoFrameDelta, 2);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
// We should only get the first stream and padding for two.
|
|
||||||
SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
|
|
||||||
ExpectStreams(kVideoFrameDelta, 1);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
// We don't have enough bitrate for the thumbnail stream, but we should get
|
|
||||||
// it anyway with current configuration.
|
|
||||||
SetRates(kTargetBitrates[0] - 1, 30);
|
|
||||||
ExpectStreams(kVideoFrameDelta, 1);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
// We should only get two streams and padding for one.
|
|
||||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
|
|
||||||
// We get a key frame because a new stream is being enabled.
|
|
||||||
ExpectStreams(kVideoFrameKey, 2);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
// We should get all three streams.
|
|
||||||
SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
|
|
||||||
// We get a key frame because a new stream is being enabled.
|
|
||||||
ExpectStreams(kVideoFrameKey, 3);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestActiveStreams() {
|
|
||||||
// All streams on.
|
|
||||||
RunActiveStreamsTest({true, true, true});
|
|
||||||
// All streams off.
|
|
||||||
RunActiveStreamsTest({false, false, false});
|
|
||||||
// Low stream off.
|
|
||||||
RunActiveStreamsTest({false, true, true});
|
|
||||||
// Middle stream off.
|
|
||||||
RunActiveStreamsTest({true, false, true});
|
|
||||||
// High stream off.
|
|
||||||
RunActiveStreamsTest({true, true, false});
|
|
||||||
// Only low stream turned on.
|
|
||||||
RunActiveStreamsTest({true, false, false});
|
|
||||||
// Only middle stream turned on.
|
|
||||||
RunActiveStreamsTest({false, true, false});
|
|
||||||
// Only high stream turned on.
|
|
||||||
RunActiveStreamsTest({false, false, true});
|
|
||||||
}
|
|
||||||
|
|
||||||
void SwitchingToOneStream(int width, int height) {
|
|
||||||
// Disable all streams except the last and set the bitrate of the last to
|
|
||||||
// 100 kbps. This verifies the way GTP switches to screenshare mode.
|
|
||||||
settings_.VP8()->numberOfTemporalLayers = 1;
|
|
||||||
settings_.maxBitrate = 100;
|
|
||||||
settings_.startBitrate = 100;
|
|
||||||
settings_.width = width;
|
|
||||||
settings_.height = height;
|
|
||||||
for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
|
|
||||||
settings_.simulcastStream[i].maxBitrate = 0;
|
|
||||||
settings_.simulcastStream[i].width = settings_.width;
|
|
||||||
settings_.simulcastStream[i].height = settings_.height;
|
|
||||||
settings_.simulcastStream[i].numberOfTemporalLayers = 1;
|
|
||||||
}
|
|
||||||
// Setting input image to new resolution.
|
|
||||||
input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
|
|
||||||
input_buffer_->InitializeData();
|
|
||||||
|
|
||||||
input_frame_.reset(
|
|
||||||
new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
|
|
||||||
|
|
||||||
// The for loop above did not set the bitrate of the highest layer.
|
|
||||||
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
|
|
||||||
.maxBitrate = 0;
|
|
||||||
// The highest layer has to correspond to the non-simulcast resolution.
|
|
||||||
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
|
|
||||||
settings_.width;
|
|
||||||
settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
|
|
||||||
settings_.height;
|
|
||||||
SetUpRateAllocator();
|
|
||||||
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
|
|
||||||
|
|
||||||
// Encode one frame and verify.
|
|
||||||
SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
|
|
||||||
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
|
|
||||||
kVideoFrameDelta);
|
|
||||||
EXPECT_CALL(
|
|
||||||
encoder_callback_,
|
|
||||||
OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
|
|
||||||
Field(&EncodedImage::_encodedWidth, width),
|
|
||||||
Field(&EncodedImage::_encodedHeight, height)),
|
|
||||||
_, _))
|
|
||||||
.Times(1)
|
|
||||||
.WillRepeatedly(Return(
|
|
||||||
EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
|
|
||||||
// Switch back.
|
|
||||||
DefaultSettings(&settings_, kDefaultTemporalLayerProfile);
|
|
||||||
// Start at the lowest bitrate for enabling base stream.
|
|
||||||
settings_.startBitrate = kMinBitrates[0];
|
|
||||||
SetUpRateAllocator();
|
|
||||||
EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
|
|
||||||
SetRates(settings_.startBitrate, 30);
|
|
||||||
ExpectStreams(kVideoFrameKey, 1);
|
|
||||||
// Resize |input_frame_| to the new resolution.
|
|
||||||
input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
|
|
||||||
input_buffer_->InitializeData();
|
|
||||||
input_frame_.reset(
|
|
||||||
new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }
|
|
||||||
|
|
||||||
void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }
|
|
||||||
|
|
||||||
void TestSwitchingToOneSmallStream() { SwitchingToOneStream(4, 4); }
|
|
||||||
|
|
||||||
// Test the layer pattern and sync flag for various spatial-temporal patterns.
|
|
||||||
// 3-3-3 pattern: 3 temporal layers for all spatial streams, so same
|
|
||||||
// temporal_layer id and layer_sync is expected for all streams.
|
|
||||||
void TestSaptioTemporalLayers333PatternEncoder() {
|
|
||||||
Vp8TestEncodedImageCallback encoder_callback;
|
|
||||||
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
|
|
||||||
SetRates(kMaxBitrates[2], 30); // To get all three streams.
|
|
||||||
|
|
||||||
int expected_temporal_idx[3] = {-1, -1, -1};
|
|
||||||
bool expected_layer_sync[3] = {false, false, false};
|
|
||||||
|
|
||||||
// First frame: #0.
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #1.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #2.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #3.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #4.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #5.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test the layer pattern and sync flag for various spatial-temporal patterns.
|
|
||||||
// 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and
|
|
||||||
// 1 temporal layer for highest resolution.
|
|
||||||
// For this profile, we expect the temporal index pattern to be:
|
|
||||||
// 1st stream: 0, 2, 1, 2, ....
|
|
||||||
// 2nd stream: 0, 1, 0, 1, ...
|
|
||||||
// 3rd stream: -1, -1, -1, -1, ....
|
|
||||||
// Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer
|
|
||||||
// should always have temporal layer idx set to kNoTemporalIdx = -1.
|
|
||||||
// Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255.
|
|
||||||
// TODO(marpan): Although this seems safe for now, we should fix this.
|
|
||||||
void TestSpatioTemporalLayers321PatternEncoder() {
|
|
||||||
int temporal_layer_profile[3] = {3, 2, 1};
|
|
||||||
SetUpCodec(temporal_layer_profile);
|
|
||||||
Vp8TestEncodedImageCallback encoder_callback;
|
|
||||||
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
|
|
||||||
SetRates(kMaxBitrates[2], 30); // To get all three streams.
|
|
||||||
|
|
||||||
int expected_temporal_idx[3] = {-1, -1, -1};
|
|
||||||
bool expected_layer_sync[3] = {false, false, false};
|
|
||||||
|
|
||||||
// First frame: #0.
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #1.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #2.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #3.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #4.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
|
|
||||||
// Next frame: #5.
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
|
|
||||||
SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
|
|
||||||
VerifyTemporalIdxAndSyncForAllSpatialLayers(
|
|
||||||
&encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
|
|
||||||
}
|
|
||||||
|
|
||||||
void TestStrideEncodeDecode() {
|
|
||||||
Vp8TestEncodedImageCallback encoder_callback;
|
|
||||||
Vp8TestDecodedImageCallback decoder_callback;
|
|
||||||
encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
|
|
||||||
decoder_->RegisterDecodeCompleteCallback(&decoder_callback);
|
|
||||||
|
|
||||||
SetRates(kMaxBitrates[2], 30); // To get all three streams.
|
|
||||||
// Setting two (possibly) problematic use cases for stride:
|
|
||||||
// 1. stride > width 2. stride_y != stride_uv/2
|
|
||||||
int stride_y = kDefaultWidth + 20;
|
|
||||||
int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
|
|
||||||
input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
|
|
||||||
stride_uv, stride_uv);
|
|
||||||
input_frame_.reset(
|
|
||||||
new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
|
|
||||||
|
|
||||||
// Set color.
|
|
||||||
int plane_offset[kNumOfPlanes];
|
|
||||||
plane_offset[kYPlane] = kColorY;
|
|
||||||
plane_offset[kUPlane] = kColorU;
|
|
||||||
plane_offset[kVPlane] = kColorV;
|
|
||||||
CreateImage(input_buffer_, plane_offset);
|
|
||||||
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
|
|
||||||
// Change color.
|
|
||||||
plane_offset[kYPlane] += 1;
|
|
||||||
plane_offset[kUPlane] += 1;
|
|
||||||
plane_offset[kVPlane] += 1;
|
|
||||||
CreateImage(input_buffer_, plane_offset);
|
|
||||||
input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
|
|
||||||
EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
|
|
||||||
|
|
||||||
EncodedImage encoded_frame;
|
|
||||||
// Only encoding one frame - so will be a key frame.
|
|
||||||
encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
|
|
||||||
EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL, 0));
|
|
||||||
encoder_callback.GetLastEncodedFrame(&encoded_frame);
|
|
||||||
decoder_->Decode(encoded_frame, false, NULL, 0);
|
|
||||||
EXPECT_EQ(2, decoder_callback.DecodedFrames());
|
|
||||||
}
|
|
||||||
|
|
||||||
std::unique_ptr<VP8Encoder> encoder_;
|
|
||||||
MockEncodedImageCallback encoder_callback_;
|
|
||||||
std::unique_ptr<VP8Decoder> decoder_;
|
|
||||||
MockDecodedImageCallback decoder_callback_;
|
|
||||||
VideoCodec settings_;
|
|
||||||
rtc::scoped_refptr<I420Buffer> input_buffer_;
|
|
||||||
std::unique_ptr<VideoFrame> input_frame_;
|
|
||||||
std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace testing
|
|
||||||
} // namespace webrtc
|
|
||||||
|
|
||||||
#endif // MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_TEST_UTILITY_H_
|
|
@ -1,82 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* Use of this source code is governed by a BSD-style license
|
|
||||||
* that can be found in the LICENSE file in the root of the source
|
|
||||||
* tree. An additional intellectual property rights grant can be found
|
|
||||||
* in the file PATENTS. All contributing project authors may
|
|
||||||
* be found in the AUTHORS file in the root of the source tree.
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "modules/video_coding/codecs/vp8/simulcast_test_utility.h"
|
|
||||||
|
|
||||||
namespace webrtc {
|
|
||||||
namespace testing {
|
|
||||||
|
|
||||||
class TestVp8Impl : public TestVp8Simulcast {
|
|
||||||
protected:
|
|
||||||
std::unique_ptr<VP8Encoder> CreateEncoder() override {
|
|
||||||
return VP8Encoder::Create();
|
|
||||||
}
|
|
||||||
std::unique_ptr<VP8Decoder> CreateDecoder() override {
|
|
||||||
return VP8Decoder::Create();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {
|
|
||||||
TestVp8Simulcast::TestKeyFrameRequestsOnAllStreams();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestPaddingAllStreams) {
|
|
||||||
TestVp8Simulcast::TestPaddingAllStreams();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestPaddingTwoStreams) {
|
|
||||||
TestVp8Simulcast::TestPaddingTwoStreams();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestPaddingTwoStreamsOneMaxedOut) {
|
|
||||||
TestVp8Simulcast::TestPaddingTwoStreamsOneMaxedOut();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestPaddingOneStream) {
|
|
||||||
TestVp8Simulcast::TestPaddingOneStream();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestPaddingOneStreamTwoMaxedOut) {
|
|
||||||
TestVp8Simulcast::TestPaddingOneStreamTwoMaxedOut();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestSendAllStreams) {
|
|
||||||
TestVp8Simulcast::TestSendAllStreams();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestDisablingStreams) {
|
|
||||||
TestVp8Simulcast::TestDisablingStreams();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestActiveStreams) {
|
|
||||||
TestVp8Simulcast::TestActiveStreams();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestSwitchingToOneStream) {
|
|
||||||
TestVp8Simulcast::TestSwitchingToOneStream();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestSwitchingToOneOddStream) {
|
|
||||||
TestVp8Simulcast::TestSwitchingToOneOddStream();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestSwitchingToOneSmallStream) {
|
|
||||||
TestVp8Simulcast::TestSwitchingToOneSmallStream();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestSaptioTemporalLayers333PatternEncoder) {
|
|
||||||
TestVp8Simulcast::TestSaptioTemporalLayers333PatternEncoder();
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_F(TestVp8Impl, TestStrideEncodeDecode) {
|
|
||||||
TestVp8Simulcast::TestStrideEncodeDecode();
|
|
||||||
}
|
|
||||||
} // namespace testing
|
|
||||||
} // namespace webrtc
|
|
@ -518,6 +518,7 @@ rtc_source_set("test_common") {
|
|||||||
"fake_encoder.cc",
|
"fake_encoder.cc",
|
||||||
"fake_encoder.h",
|
"fake_encoder.h",
|
||||||
"fake_videorenderer.h",
|
"fake_videorenderer.h",
|
||||||
|
"function_video_decoder_factory.h",
|
||||||
"function_video_encoder_factory.h",
|
"function_video_encoder_factory.h",
|
||||||
"layer_filtering_transport.cc",
|
"layer_filtering_transport.cc",
|
||||||
"layer_filtering_transport.h",
|
"layer_filtering_transport.h",
|
||||||
|
51
test/function_video_decoder_factory.h
Normal file
51
test/function_video_decoder_factory.h
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||||
|
*
|
||||||
|
* Use of this source code is governed by a BSD-style license
|
||||||
|
* that can be found in the LICENSE file in the root of the source
|
||||||
|
* tree. An additional intellectual property rights grant can be found
|
||||||
|
* in the file PATENTS. All contributing project authors may
|
||||||
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef TEST_FUNCTION_VIDEO_DECODER_FACTORY_H_
|
||||||
|
#define TEST_FUNCTION_VIDEO_DECODER_FACTORY_H_
|
||||||
|
|
||||||
|
#include <functional>
|
||||||
|
#include <memory>
|
||||||
|
#include <utility>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "api/video_codecs/sdp_video_format.h"
|
||||||
|
#include "api/video_codecs/video_decoder_factory.h"
|
||||||
|
#include "rtc_base/checks.h"
|
||||||
|
|
||||||
|
namespace webrtc {
|
||||||
|
namespace test {
|
||||||
|
|
||||||
|
// A decoder factory producing decoders by calling a supplied create function.
|
||||||
|
class FunctionVideoDecoderFactory final : public VideoDecoderFactory {
|
||||||
|
public:
|
||||||
|
explicit FunctionVideoDecoderFactory(
|
||||||
|
std::function<std::unique_ptr<VideoDecoder>()> create)
|
||||||
|
: create_(std::move(create)) {}
|
||||||
|
|
||||||
|
// Unused by tests.
|
||||||
|
std::vector<SdpVideoFormat> GetSupportedFormats() const override {
|
||||||
|
RTC_NOTREACHED();
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
std::unique_ptr<VideoDecoder> CreateVideoDecoder(
|
||||||
|
const SdpVideoFormat& /* format */) override {
|
||||||
|
return create_();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
const std::function<std::unique_ptr<VideoDecoder>()> create_;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace test
|
||||||
|
} // namespace webrtc
|
||||||
|
|
||||||
|
#endif // TEST_FUNCTION_VIDEO_DECODER_FACTORY_H_
|
@ -16,7 +16,9 @@
|
|||||||
#include <utility>
|
#include <utility>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
|
#include "api/video_codecs/sdp_video_format.h"
|
||||||
#include "api/video_codecs/video_encoder_factory.h"
|
#include "api/video_codecs/video_encoder_factory.h"
|
||||||
|
#include "rtc_base/checks.h"
|
||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
namespace test {
|
namespace test {
|
||||||
|
Reference in New Issue
Block a user