Delete logic to set picture id and tl0 pic idx in encoders.
It would be nice to also delete the fields from CodecSpecificInfo, but those fields are used on the receive side.

Bug: webrtc:8830
Change-Id: I1a3f13ea2c024cbd73b33fd9dd58e531d3576a55
Reviewed-on: https://webrtc-review.googlesource.com/64780
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Reviewed-by: Åsa Persson <asapersson@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22625}
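For reference, below is a minimal sketch of the per-stream state the deleted encoder code maintained, reconstructed from the removed lines in this diff (random initialization of picture_id_ and tl0_pic_idx_, a 15-bit wrap of the picture id on every frame, and an increment of the tl0 picture index on every temporal-layer-0 frame). The class name RtpCodecState and its layout are illustrative only and not part of WebRTC; after this change, equivalent bookkeeping has to live outside the encoders (for example in the RTP packetization path).

#include <cstdint>
#include <random>

// Illustrative only: mirrors the bookkeeping deleted from the encoders in
// this change; it is not a WebRTC class.
class RtpCodecState {
 public:
  RtpCodecState() {
    // The deleted encoder constructors seeded both fields randomly.
    std::mt19937 gen{std::random_device{}()};
    picture_id_ = static_cast<uint16_t>(
        std::uniform_int_distribution<int>{0, 0x7FFF}(gen));
    tl0_pic_idx_ = static_cast<uint8_t>(
        std::uniform_int_distribution<int>{0, 0xFF}(gen));
  }

  // Call once per encoded frame, after its temporal index is known.
  void OnEncodedFrame(int temporal_idx) {
    picture_id_ = (picture_id_ + 1) & 0x7FFF;  // 15-bit picture id wrap.
    if (temporal_idx == 0)
      ++tl0_pic_idx_;  // uint8_t, wraps at 256 like the deleted counter.
  }

  uint16_t picture_id() const { return picture_id_; }
  uint8_t tl0_pic_idx() const { return tl0_pic_idx_; }

 private:
  uint16_t picture_id_ = 0;
  uint8_t tl0_pic_idx_ = 0;
};

The test expectations removed below (EncodeAndExpectFrameWith / ExpectFrameWith with picture_id and tl0_pic_idx arguments) checked exactly this progression; with the encoders out of the loop they now only check the temporal index.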
@@ -286,13 +286,11 @@ std::vector<TemporalLayers::FrameConfig> GetTemporalPattern(size_t num_layers) {
}
} // namespace

DefaultTemporalLayers::DefaultTemporalLayers(int number_of_temporal_layers,
uint8_t initial_tl0_pic_idx)
DefaultTemporalLayers::DefaultTemporalLayers(int number_of_temporal_layers)
: num_layers_(std::max(1, number_of_temporal_layers)),
temporal_ids_(GetTemporalIds(num_layers_)),
temporal_layer_sync_(GetTemporalLayerSync(num_layers_)),
temporal_pattern_(GetTemporalPattern(num_layers_)),
tl0_pic_idx_(initial_tl0_pic_idx),
pattern_idx_(255),
last_base_layer_sync_(false) {
RTC_DCHECK_EQ(temporal_pattern_.size(), temporal_layer_sync_.size());
@@ -305,10 +303,6 @@ DefaultTemporalLayers::DefaultTemporalLayers(int number_of_temporal_layers,
RTC_DCHECK_LE(temporal_ids_.size(), temporal_pattern_.size());
}

uint8_t DefaultTemporalLayers::Tl0PicIdx() const {
return tl0_pic_idx_;
}

void DefaultTemporalLayers::OnRatesUpdated(
const std::vector<uint32_t>& bitrates_bps,
int framerate_fps) {
@@ -367,7 +361,6 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
if (num_layers_ == 1) {
vp8_info->temporalIdx = kNoTemporalIdx;
vp8_info->layerSync = false;
vp8_info->tl0PicIdx = kNoTl0PicIdx;
} else {
vp8_info->temporalIdx = tl_config.packetizer_temporal_idx;
vp8_info->layerSync = tl_config.layer_sync;
@@ -380,10 +373,7 @@ void DefaultTemporalLayers::PopulateCodecSpecific(
// be a layer sync.
vp8_info->layerSync = true;
}
if (vp8_info->temporalIdx == 0)
tl0_pic_idx_++;
last_base_layer_sync_ = frame_is_keyframe;
vp8_info->tl0PicIdx = tl0_pic_idx_;
}
}

@@ -414,9 +404,8 @@ std::vector<std::set<uint8_t>> GetTemporalDependencies(
}

DefaultTemporalLayersChecker::DefaultTemporalLayersChecker(
int num_temporal_layers,
uint8_t initial_tl0_pic_idx)
: TemporalLayersChecker(num_temporal_layers, initial_tl0_pic_idx),
int num_temporal_layers)
: TemporalLayersChecker(num_temporal_layers),
num_layers_(std::max(1, num_temporal_layers)),
temporal_ids_(GetTemporalIds(num_layers_)),
temporal_dependencies_(GetTemporalDependencies(num_layers_)),

@@ -23,8 +23,7 @@ namespace webrtc {

class DefaultTemporalLayers : public TemporalLayers {
public:
DefaultTemporalLayers(int number_of_temporal_layers,
uint8_t initial_tl0_pic_idx);
explicit DefaultTemporalLayers(int number_of_temporal_layers);
virtual ~DefaultTemporalLayers() {}

// Returns the recommended VP8 encode flags needed. May refresh the decoder
@@ -44,15 +43,12 @@ class DefaultTemporalLayers : public TemporalLayers {

void FrameEncoded(unsigned int size, int qp) override {}

uint8_t Tl0PicIdx() const override;

private:
const size_t num_layers_;
const std::vector<unsigned int> temporal_ids_;
const std::vector<bool> temporal_layer_sync_;
const std::vector<TemporalLayers::FrameConfig> temporal_pattern_;

uint8_t tl0_pic_idx_;
uint8_t pattern_idx_;
bool last_base_layer_sync_;
// Updated cumulative bitrates, per temporal layer.
@@ -61,8 +57,7 @@ class DefaultTemporalLayers : public TemporalLayers {

class DefaultTemporalLayersChecker : public TemporalLayersChecker {
public:
DefaultTemporalLayersChecker(int number_of_temporal_layers,
uint8_t initial_tl0_pic_idx);
explicit DefaultTemporalLayersChecker(int number_of_temporal_layers);
bool CheckTemporalConfig(
bool frame_is_keyframe,
const TemporalLayers::FrameConfig& frame_config) override;

@@ -71,8 +71,8 @@ std::vector<uint32_t> GetTemporalLayerRates(int target_bitrate_kbps,
} // namespace

TEST(TemporalLayersTest, 2Layers) {
DefaultTemporalLayers tl(2, 0);
DefaultTemporalLayersChecker checker(2, 0);
DefaultTemporalLayers tl(2);
DefaultTemporalLayersChecker checker(2);
Vp8EncoderConfig cfg;
CodecSpecificInfoVP8 vp8_info;
tl.OnRatesUpdated(GetTemporalLayerRates(500, 30, 1), 30);
@@ -119,8 +119,8 @@ TEST(TemporalLayersTest, 2Layers) {
}

TEST(TemporalLayersTest, 3Layers) {
DefaultTemporalLayers tl(3, 0);
DefaultTemporalLayersChecker checker(3, 0);
DefaultTemporalLayers tl(3);
DefaultTemporalLayersChecker checker(3);
Vp8EncoderConfig cfg;
CodecSpecificInfoVP8 vp8_info;
tl.OnRatesUpdated(GetTemporalLayerRates(500, 30, 1), 30);
@@ -168,8 +168,8 @@ TEST(TemporalLayersTest, 3Layers) {

TEST(TemporalLayersTest, Alternative3Layers) {
ScopedFieldTrials field_trial("WebRTC-UseShortVP8TL3Pattern/Enabled/");
DefaultTemporalLayers tl(3, 0);
DefaultTemporalLayersChecker checker(3, 0);
DefaultTemporalLayers tl(3);
DefaultTemporalLayersChecker checker(3);
Vp8EncoderConfig cfg;
CodecSpecificInfoVP8 vp8_info;
tl.OnRatesUpdated(GetTemporalLayerRates(500, 30, 1), 30);
@@ -204,8 +204,8 @@ TEST(TemporalLayersTest, Alternative3Layers) {
}

TEST(TemporalLayersTest, 4Layers) {
DefaultTemporalLayers tl(4, 0);
DefaultTemporalLayersChecker checker(4, 0);
DefaultTemporalLayers tl(4);
DefaultTemporalLayersChecker checker(4);
Vp8EncoderConfig cfg;
CodecSpecificInfoVP8 vp8_info;
tl.OnRatesUpdated(GetTemporalLayerRates(500, 30, 1), 30);
@@ -251,8 +251,8 @@ TEST(TemporalLayersTest, 4Layers) {
}

TEST(TemporalLayersTest, KeyFrame) {
DefaultTemporalLayers tl(3, 0);
DefaultTemporalLayersChecker checker(3, 0);
DefaultTemporalLayers tl(3);
DefaultTemporalLayersChecker checker(3);
Vp8EncoderConfig cfg;
CodecSpecificInfoVP8 vp8_info;
tl.OnRatesUpdated(GetTemporalLayerRates(500, 30, 1), 30);
@@ -366,7 +366,7 @@ INSTANTIATE_TEST_CASE_P(DefaultTemporalLayersTest,

TEST_P(TemporalLayersReferenceTest, ValidFrameConfigs) {
const int num_layers = GetParam();
DefaultTemporalLayers tl(num_layers, 0);
DefaultTemporalLayers tl(num_layers);
Vp8EncoderConfig cfg;
tl.OnRatesUpdated(GetTemporalLayerRates(500, 30, 1), 30);
tl.UpdateConfiguration(&cfg);

@@ -18,7 +18,6 @@
#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "rtc_base/checks.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/random.h"
#include "rtc_base/timeutils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/field_trial.h"
@@ -208,12 +207,6 @@ LibvpxVp8Encoder::LibvpxVp8Encoder()
number_of_cores_(0),
rc_max_intra_target_(0),
key_frame_request_(kMaxSimulcastStreams, false) {
Random random(rtc::TimeMicros());
picture_id_.reserve(kMaxSimulcastStreams);
for (int i = 0; i < kMaxSimulcastStreams; ++i) {
picture_id_.push_back(random.Rand<uint16_t>() & 0x7FFF);
tl0_pic_idx_.push_back(random.Rand<uint8_t>());
}
temporal_layers_.reserve(kMaxSimulcastStreams);
temporal_layers_checkers_.reserve(kMaxSimulcastStreams);
raw_images_.reserve(kMaxSimulcastStreams);
@@ -253,9 +246,6 @@ int LibvpxVp8Encoder::Release() {
vpx_img_free(&raw_images_.back());
raw_images_.pop_back();
}
for (size_t i = 0; i < temporal_layers_.size(); ++i) {
tl0_pic_idx_[i] = temporal_layers_[i]->Tl0PicIdx();
}
temporal_layers_.clear();
temporal_layers_checkers_.clear();
inited_ = false;
@@ -347,9 +337,9 @@ void LibvpxVp8Encoder::SetupTemporalLayers(int num_streams,
RTC_DCHECK(temporal_layers_.empty());
for (int i = 0; i < num_streams; ++i) {
temporal_layers_.emplace_back(
TemporalLayers::CreateTemporalLayers(codec, i, tl0_pic_idx_[i]));
TemporalLayers::CreateTemporalLayers(codec, i));
temporal_layers_checkers_.emplace_back(
TemporalLayers::CreateTemporalLayersChecker(codec, i, tl0_pic_idx_[i]));
TemporalLayers::CreateTemporalLayersChecker(codec, i));
}
}

@@ -871,15 +861,12 @@ void LibvpxVp8Encoder::PopulateCodecSpecific(
codec_specific->codecType = kVideoCodecVP8;
codec_specific->codec_name = ImplementationName();
CodecSpecificInfoVP8* vp8Info = &(codec_specific->codecSpecific.VP8);
vp8Info->pictureId = picture_id_[stream_idx];
vp8Info->simulcastIdx = stream_idx;
vp8Info->keyIdx = kNoKeyIdx; // TODO(hlundin) populate this
vp8Info->nonReference = (pkt.data.frame.flags & VPX_FRAME_IS_DROPPABLE) != 0;
temporal_layers_[stream_idx]->PopulateCodecSpecific(
(pkt.data.frame.flags & VPX_FRAME_IS_KEY) != 0, tl_config, vp8Info,
timestamp);
// Prepare next.
picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
}

int LibvpxVp8Encoder::GetEncodedPartitions(

@@ -96,8 +96,6 @@ class LibvpxVp8Encoder : public VP8Encoder {
uint32_t rc_max_intra_target_;
std::vector<std::unique_ptr<TemporalLayers>> temporal_layers_;
std::vector<std::unique_ptr<TemporalLayersChecker>> temporal_layers_checkers_;
std::vector<uint16_t> picture_id_;
std::vector<uint8_t> tl0_pic_idx_;
std::vector<bool> key_frame_request_;
std::vector<bool> send_stream_;
std::vector<int> cpu_speed_;

@@ -38,13 +38,11 @@ constexpr int ScreenshareLayers::kMaxNumTemporalLayers;
const int ScreenshareLayers::kMaxFrameIntervalMs = 2750;

ScreenshareLayers::ScreenshareLayers(int num_temporal_layers,
uint8_t initial_tl0_pic_idx,
Clock* clock)
: clock_(clock),
number_of_temporal_layers_(
std::min(kMaxNumTemporalLayers, num_temporal_layers)),
last_base_layer_sync_(false),
tl0_pic_idx_(initial_tl0_pic_idx),
active_layer_(-1),
last_timestamp_(-1),
last_sync_timestamp_(-1),
@@ -63,10 +61,6 @@ ScreenshareLayers::~ScreenshareLayers() {
UpdateHistograms();
}

uint8_t ScreenshareLayers::Tl0PicIdx() const {
return tl0_pic_idx_;
}

TemporalLayers::FrameConfig ScreenshareLayers::UpdateLayerConfig(
uint32_t timestamp) {
if (number_of_temporal_layers_ <= 1) {
@@ -286,7 +280,6 @@ void ScreenshareLayers::PopulateCodecSpecific(
if (number_of_temporal_layers_ == 1) {
vp8_info->temporalIdx = kNoTemporalIdx;
vp8_info->layerSync = false;
vp8_info->tl0PicIdx = kNoTl0PicIdx;
} else {
int64_t unwrapped_timestamp = time_wrap_handler_.Unwrap(timestamp);
vp8_info->temporalIdx = tl_config.packetizer_temporal_idx;
@@ -301,11 +294,7 @@ void ScreenshareLayers::PopulateCodecSpecific(
last_sync_timestamp_ = unwrapped_timestamp;
vp8_info->layerSync = true;
}
if (vp8_info->temporalIdx == 0) {
tl0_pic_idx_++;
}
last_base_layer_sync_ = frame_is_keyframe;
vp8_info->tl0PicIdx = tl0_pic_idx_;
}
}


@@ -29,7 +29,6 @@ class ScreenshareLayers : public TemporalLayers {
static const int kMaxFrameIntervalMs;

ScreenshareLayers(int num_temporal_layers,
uint8_t initial_tl0_pic_idx,
Clock* clock);
virtual ~ScreenshareLayers();

@@ -52,8 +51,6 @@ class ScreenshareLayers : public TemporalLayers {

void FrameEncoded(unsigned int size, int qp) override;

uint8_t Tl0PicIdx() const override;

private:
enum class TemporalLayerState : int { kDrop, kTl0, kTl1, kTl1Sync };

@@ -64,7 +61,6 @@ class ScreenshareLayers : public TemporalLayers {

int number_of_temporal_layers_;
bool last_base_layer_sync_;
uint8_t tl0_pic_idx_;
int active_layer_;
int64_t last_timestamp_;
int64_t last_sync_timestamp_;

@@ -61,7 +61,7 @@ class ScreenshareLayerTest : public ::testing::Test {
virtual ~ScreenshareLayerTest() {}

void SetUp() override {
layers_.reset(new ScreenshareLayers(2, 0, &clock_));
layers_.reset(new ScreenshareLayers(2, &clock_));
cfg_ = ConfigureBitrates();
}

@@ -175,7 +175,7 @@ class ScreenshareLayerTest : public ::testing::Test {
};

TEST_F(ScreenshareLayerTest, 1Layer) {
layers_.reset(new ScreenshareLayers(1, 0, &clock_));
layers_.reset(new ScreenshareLayers(1, &clock_));
ConfigureBitrates();
// One layer screenshare should not use the frame dropper as all frames will
// belong to the base layer.
@@ -184,13 +184,11 @@ TEST_F(ScreenshareLayerTest, 1Layer) {
timestamp_ += kTimestampDelta5Fps;
EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx), vp8_info_.temporalIdx);
EXPECT_FALSE(vp8_info_.layerSync);
EXPECT_EQ(kNoTl0PicIdx, vp8_info_.tl0PicIdx);

flags = EncodeFrame(false);
EXPECT_EQ(kSingleLayerFlags, flags);
EXPECT_EQ(static_cast<uint8_t>(kNoTemporalIdx), vp8_info_.temporalIdx);
EXPECT_FALSE(vp8_info_.layerSync);
EXPECT_EQ(kNoTl0PicIdx, vp8_info_.tl0PicIdx);
}

TEST_F(ScreenshareLayerTest, 2LayersPeriodicSync) {
@@ -540,7 +538,7 @@ TEST_F(ScreenshareLayerTest, UpdatesHistograms) {
}

TEST_F(ScreenshareLayerTest, AllowsUpdateConfigBeforeSetRates) {
layers_.reset(new ScreenshareLayers(2, 0, &clock_));
layers_.reset(new ScreenshareLayers(2, &clock_));
// New layer instance, OnRatesUpdated() never called.
// UpdateConfiguration() call should not cause crash.
layers_->UpdateConfiguration(&cfg_);

@@ -96,7 +96,6 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
encoded_image._length);
}
}
picture_id_ = codec_specific_info->codecSpecific.VP8.pictureId;
layer_sync_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
codec_specific_info->codecSpecific.VP8.layerSync;
temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =

@@ -49,34 +49,30 @@ bool IsConferenceModeScreenshare(const VideoCodec& codec) {

std::unique_ptr<TemporalLayers> TemporalLayers::CreateTemporalLayers(
const VideoCodec& codec,
size_t spatial_id,
uint8_t initial_tl0_pic_idx) {
size_t spatial_id) {
if (IsConferenceModeScreenshare(codec) && spatial_id == 0) {
// Conference mode temporal layering for screen content in base stream.
return rtc::MakeUnique<ScreenshareLayers>(2, initial_tl0_pic_idx,
Clock::GetRealTimeClock());
return rtc::MakeUnique<ScreenshareLayers>(2, Clock::GetRealTimeClock());
}

return rtc::MakeUnique<DefaultTemporalLayers>(
NumTemporalLayers(codec, spatial_id), initial_tl0_pic_idx);
NumTemporalLayers(codec, spatial_id));
}

std::unique_ptr<TemporalLayersChecker>
TemporalLayers::CreateTemporalLayersChecker(const VideoCodec& codec,
size_t spatial_id,
uint8_t initial_tl0_pic_idx) {
size_t spatial_id) {
if (IsConferenceModeScreenshare(codec) && spatial_id == 0) {
// Conference mode temporal layering for screen content in base stream,
// use generic checker.
return rtc::MakeUnique<TemporalLayersChecker>(2, initial_tl0_pic_idx);
return rtc::MakeUnique<TemporalLayersChecker>(2);
}

return rtc::MakeUnique<DefaultTemporalLayersChecker>(
NumTemporalLayers(codec, spatial_id), initial_tl0_pic_idx);
NumTemporalLayers(codec, spatial_id));
}

TemporalLayersChecker::TemporalLayersChecker(int num_temporal_layers,
uint8_t /*initial_tl0_pic_idx*/)
TemporalLayersChecker::TemporalLayersChecker(int num_temporal_layers)
: num_temporal_layers_(num_temporal_layers),
sequence_number_(0),
last_sync_sequence_number_(0),

@@ -99,12 +99,10 @@ class TemporalLayers {

static std::unique_ptr<TemporalLayers> CreateTemporalLayers(
const VideoCodec& codec,
size_t spatial_id,
uint8_t initial_tl0_pic_idx);
size_t spatial_id);
static std::unique_ptr<TemporalLayersChecker> CreateTemporalLayersChecker(
const VideoCodec& codec,
size_t spatial_id,
uint8_t initial_tl0_pic_idx);
size_t spatial_id);

// Factory for TemporalLayer strategy. Default behavior is a fixed pattern
// of temporal layers. See default_temporal_layers.cc
@@ -129,10 +127,6 @@ class TemporalLayers {
uint32_t timestamp) = 0;

virtual void FrameEncoded(unsigned int size, int qp) = 0;

// Returns the current tl0_pic_idx, so it can be reused in future
// instantiations.
virtual uint8_t Tl0PicIdx() const = 0;
};

// TODO(webrtc:9012): Remove TemporalLayersFactory type and field once all
@@ -149,7 +143,7 @@ class TemporalLayersFactory {
// each UpdateLayersConfig() of a corresponding TemporalLayers class.
class TemporalLayersChecker {
public:
TemporalLayersChecker(int num_temporal_layers, uint8_t initial_tl0_pic_idx);
explicit TemporalLayersChecker(int num_temporal_layers);
virtual ~TemporalLayersChecker() {}

virtual bool CheckTemporalConfig(

@@ -100,16 +100,10 @@ class TestVp8Impl : public VideoCodecUnitTest {
EXPECT_EQ(0u, codec_specific_info->codecSpecific.VP8.simulcastIdx);
}

void EncodeAndExpectFrameWith(int16_t picture_id,
int tl0_pic_idx,
uint8_t temporal_idx) {
void EncodeAndExpectFrameWith(uint8_t temporal_idx) {
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);
EXPECT_EQ(picture_id % (1 << 15),
codec_specific_info.codecSpecific.VP8.pictureId);
EXPECT_EQ(tl0_pic_idx % (1 << 8),
codec_specific_info.codecSpecific.VP8.tl0PicIdx);
EXPECT_EQ(temporal_idx, codec_specific_info.codecSpecific.VP8.temporalIdx);
}

@@ -319,7 +313,7 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
EXPECT_GT(I420PSNR(input_frame_.get(), decoded_frame.get()), 36);
}

TEST_F(TestVp8Impl, EncoderWith2TemporalLayersRetainsRtpStateAfterRelease) {
TEST_F(TestVp8Impl, EncoderWith2TemporalLayers) {
codec_settings_.VP8()->numberOfTemporalLayers = 2;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));
@@ -330,78 +324,16 @@ TEST_F(TestVp8Impl, EncoderWith2TemporalLayersRetainsRtpStateAfterRelease) {
EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);

EXPECT_EQ(0, codec_specific_info.codecSpecific.VP8.temporalIdx);
const int16_t picture_id = codec_specific_info.codecSpecific.VP8.pictureId;
const int tl0_pic_idx = codec_specific_info.codecSpecific.VP8.tl0PicIdx;
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);

EncodeAndExpectFrameWith(picture_id + 1, tl0_pic_idx + 0, 1);
EncodeAndExpectFrameWith(1);
// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 2, tl0_pic_idx + 1, 0);
EncodeAndExpectFrameWith(0);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 3, tl0_pic_idx + 1, 1);

// Reinit.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));

// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 4, tl0_pic_idx + 2, 0);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 5, tl0_pic_idx + 2, 1);
// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 6, tl0_pic_idx + 3, 0);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 7, tl0_pic_idx + 3, 1);
}

TEST_F(TestVp8Impl, EncoderWith3TemporalLayersRetainsRtpStateAfterRelease) {
codec_settings_.VP8()->numberOfTemporalLayers = 3;
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));

EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
EncodeAndWaitForFrame(&encoded_frame, &codec_specific_info);

// Temporal layer 0.
EXPECT_EQ(0, codec_specific_info.codecSpecific.VP8.temporalIdx);
const int16_t picture_id = codec_specific_info.codecSpecific.VP8.pictureId;
const int tl0_pic_idx = codec_specific_info.codecSpecific.VP8.tl0PicIdx;
// Temporal layer 2.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 1, tl0_pic_idx + 0, 2);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 2, tl0_pic_idx + 0, 1);
// Temporal layer 2.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 3, tl0_pic_idx + 0, 2);

// Reinit.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, kNumCores, kMaxPayloadSize));

// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 4, tl0_pic_idx + 1, 0);
// Temporal layer 2.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 5, tl0_pic_idx + 1, 2);
// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 6, tl0_pic_idx + 1, 1);
// Temporal layer 2.
input_frame_->set_timestamp(input_frame_->timestamp() + kTimestampIncrement);
EncodeAndExpectFrameWith(picture_id + 7, tl0_pic_idx + 1, 2);
EncodeAndExpectFrameWith(1);
}

TEST_F(TestVp8Impl, ScalingDisabledIfAutomaticResizeOff) {

@@ -36,14 +36,10 @@ class TestVp9Impl : public VideoCodecUnitTest {
return codec_settings;
}

void ExpectFrameWith(int16_t picture_id,
int tl0_pic_idx,
uint8_t temporal_idx) {
void ExpectFrameWith(uint8_t temporal_idx) {
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
EXPECT_EQ(picture_id, codec_specific_info.codecSpecific.VP9.picture_id);
EXPECT_EQ(tl0_pic_idx, codec_specific_info.codecSpecific.VP9.tl0_pic_idx);
EXPECT_EQ(temporal_idx, codec_specific_info.codecSpecific.VP9.temporal_idx);
}
};
@@ -121,7 +117,7 @@ TEST_F(TestVp9Impl, ParserQpEqualsEncodedQp) {
EXPECT_EQ(encoded_frame.qp_, qp);
}

TEST_F(TestVp9Impl, EncoderRetainsRtpStateAfterRelease) {
TEST_F(TestVp9Impl, EncoderWith2TemporalLayers) {
// Override default settings.
codec_settings_.VP9()->numberOfTemporalLayers = 2;
// Tl0PidIdx is only used in non-flexible mode.
@@ -136,8 +132,6 @@ TEST_F(TestVp9Impl, EncoderRetainsRtpStateAfterRelease) {
EncodedImage encoded_frame;
CodecSpecificInfo codec_specific_info;
ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
int16_t picture_id = codec_specific_info.codecSpecific.VP9.picture_id;
int tl0_pic_idx = codec_specific_info.codecSpecific.VP9.tl0_pic_idx;
EXPECT_EQ(0, codec_specific_info.codecSpecific.VP9.temporal_idx);

// Temporal layer 1.
@@ -145,61 +139,21 @@ TEST_F(TestVp9Impl, EncoderRetainsRtpStateAfterRelease) {
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 1) % (1 << 15), tl0_pic_idx, 1);
ExpectFrameWith(1);

// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 2) % (1 << 15), (tl0_pic_idx + 1) % (1 << 8),
0);
ExpectFrameWith(0);

// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 3) % (1 << 15), (tl0_pic_idx + 1) % (1 << 8),
1);

// Reinit.
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->InitEncode(&codec_settings_, 1 /* number of cores */,
0 /* max payload size (unused) */));

// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 4) % (1 << 15), (tl0_pic_idx + 2) % (1 << 8),
0);

// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 5) % (1 << 15), (tl0_pic_idx + 2) % (1 << 8),
1);

// Temporal layer 0.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 6) % (1 << 15), (tl0_pic_idx + 3) % (1 << 8),
0);

// Temporal layer 1.
input_frame_->set_timestamp(input_frame_->timestamp() +
kTimestampIncrementPerFrame);
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
encoder_->Encode(*input_frame_, nullptr, nullptr));
ExpectFrameWith((picture_id + 7) % (1 << 15), (tl0_pic_idx + 3) % (1 << 8),
1);
ExpectFrameWith(1);
}

TEST_F(TestVp9Impl, EncoderExplicitLayering) {

@@ -28,7 +28,6 @@
#include "rtc_base/keep_ref_until_done.h"
#include "rtc_base/logging.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/random.h"
#include "rtc_base/timeutils.h"
#include "rtc_base/trace_event.h"

@@ -83,10 +82,6 @@ VP9EncoderImpl::VP9EncoderImpl()
spatial_layer_(new ScreenshareLayersVP9(2)) {
memset(&codec_, 0, sizeof(codec_));
memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));

Random random(rtc::TimeMicros());
picture_id_ = random.Rand<uint16_t>() & 0x7FFF;
tl0_pic_idx_ = random.Rand<uint8_t>();
}

VP9EncoderImpl::~VP9EncoderImpl() {
@@ -637,7 +632,6 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
}

if (is_first_frame) {
picture_id_ = (picture_id_ + 1) & 0x7FFF;
// TODO(asapersson): this info has to be obtained from the encoder.
vp9_info->inter_layer_predicted = false;
++frames_since_kf_;
@@ -652,15 +646,6 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
frames_since_kf_ = 0;
}

vp9_info->picture_id = picture_id_;

if (!vp9_info->flexible_mode) {
if (layer_id.temporal_layer_id == 0 && layer_id.spatial_layer_id == 0) {
tl0_pic_idx_++;
}
vp9_info->tl0_pic_idx = tl0_pic_idx_;
}

// Always populate this, so that the packetizer can properly set the marker
// bit.
vp9_info->num_spatial_layers = num_spatial_layers_;

@@ -127,10 +127,6 @@ class VP9EncoderImpl : public VP9Encoder {
uint8_t num_ref_pics_[kMaxVp9NumberOfSpatialLayers];
uint8_t p_diff_[kMaxVp9NumberOfSpatialLayers][kMaxVp9RefPics];
std::unique_ptr<ScreenshareLayersVP9> spatial_layer_;

// RTP state.
uint16_t picture_id_;
uint8_t tl0_pic_idx_; // Only used in non-flexible mode.
};

class VP9DecoderImpl : public VP9Decoder {
