Split vp8_impl into libvpx_vp8_encoder and libvpx_vp8_decoder

This work is in preparation for refactoring the TemporalLayers API.

Bug: webrtc:9012
Change-Id: I01908ee034fb79996e687ff72d10178acf102321
Reviewed-on: https://webrtc-review.googlesource.com/61781
Reviewed-by: Magnus Jedvert <magjed@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Erik Språng <sprang@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#22445}
Author: Erik Språng
Date: 2018-03-14 17:52:55 +01:00
Committed by: Commit Bot
Parent: b3bac5ec26
Commit: cc681ccf6b
9 changed files with 487 additions and 442 deletions
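The public factory entry points are untouched by this split; only the concrete classes behind them are renamed. A minimal caller-side sketch (class and header names taken from the diff below; the surrounding setup is assumed):

#include <memory>

#include "modules/video_coding/codecs/vp8/include/vp8.h"

// The factories still hand back the interface types; after this CL they
// construct LibvpxVp8Encoder / LibvpxVp8Decoder instead of VP8EncoderImpl /
// VP8DecoderImpl.
std::unique_ptr<webrtc::VP8Encoder> encoder = webrtc::VP8Encoder::Create();
std::unique_ptr<webrtc::VP8Decoder> decoder = webrtc::VP8Decoder::Create();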

modules/video_coding/codecs/vp8/default_temporal_layers_unittest.cc

@@ -9,7 +9,7 @@
*/
#include "modules/video_coding/codecs/vp8/default_temporal_layers.h"
#include "modules/video_coding/codecs/vp8/vp8_impl.h"
#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "test/field_trial.h"
#include "test/gtest.h"
@@ -85,7 +85,7 @@ TEST(TemporalLayersTest, 2Layers) {
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
TemporalLayers::FrameConfig tl_config = tl.UpdateLayerConfig(timestamp);
EXPECT_EQ(expected_flags[i], VP8EncoderImpl::EncodeFlags(tl_config)) << i;
EXPECT_EQ(expected_flags[i], LibvpxVp8Encoder::EncodeFlags(tl_config)) << i;
tl.PopulateCodecSpecific(i == 0, tl_config, &vp8_info, 0);
EXPECT_TRUE(checker.CheckTemporalConfig(i == 0, tl_config));
EXPECT_EQ(expected_temporal_idx[i], vp8_info.temporalIdx);
@@ -133,7 +133,7 @@ TEST(TemporalLayersTest, 3Layers) {
unsigned int timestamp = 0;
for (int i = 0; i < 16; ++i) {
TemporalLayers::FrameConfig tl_config = tl.UpdateLayerConfig(timestamp);
EXPECT_EQ(expected_flags[i], VP8EncoderImpl::EncodeFlags(tl_config)) << i;
EXPECT_EQ(expected_flags[i], LibvpxVp8Encoder::EncodeFlags(tl_config)) << i;
tl.PopulateCodecSpecific(i == 0, tl_config, &vp8_info, 0);
EXPECT_TRUE(checker.CheckTemporalConfig(i == 0, tl_config));
EXPECT_EQ(expected_temporal_idx[i], vp8_info.temporalIdx);
@@ -170,7 +170,7 @@ TEST(TemporalLayersTest, Alternative3Layers) {
unsigned int timestamp = 0;
for (int i = 0; i < 8; ++i) {
TemporalLayers::FrameConfig tl_config = tl.UpdateLayerConfig(timestamp);
EXPECT_EQ(expected_flags[i], VP8EncoderImpl::EncodeFlags(tl_config)) << i;
EXPECT_EQ(expected_flags[i], LibvpxVp8Encoder::EncodeFlags(tl_config)) << i;
tl.PopulateCodecSpecific(i == 0, tl_config, &vp8_info, 0);
EXPECT_TRUE(checker.CheckTemporalConfig(i == 0, tl_config));
EXPECT_EQ(expected_temporal_idx[i], vp8_info.temporalIdx);
@@ -217,7 +217,7 @@ TEST(TemporalLayersTest, 4Layers) {
uint32_t timestamp = 0;
for (int i = 0; i < 16; ++i) {
TemporalLayers::FrameConfig tl_config = tl.UpdateLayerConfig(timestamp);
EXPECT_EQ(expected_flags[i], VP8EncoderImpl::EncodeFlags(tl_config)) << i;
EXPECT_EQ(expected_flags[i], LibvpxVp8Encoder::EncodeFlags(tl_config)) << i;
tl.PopulateCodecSpecific(i == 0, tl_config, &vp8_info, 0);
EXPECT_TRUE(checker.CheckTemporalConfig(i == 0, tl_config));
EXPECT_EQ(expected_temporal_idx[i], vp8_info.temporalIdx);
@@ -254,7 +254,7 @@ TEST(TemporalLayersTest, KeyFrame) {
uint32_t timestamp = 0;
for (int i = 0; i < 7; ++i) {
TemporalLayers::FrameConfig tl_config = tl.UpdateLayerConfig(timestamp);
EXPECT_EQ(expected_flags[i], VP8EncoderImpl::EncodeFlags(tl_config)) << i;
EXPECT_EQ(expected_flags[i], LibvpxVp8Encoder::EncodeFlags(tl_config)) << i;
tl.PopulateCodecSpecific(true, tl_config, &vp8_info, 0);
EXPECT_TRUE(checker.CheckTemporalConfig(true, tl_config));
EXPECT_EQ(expected_temporal_idx[i], tl_config.packetizer_temporal_idx);
@@ -266,7 +266,7 @@ TEST(TemporalLayersTest, KeyFrame) {
timestamp += 3000;
}
TemporalLayers::FrameConfig tl_config = tl.UpdateLayerConfig(timestamp);
EXPECT_EQ(expected_flags[7], VP8EncoderImpl::EncodeFlags(tl_config));
EXPECT_EQ(expected_flags[7], LibvpxVp8Encoder::EncodeFlags(tl_config));
tl.PopulateCodecSpecific(false, tl_config, &vp8_info, 0);
EXPECT_TRUE(checker.CheckTemporalConfig(false, tl_config));
EXPECT_NE(0, vp8_info.temporalIdx)

modules/video_coding/codecs/vp8/libvpx_vp8_decoder.cc

@@ -0,0 +1,343 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <algorithm>
#include <string>
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/exp_filter.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/timeutils.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include "third_party/libyuv/include/libyuv/scale.h"
namespace webrtc {
namespace {
constexpr int kVp8ErrorPropagationTh = 30;
// vpx_decoder.h documentation indicates decode deadline is time in us, with
// "Set to zero for unlimited.", but actual implementation requires this to be
// a mode with 0 meaning allow delay and 1 not allowing it.
constexpr long kDecodeDeadlineRealtime = 1; // NOLINT
const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Config-Arm";
void GetPostProcParamsFromFieldTrialGroup(
LibvpxVp8Decoder::DeblockParams* deblock_params) {
std::string group =
webrtc::field_trial::FindFullName(kVp8PostProcArmFieldTrial);
if (group.empty())
return;
LibvpxVp8Decoder::DeblockParams params;
if (sscanf(group.c_str(), "Enabled-%d,%d,%d", &params.max_level,
&params.min_qp, &params.degrade_qp) != 3)
return;
if (params.max_level < 0 || params.max_level > 16)
return;
if (params.min_qp < 0 || params.degrade_qp <= params.min_qp)
return;
*deblock_params = params;
}
} // namespace
std::unique_ptr<VP8Decoder> VP8Decoder::Create() {
return rtc::MakeUnique<LibvpxVp8Decoder>();
}
class LibvpxVp8Decoder::QpSmoother {
public:
QpSmoother() : last_sample_ms_(rtc::TimeMillis()), smoother_(kAlpha) {}
int GetAvg() const {
float value = smoother_.filtered();
return (value == rtc::ExpFilter::kValueUndefined) ? 0
: static_cast<int>(value);
}
void Add(float sample) {
int64_t now_ms = rtc::TimeMillis();
smoother_.Apply(static_cast<float>(now_ms - last_sample_ms_), sample);
last_sample_ms_ = now_ms;
}
void Reset() { smoother_.Reset(kAlpha); }
private:
const float kAlpha = 0.95f;
int64_t last_sample_ms_;
rtc::ExpFilter smoother_;
};
LibvpxVp8Decoder::LibvpxVp8Decoder()
: use_postproc_arm_(
webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial)),
buffer_pool_(false, 300 /* max_number_of_buffers*/),
decode_complete_callback_(NULL),
inited_(false),
decoder_(NULL),
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
key_frame_required_(true),
qp_smoother_(use_postproc_arm_ ? new QpSmoother() : nullptr) {
if (use_postproc_arm_)
GetPostProcParamsFromFieldTrialGroup(&deblock_);
}
LibvpxVp8Decoder::~LibvpxVp8Decoder() {
inited_ = true; // in order to do the actual release
Release();
}
int LibvpxVp8Decoder::InitDecode(const VideoCodec* inst, int number_of_cores) {
int ret_val = Release();
if (ret_val < 0) {
return ret_val;
}
if (decoder_ == NULL) {
decoder_ = new vpx_codec_ctx_t;
memset(decoder_, 0, sizeof(*decoder_));
}
vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
defined(WEBRTC_ANDROID)
vpx_codec_flags_t flags = use_postproc_arm_ ? VPX_CODEC_USE_POSTPROC : 0;
#else
vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC;
#endif
if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) {
delete decoder_;
decoder_ = nullptr;
return WEBRTC_VIDEO_CODEC_MEMORY;
}
propagation_cnt_ = -1;
inited_ = true;
// Always start with a complete key frame.
key_frame_required_ = true;
return WEBRTC_VIDEO_CODEC_OK;
}
int LibvpxVp8Decoder::Decode(const EncodedImage& input_image,
bool missing_frames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codec_specific_info,
int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (decode_complete_callback_ == NULL) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (input_image._buffer == NULL && input_image._length > 0) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
// Post process configurations.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
defined(WEBRTC_ANDROID)
if (use_postproc_arm_) {
vp8_postproc_cfg_t ppcfg;
ppcfg.post_proc_flag = VP8_MFQE;
// For low resolutions, use stronger deblocking filter.
int last_width_x_height = last_frame_width_ * last_frame_height_;
if (last_width_x_height > 0 && last_width_x_height <= 320 * 240) {
// Enable the deblock and demacroblocker based on qp thresholds.
RTC_DCHECK(qp_smoother_);
int qp = qp_smoother_->GetAvg();
if (qp > deblock_.min_qp) {
int level = deblock_.max_level;
if (qp < deblock_.degrade_qp) {
// Use lower level.
level = deblock_.max_level * (qp - deblock_.min_qp) /
(deblock_.degrade_qp - deblock_.min_qp);
}
// Deblocking level only affects VP8_DEMACROBLOCK.
ppcfg.deblocking_level = std::max(level, 1);
ppcfg.post_proc_flag |= VP8_DEBLOCK | VP8_DEMACROBLOCK;
}
}
vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
}
#else
vp8_postproc_cfg_t ppcfg;
// MFQE enabled to reduce key frame popping.
ppcfg.post_proc_flag = VP8_MFQE | VP8_DEBLOCK;
// For VGA resolutions and lower, enable the demacroblocker postproc.
if (last_frame_width_ * last_frame_height_ <= 640 * 360) {
ppcfg.post_proc_flag |= VP8_DEMACROBLOCK;
}
// Strength of deblocking filter. Valid range:[0,16]
ppcfg.deblocking_level = 3;
vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
#endif
// Always start with a complete key frame.
if (key_frame_required_) {
if (input_image._frameType != kVideoFrameKey)
return WEBRTC_VIDEO_CODEC_ERROR;
// We have a key frame - is it complete?
if (input_image._completeFrame) {
key_frame_required_ = false;
} else {
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// Restrict error propagation using key frame requests.
// Reset on a key frame refresh.
if (input_image._frameType == kVideoFrameKey && input_image._completeFrame) {
propagation_cnt_ = -1;
// Start count on first loss.
} else if ((!input_image._completeFrame || missing_frames) &&
propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
propagation_cnt_++;
}
vpx_codec_iter_t iter = NULL;
vpx_image_t* img;
int ret;
// Check for missing frames.
if (missing_frames) {
// Call decoder with zero data length to signal missing frames.
if (vpx_codec_decode(decoder_, NULL, 0, 0, kDecodeDeadlineRealtime)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
img = vpx_codec_get_frame(decoder_, &iter);
iter = NULL;
}
uint8_t* buffer = input_image._buffer;
if (input_image._length == 0) {
buffer = NULL; // Triggers full frame concealment.
}
if (vpx_codec_decode(decoder_, buffer, input_image._length, 0,
kDecodeDeadlineRealtime)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0) {
propagation_cnt_ = 0;
}
return WEBRTC_VIDEO_CODEC_ERROR;
}
img = vpx_codec_get_frame(decoder_, &iter);
int qp;
vpx_codec_err_t vpx_ret =
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
if (ret != 0) {
// Reset to avoid requesting key frames too often.
if (ret < 0 && propagation_cnt_ > 0)
propagation_cnt_ = 0;
return ret;
}
// Check Vs. threshold
if (propagation_cnt_ > kVp8ErrorPropagationTh) {
// Reset to avoid requesting key frames too often.
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
}
int LibvpxVp8Decoder::ReturnFrame(const vpx_image_t* img,
uint32_t timestamp,
int64_t ntp_time_ms,
int qp) {
if (img == NULL) {
// Decoder OK and NULL image => No show frame
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
if (qp_smoother_) {
if (last_frame_width_ != static_cast<int>(img->d_w) ||
last_frame_height_ != static_cast<int>(img->d_h)) {
qp_smoother_->Reset();
}
qp_smoother_->Add(qp);
}
last_frame_width_ = img->d_w;
last_frame_height_ = img->d_h;
// Allocate memory for decoded image.
rtc::scoped_refptr<I420Buffer> buffer =
buffer_pool_.CreateBuffer(img->d_w, img->d_h);
if (!buffer.get()) {
// Pool has too many pending frames.
RTC_HISTOGRAM_BOOLEAN("WebRTC.Video.LibvpxVp8Decoder.TooManyPendingFrames",
1);
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
buffer->MutableDataY(), buffer->StrideY(),
buffer->MutableDataU(), buffer->StrideU(),
buffer->MutableDataV(), buffer->StrideV(), img->d_w,
img->d_h);
VideoFrame decoded_image(buffer, timestamp, 0, kVideoRotation_0);
decoded_image.set_ntp_time_ms(ntp_time_ms);
decode_complete_callback_->Decoded(decoded_image, rtc::nullopt, qp);
return WEBRTC_VIDEO_CODEC_OK;
}
int LibvpxVp8Decoder::RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) {
decode_complete_callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int LibvpxVp8Decoder::Release() {
int ret_val = WEBRTC_VIDEO_CODEC_OK;
if (decoder_ != NULL) {
if (inited_) {
if (vpx_codec_destroy(decoder_)) {
ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
}
}
delete decoder_;
decoder_ = NULL;
}
buffer_pool_.Release();
inited_ = false;
return ret_val;
}
const char* LibvpxVp8Decoder::ImplementationName() const {
return "libvpx";
}
} // namespace webrtc
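For orientation, this is how the decoder above is driven through its public interface. A hedged sketch, assuming the caller has prepared a VideoCodec |codec_settings| and a DecodedImageCallback implementation |sink| (both hypothetical names):

std::unique_ptr<webrtc::VP8Decoder> decoder = webrtc::VP8Decoder::Create();
decoder->InitDecode(&codec_settings, /*number_of_cores=*/1);
decoder->RegisterDecodeCompleteCallback(&sink);

// Per received frame. Decoded output is delivered on |sink|; the
// fragmentation and codec-specific arguments are never read by the
// Decode() implementation above.
decoder->Decode(encoded_image, /*missing_frames=*/false,
                /*fragmentation=*/nullptr,
                /*codec_specific_info=*/nullptr,
                /*render_time_ms=*/0);

decoder->Release();

Note that, per the key_frame_required_ logic above, the first frame delivered after InitDecode() must be a complete key frame or Decode() returns WEBRTC_VIDEO_CODEC_ERROR.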

modules/video_coding/codecs/vp8/libvpx_vp8_decoder.h

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
#define MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
#include <memory>
#include "api/video_codecs/video_decoder.h"
#include "common_types.h" // NOLINT(build/include)
#include "common_video/include/i420_buffer_pool.h"
#include "common_video/include/video_frame.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
namespace webrtc {
class LibvpxVp8Decoder : public VP8Decoder {
public:
LibvpxVp8Decoder();
~LibvpxVp8Decoder() override;
int InitDecode(const VideoCodec* inst, int number_of_cores) override;
int Decode(const EncodedImage& input_image,
bool missing_frames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codec_specific_info,
int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
int Release() override;
const char* ImplementationName() const override;
struct DeblockParams {
int max_level = 6; // Deblocking strength: [0, 16].
int degrade_qp = 1; // If QP value is below, start lowering |max_level|.
int min_qp = 0; // If QP value is below, turn off deblocking.
};
private:
class QpSmoother;
int ReturnFrame(const vpx_image_t* img,
uint32_t timeStamp,
int64_t ntp_time_ms,
int qp);
const bool use_postproc_arm_;
I420BufferPool buffer_pool_;
DecodedImageCallback* decode_complete_callback_;
bool inited_;
vpx_codec_ctx_t* decoder_;
int propagation_cnt_;
int last_frame_width_;
int last_frame_height_;
bool key_frame_required_;
DeblockParams deblock_;
const std::unique_ptr<QpSmoother> qp_smoother_;
};
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_DECODER_H_
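The DeblockParams defaults above are only overridden on ARM targets, when the WebRTC-VP8-Postproc-Config-Arm field trial supplies a group of the form Enabled-<max_level>,<min_qp>,<degrade_qp>. A standalone sketch of the same parse-and-validate logic as GetPostProcParamsFromFieldTrialGroup() in the .cc file; the function name here is illustrative:

#include <cstdio>
#include <string>

struct DeblockParams {
  int max_level = 6;   // Deblocking strength: [0, 16].
  int degrade_qp = 1;  // Below this QP, lower the level linearly.
  int min_qp = 0;      // Below this QP, deblocking is off.
};

// Returns false (keeping the defaults) unless all three values parse and
// satisfy 0 <= max_level <= 16, min_qp >= 0 and degrade_qp > min_qp,
// mirroring the checks in the implementation file.
bool ParseDeblockTrial(const std::string& group, DeblockParams* out) {
  DeblockParams p;
  if (std::sscanf(group.c_str(), "Enabled-%d,%d,%d", &p.max_level, &p.min_qp,
                  &p.degrade_qp) != 3)
    return false;
  if (p.max_level < 0 || p.max_level > 16)
    return false;
  if (p.min_qp < 0 || p.degrade_qp <= p.min_qp)
    return false;
  *out = p;
  return true;
}

For example, a group string of "Enabled-8,4,20" yields max_level = 8, min_qp = 4, degrade_qp = 20.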

modules/video_coding/codecs/vp8/vp8_impl.cc → modules/video_coding/codecs/vp8/libvpx_vp8_encoder.cc

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -8,48 +8,34 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/video_coding/codecs/vp8/vp8_impl.h"
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <algorithm>
#include <string>
#include <vector>
#include "common_types.h" // NOLINT(build/include)
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
#include "modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "modules/video_coding/codecs/vp8/temporal_layers.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"
#include "rtc_base/numerics/exp_filter.h"
#include "rtc_base/ptr_util.h"
#include "rtc_base/random.h"
#include "rtc_base/timeutils.h"
#include "rtc_base/trace_event.h"
#include "system_wrappers/include/clock.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include "third_party/libyuv/include/libyuv/scale.h"
namespace webrtc {
namespace {
const char kVp8PostProcArmFieldTrial[] = "WebRTC-VP8-Postproc-Config-Arm";
const char kVp8GfBoostFieldTrial[] = "WebRTC-VP8-GfBoost";
// QP is obtained from VP8-bitstream for HW, so the QP corresponds to the
// bitstream range of [0, 127] and not the user-level range of [0,63].
static const int kLowVp8QpThreshold = 29;
static const int kHighVp8QpThreshold = 95;
constexpr int kLowVp8QpThreshold = 29;
constexpr int kHighVp8QpThreshold = 95;
const int kTokenPartitions = VP8_ONE_TOKENPARTITION;
enum { kVp8ErrorPropagationTh = 30 };
enum { kVp832ByteAlign = 32 };
constexpr int kTokenPartitions = VP8_ONE_TOKENPARTITION;
constexpr uint32_t kVp832ByteAlign = 32u;
// VP8 denoiser states.
enum denoiserState {
@@ -134,27 +120,6 @@ bool GetGfBoostPercentageFromFieldTrialGroup(int* boost_percentage) {
return true;
}
void GetPostProcParamsFromFieldTrialGroup(
VP8DecoderImpl::DeblockParams* deblock_params) {
std::string group =
webrtc::field_trial::FindFullName(kVp8PostProcArmFieldTrial);
if (group.empty())
return;
VP8DecoderImpl::DeblockParams params;
if (sscanf(group.c_str(), "Enabled-%d,%d,%d", &params.max_level,
&params.min_qp, &params.degrade_qp) != 3)
return;
if (params.max_level < 0 || params.max_level > 16)
return;
if (params.min_qp < 0 || params.degrade_qp <= params.min_qp)
return;
*deblock_params = params;
}
static_assert(
VP8_TS_MAX_PERIODICITY == VPX_TS_MAX_PERIODICITY,
"VP8_TS_MAX_PERIODICITY must be kept in sync with the constant in libvpx.");
@@ -203,18 +168,13 @@ bool UpdateVpxConfiguration(TemporalLayers* temporal_layers,
FillInEncoderConfig(cfg, config);
return res;
}
} // namespace
std::unique_ptr<VP8Encoder> VP8Encoder::Create() {
return rtc::MakeUnique<VP8EncoderImpl>();
return rtc::MakeUnique<LibvpxVp8Encoder>();
}
std::unique_ptr<VP8Decoder> VP8Decoder::Create() {
return rtc::MakeUnique<VP8DecoderImpl>();
}
vpx_enc_frame_flags_t VP8EncoderImpl::EncodeFlags(
vpx_enc_frame_flags_t LibvpxVp8Encoder::EncodeFlags(
const TemporalLayers::FrameConfig& references) {
RTC_DCHECK(!references.drop_frame);
@@ -238,7 +198,7 @@ vpx_enc_frame_flags_t VP8EncoderImpl::EncodeFlags(
return flags;
}
VP8EncoderImpl::VP8EncoderImpl()
LibvpxVp8Encoder::LibvpxVp8Encoder()
: use_gf_boost_(webrtc::field_trial::IsEnabled(kVp8GfBoostFieldTrial)),
encoded_complete_callback_(nullptr),
inited_(false),
@@ -265,11 +225,11 @@ VP8EncoderImpl::VP8EncoderImpl()
downsampling_factors_.reserve(kMaxSimulcastStreams);
}
VP8EncoderImpl::~VP8EncoderImpl() {
LibvpxVp8Encoder::~LibvpxVp8Encoder() {
Release();
}
int VP8EncoderImpl::Release() {
int LibvpxVp8Encoder::Release() {
int ret_val = WEBRTC_VIDEO_CODEC_OK;
while (!encoded_images_.empty()) {
@@ -302,8 +262,8 @@ int VP8EncoderImpl::Release() {
return ret_val;
}
int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate,
uint32_t new_framerate) {
int LibvpxVp8Encoder::SetRateAllocation(const BitrateAllocation& bitrate,
uint32_t new_framerate) {
if (!inited_)
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
@@ -365,11 +325,11 @@ int VP8EncoderImpl::SetRateAllocation(const BitrateAllocation& bitrate,
return WEBRTC_VIDEO_CODEC_OK;
}
const char* VP8EncoderImpl::ImplementationName() const {
const char* LibvpxVp8Encoder::ImplementationName() const {
return "libvpx";
}
void VP8EncoderImpl::SetStreamState(bool send_stream, int stream_idx) {
void LibvpxVp8Encoder::SetStreamState(bool send_stream, int stream_idx) {
if (send_stream && !send_stream_[stream_idx]) {
// Need a key frame if we have not sent this stream before.
key_frame_request_[stream_idx] = true;
@@ -377,9 +337,9 @@ void VP8EncoderImpl::SetStreamState(bool send_stream, int stream_idx) {
send_stream_[stream_idx] = send_stream;
}
void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
int num_temporal_layers,
const VideoCodec& codec) {
void LibvpxVp8Encoder::SetupTemporalLayers(int num_streams,
int num_temporal_layers,
const VideoCodec& codec) {
RTC_DCHECK(codec.VP8().tl_factory != nullptr);
const TemporalLayersFactory* tl_factory = codec.VP8().tl_factory;
if (num_streams == 1) {
@@ -400,9 +360,9 @@ void VP8EncoderImpl::SetupTemporalLayers(int num_streams,
}
}
int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t /*maxPayloadSize */) {
int LibvpxVp8Encoder::InitEncode(const VideoCodec* inst,
int number_of_cores,
size_t /*maxPayloadSize */) {
if (inst == NULL) {
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
@@ -619,9 +579,9 @@ int VP8EncoderImpl::InitEncode(const VideoCodec* inst,
return InitAndSetControlSettings();
}
int VP8EncoderImpl::SetCpuSpeed(int width, int height) {
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) \
|| defined(WEBRTC_ANDROID)
int LibvpxVp8Encoder::SetCpuSpeed(int width, int height) {
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
defined(WEBRTC_ANDROID)
// On mobile platform, use a lower speed setting for lower resolutions for
// CPUs with 4 or more cores.
RTC_DCHECK_GT(number_of_cores_, 0);
@@ -645,7 +605,7 @@ int VP8EncoderImpl::SetCpuSpeed(int width, int height) {
#endif
}
int VP8EncoderImpl::NumberOfThreads(int width, int height, int cpus) {
int LibvpxVp8Encoder::NumberOfThreads(int width, int height, int cpus) {
#if defined(WEBRTC_ANDROID)
if (width * height >= 320 * 180) {
if (cpus >= 4) {
@@ -675,7 +635,7 @@ int VP8EncoderImpl::NumberOfThreads(int width, int height, int cpus) {
#endif
}
int VP8EncoderImpl::InitAndSetControlSettings() {
int LibvpxVp8Encoder::InitAndSetControlSettings() {
vpx_codec_flags_t flags = 0;
flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
@@ -700,8 +660,8 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
// when encoding lower resolution streams. Would it work with the
// multi-res encoding feature?
denoiserState denoiser_state = kDenoiserOnYOnly;
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) \
|| defined(WEBRTC_ANDROID)
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
defined(WEBRTC_ANDROID)
denoiser_state = kDenoiserOnYOnly;
#else
denoiser_state = kDenoiserOnAdaptive;
@@ -739,7 +699,7 @@ int VP8EncoderImpl::InitAndSetControlSettings() {
return WEBRTC_VIDEO_CODEC_OK;
}
uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
uint32_t LibvpxVp8Encoder::MaxIntraTarget(uint32_t optimalBuffersize) {
// Set max to the optimal buffer level (normalized by target BR),
// and scaled by a scalePar.
// Max target size = scalePar * optimalBufferSize * targetBR[Kbps].
@@ -755,9 +715,9 @@ uint32_t VP8EncoderImpl::MaxIntraTarget(uint32_t optimalBuffersize) {
return (targetPct < minIntraTh) ? minIntraTh : targetPct;
}
int VP8EncoderImpl::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
const CodecSpecificInfo* codec_specific_info,
const std::vector<FrameType>* frame_types) {
RTC_DCHECK_EQ(frame.width(), codec_.width);
RTC_DCHECK_EQ(frame.height(), codec_.height);
@@ -903,7 +863,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
return error;
}
void VP8EncoderImpl::PopulateCodecSpecific(
void LibvpxVp8Encoder::PopulateCodecSpecific(
CodecSpecificInfo* codec_specific,
const TemporalLayers::FrameConfig& tl_config,
const vpx_codec_cx_pkt_t& pkt,
@@ -924,7 +884,7 @@ void VP8EncoderImpl::PopulateCodecSpecific(
picture_id_[stream_idx] = (picture_id_[stream_idx] + 1) & 0x7FFF;
}
int VP8EncoderImpl::GetEncodedPartitions(
int LibvpxVp8Encoder::GetEncodedPartitions(
const TemporalLayers::FrameConfig tl_configs[],
const VideoFrame& input_image) {
int stream_idx = static_cast<int>(encoders_.size()) - 1;
@@ -1013,7 +973,7 @@ int VP8EncoderImpl::GetEncodedPartitions(
return result;
}
VideoEncoder::ScalingSettings VP8EncoderImpl::GetScalingSettings() const {
VideoEncoder::ScalingSettings LibvpxVp8Encoder::GetScalingSettings() const {
const bool enable_scaling = encoders_.size() == 1 &&
configurations_[0].rc_dropframe_thresh > 0 &&
codec_.VP8().automaticResizeOn;
@@ -1022,295 +982,14 @@ VideoEncoder::ScalingSettings VP8EncoderImpl::GetScalingSettings() const {
: VideoEncoder::ScalingSettings::kOff;
}
int VP8EncoderImpl::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
int LibvpxVp8Encoder::SetChannelParameters(uint32_t packetLoss, int64_t rtt) {
return WEBRTC_VIDEO_CODEC_OK;
}
int VP8EncoderImpl::RegisterEncodeCompleteCallback(
int LibvpxVp8Encoder::RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) {
encoded_complete_callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
class VP8DecoderImpl::QpSmoother {
public:
QpSmoother() : last_sample_ms_(rtc::TimeMillis()), smoother_(kAlpha) {}
int GetAvg() const {
float value = smoother_.filtered();
return (value == rtc::ExpFilter::kValueUndefined) ? 0
: static_cast<int>(value);
}
void Add(float sample) {
int64_t now_ms = rtc::TimeMillis();
smoother_.Apply(static_cast<float>(now_ms - last_sample_ms_), sample);
last_sample_ms_ = now_ms;
}
void Reset() { smoother_.Reset(kAlpha); }
private:
const float kAlpha = 0.95f;
int64_t last_sample_ms_;
rtc::ExpFilter smoother_;
};
VP8DecoderImpl::VP8DecoderImpl()
: use_postproc_arm_(
webrtc::field_trial::IsEnabled(kVp8PostProcArmFieldTrial)),
buffer_pool_(false, 300 /* max_number_of_buffers*/),
decode_complete_callback_(NULL),
inited_(false),
decoder_(NULL),
propagation_cnt_(-1),
last_frame_width_(0),
last_frame_height_(0),
key_frame_required_(true),
qp_smoother_(use_postproc_arm_ ? new QpSmoother() : nullptr) {
if (use_postproc_arm_)
GetPostProcParamsFromFieldTrialGroup(&deblock_);
}
VP8DecoderImpl::~VP8DecoderImpl() {
inited_ = true; // in order to do the actual release
Release();
}
int VP8DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
int ret_val = Release();
if (ret_val < 0) {
return ret_val;
}
if (decoder_ == NULL) {
decoder_ = new vpx_codec_ctx_t;
memset(decoder_, 0, sizeof(*decoder_));
}
vpx_codec_dec_cfg_t cfg;
// Setting number of threads to a constant value (1)
cfg.threads = 1;
cfg.h = cfg.w = 0; // set after decode
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) \
|| defined(WEBRTC_ANDROID)
vpx_codec_flags_t flags = use_postproc_arm_ ? VPX_CODEC_USE_POSTPROC : 0;
#else
vpx_codec_flags_t flags = VPX_CODEC_USE_POSTPROC;
#endif
if (vpx_codec_dec_init(decoder_, vpx_codec_vp8_dx(), &cfg, flags)) {
delete decoder_;
decoder_ = nullptr;
return WEBRTC_VIDEO_CODEC_MEMORY;
}
propagation_cnt_ = -1;
inited_ = true;
// Always start with a complete key frame.
key_frame_required_ = true;
return WEBRTC_VIDEO_CODEC_OK;
}
int VP8DecoderImpl::Decode(const EncodedImage& input_image,
bool missing_frames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codec_specific_info,
int64_t /*render_time_ms*/) {
if (!inited_) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (decode_complete_callback_ == NULL) {
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
}
if (input_image._buffer == NULL && input_image._length > 0) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
}
// Post process configurations.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) \
|| defined(WEBRTC_ANDROID)
if (use_postproc_arm_) {
vp8_postproc_cfg_t ppcfg;
ppcfg.post_proc_flag = VP8_MFQE;
// For low resolutions, use stronger deblocking filter.
int last_width_x_height = last_frame_width_ * last_frame_height_;
if (last_width_x_height > 0 && last_width_x_height <= 320 * 240) {
// Enable the deblock and demacroblocker based on qp thresholds.
RTC_DCHECK(qp_smoother_);
int qp = qp_smoother_->GetAvg();
if (qp > deblock_.min_qp) {
int level = deblock_.max_level;
if (qp < deblock_.degrade_qp) {
// Use lower level.
level = deblock_.max_level * (qp - deblock_.min_qp) /
(deblock_.degrade_qp - deblock_.min_qp);
}
// Deblocking level only affects VP8_DEMACROBLOCK.
ppcfg.deblocking_level = std::max(level, 1);
ppcfg.post_proc_flag |= VP8_DEBLOCK | VP8_DEMACROBLOCK;
}
}
vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
}
#else
vp8_postproc_cfg_t ppcfg;
// MFQE enabled to reduce key frame popping.
ppcfg.post_proc_flag = VP8_MFQE | VP8_DEBLOCK;
// For VGA resolutions and lower, enable the demacroblocker postproc.
if (last_frame_width_ * last_frame_height_ <= 640 * 360) {
ppcfg.post_proc_flag |= VP8_DEMACROBLOCK;
}
// Strength of deblocking filter. Valid range:[0,16]
ppcfg.deblocking_level = 3;
vpx_codec_control(decoder_, VP8_SET_POSTPROC, &ppcfg);
#endif
// Always start with a complete key frame.
if (key_frame_required_) {
if (input_image._frameType != kVideoFrameKey)
return WEBRTC_VIDEO_CODEC_ERROR;
// We have a key frame - is it complete?
if (input_image._completeFrame) {
key_frame_required_ = false;
} else {
return WEBRTC_VIDEO_CODEC_ERROR;
}
}
// Restrict error propagation using key frame requests.
// Reset on a key frame refresh.
if (input_image._frameType == kVideoFrameKey && input_image._completeFrame) {
propagation_cnt_ = -1;
// Start count on first loss.
} else if ((!input_image._completeFrame || missing_frames) &&
propagation_cnt_ == -1) {
propagation_cnt_ = 0;
}
if (propagation_cnt_ >= 0) {
propagation_cnt_++;
}
vpx_codec_iter_t iter = NULL;
vpx_image_t* img;
int ret;
// Check for missing frames.
if (missing_frames) {
// Call decoder with zero data length to signal missing frames.
if (vpx_codec_decode(decoder_, NULL, 0, 0, VPX_DL_REALTIME)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0)
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
img = vpx_codec_get_frame(decoder_, &iter);
iter = NULL;
}
uint8_t* buffer = input_image._buffer;
if (input_image._length == 0) {
buffer = NULL; // Triggers full frame concealment.
}
if (vpx_codec_decode(decoder_, buffer, input_image._length, 0,
VPX_DL_REALTIME)) {
// Reset to avoid requesting key frames too often.
if (propagation_cnt_ > 0) {
propagation_cnt_ = 0;
}
return WEBRTC_VIDEO_CODEC_ERROR;
}
img = vpx_codec_get_frame(decoder_, &iter);
int qp;
vpx_codec_err_t vpx_ret =
vpx_codec_control(decoder_, VPXD_GET_LAST_QUANTIZER, &qp);
RTC_DCHECK_EQ(vpx_ret, VPX_CODEC_OK);
ret = ReturnFrame(img, input_image._timeStamp, input_image.ntp_time_ms_, qp);
if (ret != 0) {
// Reset to avoid requesting key frames too often.
if (ret < 0 && propagation_cnt_ > 0)
propagation_cnt_ = 0;
return ret;
}
// Check Vs. threshold
if (propagation_cnt_ > kVp8ErrorPropagationTh) {
// Reset to avoid requesting key frames too often.
propagation_cnt_ = 0;
return WEBRTC_VIDEO_CODEC_ERROR;
}
return WEBRTC_VIDEO_CODEC_OK;
}
int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
uint32_t timestamp,
int64_t ntp_time_ms,
int qp) {
if (img == NULL) {
// Decoder OK and NULL image => No show frame
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
if (qp_smoother_) {
if (last_frame_width_ != static_cast<int>(img->d_w) ||
last_frame_height_ != static_cast<int>(img->d_h)) {
qp_smoother_->Reset();
}
qp_smoother_->Add(qp);
}
last_frame_width_ = img->d_w;
last_frame_height_ = img->d_h;
// Allocate memory for decoded image.
rtc::scoped_refptr<I420Buffer> buffer =
buffer_pool_.CreateBuffer(img->d_w, img->d_h);
if (!buffer.get()) {
// Pool has too many pending frames.
RTC_HISTOGRAM_BOOLEAN("WebRTC.Video.VP8DecoderImpl.TooManyPendingFrames",
1);
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
buffer->MutableDataY(), buffer->StrideY(),
buffer->MutableDataU(), buffer->StrideU(),
buffer->MutableDataV(), buffer->StrideV(), img->d_w,
img->d_h);
VideoFrame decoded_image(buffer, timestamp, 0, kVideoRotation_0);
decoded_image.set_ntp_time_ms(ntp_time_ms);
decode_complete_callback_->Decoded(decoded_image, rtc::nullopt, qp);
return WEBRTC_VIDEO_CODEC_OK;
}
int VP8DecoderImpl::RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) {
decode_complete_callback_ = callback;
return WEBRTC_VIDEO_CODEC_OK;
}
int VP8DecoderImpl::Release() {
int ret_val = WEBRTC_VIDEO_CODEC_OK;
if (decoder_ != NULL) {
if (inited_) {
if (vpx_codec_destroy(decoder_)) {
ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
}
}
delete decoder_;
decoder_ = NULL;
}
buffer_pool_.Release();
inited_ = false;
return ret_val;
}
const char* VP8DecoderImpl::ImplementationName() const {
return "libvpx";
}
} // namespace webrtc
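The ARM postproc path (moved verbatim into libvpx_vp8_decoder.cc above) scales the deblocking level linearly when the smoothed QP sits between min_qp and degrade_qp, rather than jumping straight to max_level. A worked sketch of that computation, using the hypothetical trial values from the parsing example earlier:

#include <algorithm>

// Worked example with hypothetical parameters: max_level = 8, min_qp = 4,
// degrade_qp = 20, and a smoothed QP of 12 reported by the QpSmoother.
int DeblockingLevel(int qp, int max_level, int min_qp, int degrade_qp) {
  int level = max_level;
  if (qp < degrade_qp) {
    // Linear ramp between min_qp and degrade_qp:
    // 8 * (12 - 4) / (20 - 4) = 4, i.e. half strength.
    level = max_level * (qp - min_qp) / (degrade_qp - min_qp);
  }
  // The level only affects VP8_DEMACROBLOCK; clamp to at least 1.
  return std::max(level, 1);
}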

modules/video_coding/codecs/vp8/vp8_impl.h → modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h

@@ -1,45 +1,36 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
* WEBRTC VP8 wrapper interface
*/
#ifndef MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
#define MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
#ifndef MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
#define MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
#include <memory>
#include <vector>
// NOTE: This include order must remain to avoid compile errors, even though
// it breaks the style guide.
#include "vpx/vp8cx.h"
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_encoder.h"
#include "api/video/video_frame.h"
#include "common_video/include/i420_buffer_pool.h"
#include "api/video_codecs/video_encoder.h"
#include "common_types.h" // NOLINT(build/include)
#include "common_video/include/video_frame.h"
#include "modules/video_coding/codecs/vp8/include/vp8.h"
#include "modules/video_coding/codecs/vp8/temporal_layers.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/utility/quality_scaler.h"
#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"
namespace webrtc {
class TemporalLayers;
class VP8EncoderImpl : public VP8Encoder {
class LibvpxVp8Encoder : public VP8Encoder {
public:
VP8EncoderImpl();
virtual ~VP8EncoderImpl();
LibvpxVp8Encoder();
~LibvpxVp8Encoder() override;
int Release() override;
@@ -117,51 +108,6 @@ class VP8EncoderImpl : public VP8Encoder {
std::vector<vpx_rational_t> downsampling_factors_;
};
class VP8DecoderImpl : public VP8Decoder {
public:
VP8DecoderImpl();
virtual ~VP8DecoderImpl();
int InitDecode(const VideoCodec* inst, int number_of_cores) override;
int Decode(const EncodedImage& input_image,
bool missing_frames,
const RTPFragmentationHeader* fragmentation,
const CodecSpecificInfo* codec_specific_info,
int64_t /*render_time_ms*/) override;
int RegisterDecodeCompleteCallback(DecodedImageCallback* callback) override;
int Release() override;
const char* ImplementationName() const override;
struct DeblockParams {
int max_level = 6; // Deblocking strength: [0, 16].
int degrade_qp = 1; // If QP value is below, start lowering |max_level|.
int min_qp = 0; // If QP value is below, turn off deblocking.
};
private:
class QpSmoother;
int ReturnFrame(const vpx_image_t* img,
uint32_t timeStamp,
int64_t ntp_time_ms,
int qp);
const bool use_postproc_arm_;
I420BufferPool buffer_pool_;
DecodedImageCallback* decode_complete_callback_;
bool inited_;
vpx_codec_ctx_t* decoder_;
int propagation_cnt_;
int last_frame_width_;
int last_frame_height_;
bool key_frame_required_;
DeblockParams deblock_;
const std::unique_ptr<QpSmoother> qp_smoother_;
};
} // namespace webrtc
#endif // MODULES_VIDEO_CODING_CODECS_VP8_VP8_IMPL_H_
#endif // MODULES_VIDEO_CODING_CODECS_VP8_LIBVPX_VP8_ENCODER_H_
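The encoder half keeps the same VideoEncoder-derived surface. A hedged sketch of the call sequence, assuming the caller has prepared a VideoCodec |codec_settings| with VP8().tl_factory set (SetupTemporalLayers above DCHECKs this) and an EncodedImageCallback |sink|, both hypothetical names:

std::unique_ptr<webrtc::VP8Encoder> encoder = webrtc::VP8Encoder::Create();
encoder->InitEncode(&codec_settings, /*number_of_cores=*/1,
                    /*max_payload_size=*/1200);
encoder->RegisterEncodeCompleteCallback(&sink);

// Per captured frame; |frame| must match the configured width and height,
// which Encode() DCHECKs.
std::vector<webrtc::FrameType> frame_types(1, webrtc::kVideoFrameDelta);
encoder->Encode(frame, /*codec_specific_info=*/nullptr, &frame_types);

encoder->Release();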

modules/video_coding/codecs/vp8/screenshare_layers_unittest.cc

@@ -11,8 +11,8 @@
#include <memory>
#include <vector>
#include "modules/video_coding/codecs/vp8/libvpx_vp8_encoder.h"
#include "modules/video_coding/codecs/vp8/screenshare_layers.h"
#include "modules/video_coding/codecs/vp8/vp8_impl.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/utility/mock/mock_frame_dropper.h"
#include "system_wrappers/include/clock.h"
@@ -77,7 +77,7 @@ class ScreenshareLayerTest : public ::testing::Test {
return -1;
}
config_updated_ = layers_->UpdateConfiguration(&cfg_);
int flags = VP8EncoderImpl::EncodeFlags(tl_config_);
int flags = LibvpxVp8Encoder::EncodeFlags(tl_config_);
layers_->PopulateCodecSpecific(key_frame, tl_config_, &vp8_info_,
timestamp_);
EXPECT_NE(-1, frame_size_);
@@ -436,7 +436,7 @@ TEST_F(ScreenshareLayerTest, RespectsMaxIntervalBetweenFrames) {
layers_->UpdateConfiguration(&cfg_);
EXPECT_EQ(kTl0Flags,
VP8EncoderImpl::EncodeFlags(UpdateLayerConfig(kStartTimestamp)));
LibvpxVp8Encoder::EncodeFlags(UpdateLayerConfig(kStartTimestamp)));
layers_->FrameEncoded(kLargeFrameSizeBytes, kDefaultQp);
const uint32_t kTwoSecondsLater =
@@ -454,7 +454,7 @@ TEST_F(ScreenshareLayerTest, RespectsMaxIntervalBetweenFrames) {
// More than two seconds has passed since last frame, one should be emitted
// even if bitrate target is then exceeded.
EXPECT_EQ(kTl0Flags, VP8EncoderImpl::EncodeFlags(
EXPECT_EQ(kTl0Flags, LibvpxVp8Encoder::EncodeFlags(
UpdateLayerConfig(kTwoSecondsLater + 90)));
}
@@ -473,7 +473,7 @@ TEST_F(ScreenshareLayerTest, UpdatesHistograms) {
dropped_frame = true;
continue;
}
int flags = VP8EncoderImpl::EncodeFlags(tl_config_);
int flags = LibvpxVp8Encoder::EncodeFlags(tl_config_);
if (flags != -1)
layers_->UpdateConfiguration(&cfg_);
@@ -597,7 +597,7 @@ TEST_F(ScreenshareLayerTest, 2LayersSyncAtOvershootDrop) {
layers_->FrameEncoded(0, -1);
config_updated_ = layers_->UpdateConfiguration(&cfg_);
EXPECT_EQ(kTl1SyncFlags, VP8EncoderImpl::EncodeFlags(tl_config_));
EXPECT_EQ(kTl1SyncFlags, LibvpxVp8Encoder::EncodeFlags(tl_config_));
CodecSpecificInfoVP8 new_vp8_info;
layers_->PopulateCodecSpecific(false, tl_config_, &new_vp8_info, timestamp_);
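The unit-test churn above is mechanical: the static EncodeFlags() helper moved with the encoder class, so each test now maps a TemporalLayers::FrameConfig onto libvpx flags through the new name. A sketch of the pattern, with |tl| a configured TemporalLayers instance as in the tests:

// Map a frame's temporal-layer configuration onto libvpx encode flags.
webrtc::TemporalLayers::FrameConfig tl_config = tl.UpdateLayerConfig(timestamp);
vpx_enc_frame_flags_t flags = webrtc::LibvpxVp8Encoder::EncodeFlags(tl_config);
// |flags| is what the encoder passes to libvpx for the next frame; the
// tests assert it against per-frame expectations.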