Reformat the WebRTC code base
Running clang-format with chromium's style guide.

The goal is n-fold:
* providing consistency and readability (that's what code guidelines are for)
* preventing noise with presubmit checks and git cl format
* building on the previous point: making it easier to automatically fix format issues
* you name it

Please consider using git-hyper-blame to ignore this commit.

Bug: webrtc:9340
Change-Id: I694567c4cdf8cee2860958cfe82bfaf25848bb87
Reviewed-on: https://webrtc-review.googlesource.com/81185
Reviewed-by: Patrik Höglund <phoglund@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23660}
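For anyone reproducing or working around this change, a minimal sketch of the relevant commands (assuming a checkout with depot_tools on PATH; the file path and placeholder hash are illustrative):

    # Reformat one file in Chromium style, in place (this CL did it tree-wide).
    clang-format -i -style=chromium modules/video_coding/codecs/h264/h264_decoder_impl.h

    # Reformat only the lines touched by the current change.
    git cl format

    # Blame a file while skipping this commit's formatting churn.
    git hyper-blame -i <this-commit-hash> path/to/file.cc

git-hyper-blame can also pick up hashes from an ignore file (commonly .git-blame-ignore-revs), so mechanical commits like this one can be skipped without passing -i each time.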
@@ -58,8 +58,9 @@ class H264DecoderImpl : public H264Decoder {
   // Called by FFmpeg when it needs a frame buffer to store decoded frames in.
   // The |VideoFrame| returned by FFmpeg at |Decode| originate from here. Their
   // buffers are reference counted and freed by FFmpeg using |AVFreeBuffer2|.
-  static int AVGetBuffer2(
-      AVCodecContext* context, AVFrame* av_frame, int flags);
+  static int AVGetBuffer2(AVCodecContext* context,
+                          AVFrame* av_frame,
+                          int flags);
   // Called by FFmpeg when it is done with a video frame, see |AVGetBuffer2|.
   static void AVFreeBuffer2(void* opaque, uint8_t* data);
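For context on the hunk above, a minimal sketch of how such callbacks are typically installed, using FFmpeg's public AVCodecContext fields (not the literal WebRTC setup code):

    // During decoder initialization: FFmpeg calls AVGetBuffer2 whenever it
    // needs an output frame buffer, and the buffers allocated there are
    // later released through AVFreeBuffer2.
    av_context->get_buffer2 = &H264DecoderImpl::AVGetBuffer2;
    av_context->opaque = decoder;  // Made available to the callback by FFmpeg.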
@@ -45,17 +45,17 @@ enum H264EncoderImplEvent {
 int NumberOfThreads(int width, int height, int number_of_cores) {
   // TODO(hbos): In Chromium, multiple threads do not work with sandbox on Mac,
   // see crbug.com/583348. Until further investigated, only use one thread.
-  // if (width * height >= 1920 * 1080 && number_of_cores > 8) {
-  //   return 8;  // 8 threads for 1080p on high perf machines.
-  // } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
-  //   return 3;  // 3 threads for 1080p.
-  // } else if (width * height > 640 * 480 && number_of_cores >= 3) {
-  //   return 2;  // 2 threads for qHD/HD.
-  // } else {
-  //   return 1;  // 1 thread for VGA or less.
-  // }
-  // TODO(sprang): Also check sSliceArgument.uiSliceNum om GetEncoderPrams(),
-  // before enabling multithreading here.
+  // if (width * height >= 1920 * 1080 && number_of_cores > 8) {
+  //   return 8;  // 8 threads for 1080p on high perf machines.
+  // } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
+  //   return 3;  // 3 threads for 1080p.
+  // } else if (width * height > 640 * 480 && number_of_cores >= 3) {
+  //   return 2;  // 2 threads for qHD/HD.
+  // } else {
+  //   return 1;  // 1 thread for VGA or less.
+  // }
+  // TODO(sprang): Also check sSliceArgument.uiSliceNum om GetEncoderPrams(),
+  // before enabling multithreading here.
   return 1;
 }
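If the commented-out heuristic above were ever re-enabled, it would read roughly as follows (a sketch reconstructed from the comments; the function name is hypothetical, the thresholds are exactly those in the comments):

    int NumberOfThreadsUnsandboxed(int width, int height, int number_of_cores) {
      // Scale the thread count with resolution, capped by available cores.
      if (width * height >= 1920 * 1080 && number_of_cores > 8) {
        return 8;  // 8 threads for 1080p on high perf machines.
      } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
        return 3;  // 3 threads for 1080p.
      } else if (width * height > 640 * 480 && number_of_cores >= 3) {
        return 2;  // 2 threads for qHD/HD.
      } else {
        return 1;  // 1 thread for VGA or less.
      }
    }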
@@ -139,10 +139,10 @@ static void RtpFragmentize(EncodedImage* encoded_image,
       // Because the sum of all layer lengths, |required_size|, fits in a
       // |size_t|, we know that any indices in-between will not overflow.
       RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
-      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+0], start_code[0]);
-      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+1], start_code[1]);
-      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+2], start_code[2]);
-      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+3], start_code[3]);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 2], start_code[2]);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 3], start_code[3]);
       frag_header->fragmentationOffset[frag] =
           encoded_image->_length + layer_len + sizeof(start_code);
       frag_header->fragmentationLength[frag] =
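For reference, the DCHECKs above verify the 4-byte H.264 Annex B start code that OpenH264 writes in front of every NAL unit; a sketch of the constant they compare against (defined elsewhere in the real file):

    // Each NAL unit in the OpenH264 output buffer begins with these four
    // bytes, which is also why pNalLengthInByte[nal] must be at least 4.
    const uint8_t start_code[4] = {0, 0, 0, 1};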
@@ -150,8 +150,7 @@ static void RtpFragmentize(EncodedImage* encoded_image,
       layer_len += layerInfo.pNalLengthInByte[nal];
     }
     // Copy the entire layer's data (including start codes).
-    memcpy(encoded_image->_buffer + encoded_image->_length,
-           layerInfo.pBsBuf,
+    memcpy(encoded_image->_buffer + encoded_image->_length, layerInfo.pBsBuf,
            layer_len);
     encoded_image->_length += layer_len;
   }
@@ -190,8 +189,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
                                     int32_t number_of_cores,
                                     size_t max_payload_size) {
   ReportInit();
-  if (!codec_settings ||
-      codec_settings->codecType != kVideoCodecH264) {
+  if (!codec_settings || codec_settings->codecType != kVideoCodecH264) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
@@ -222,8 +220,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
   RTC_DCHECK(openh264_encoder_);
   if (kOpenH264EncoderDetailedLogging) {
     int trace_level = WELS_LOG_DETAIL;
-    openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL,
-                                 &trace_level);
+    openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
   }
   // else WELS_LOG_DEFAULT is used by default.
@@ -255,8 +252,7 @@ int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
   }
   // TODO(pbos): Base init params on these values before submitting.
   int video_format = EVideoFormatType::videoFormatI420;
-  openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT,
-                               &video_format);
+  openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);
 
   // Initialize encoded image. Default buffer size: size of unencoded data.
   encoded_image_._size = CalcBufferSize(VideoType::kI420, codec_settings->width,
@@ -300,8 +296,7 @@ int32_t H264EncoderImpl::SetRateAllocation(
   memset(&target_bitrate, 0, sizeof(SBitrateInfo));
   target_bitrate.iLayer = SPATIAL_LAYER_ALL,
   target_bitrate.iBitrate = target_bps_;
-  openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE,
-                               &target_bitrate);
+  openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE, &target_bitrate);
   openh264_encoder_->SetOption(ENCODER_OPTION_FRAME_RATE, &max_frame_rate_);
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -485,8 +480,7 @@ void H264EncoderImpl::ReportInit() {
   if (has_reported_init_)
     return;
   RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
-                            kH264EncoderEventInit,
-                            kH264EncoderEventMax);
+                            kH264EncoderEventInit, kH264EncoderEventMax);
   has_reported_init_ = true;
 }
@@ -494,13 +488,12 @@ void H264EncoderImpl::ReportError() {
   if (has_reported_error_)
     return;
   RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264EncoderImpl.Event",
-                            kH264EncoderEventError,
-                            kH264EncoderEventMax);
+                            kH264EncoderEventError, kH264EncoderEventMax);
   has_reported_error_ = true;
 }
 
-int32_t H264EncoderImpl::SetChannelParameters(
-    uint32_t packet_loss, int64_t rtt) {
+int32_t H264EncoderImpl::SetChannelParameters(uint32_t packet_loss,
+                                              int64_t rtt) {
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -126,8 +126,8 @@ int32_t MultiplexDecoderAdapter::Decode(
   int32_t rv = 0;
   for (size_t i = 0; i < image.image_components.size(); i++) {
     rv = decoders_[image.image_components[i].component_index]->Decode(
-        image.image_components[i].encoded_image, missing_frames,
-        nullptr, render_time_ms);
+        image.image_components[i].encoded_image, missing_frames, nullptr,
+        render_time_ms);
     if (rv != WEBRTC_VIDEO_CODEC_OK)
       return rv;
   }
@@ -128,9 +128,8 @@ TEST_F(TestMultiplexAdapter, EncodeDecodeI420Frame) {
   ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
   EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
 
-  EXPECT_EQ(
-      WEBRTC_VIDEO_CODEC_OK,
-      decoder_->Decode(encoded_frame, false, &codec_specific_info, -1));
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            decoder_->Decode(encoded_frame, false, &codec_specific_info, -1));
   std::unique_ptr<VideoFrame> decoded_frame;
   absl::optional<uint8_t> decoded_qp;
   ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
@@ -308,9 +308,9 @@ bool VideoCodecTestFixtureImpl::Config::IsAsyncCodec() const {
 
 // TODO(kthelgason): Move this out of the test fixture impl and
 // make available as a shared utility class.
-void VideoCodecTestFixtureImpl::H264KeyframeChecker::
-    CheckEncodedFrame(webrtc::VideoCodecType codec,
-                      const EncodedImage& encoded_frame) const {
+void VideoCodecTestFixtureImpl::H264KeyframeChecker::CheckEncodedFrame(
+    webrtc::VideoCodecType codec,
+    const EncodedImage& encoded_frame) const {
   EXPECT_EQ(kVideoCodecH264, codec);
   bool contains_sps = false;
   bool contains_pps = false;
@@ -390,8 +390,7 @@ VideoCodecTestFixtureImpl::VideoCodecTestFixtureImpl(
       decoder_factory_(std::move(decoder_factory)),
       config_(config) {}
 
-VideoCodecTestFixtureImpl::
-    ~VideoCodecTestFixtureImpl() = default;
+VideoCodecTestFixtureImpl::~VideoCodecTestFixtureImpl() = default;
 
 // Processes all frames in the clip and verifies the result.
 void VideoCodecTestFixtureImpl::RunTest(
@@ -70,8 +70,8 @@ TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsVp8) {
 
 TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsH264CBP) {
   auto config = CreateConfig();
-  const auto frame_checker = rtc::MakeUnique<
-      VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+  const auto frame_checker =
+      rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
   config.encoded_frame_checker = frame_checker.get();
   config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
                           352, 288);
@@ -95,8 +95,8 @@ TEST(VideoCodecTestMediaCodec, ForemanCif500kbpsH264CBP) {
 // HW encoders that support CHP.
 TEST(VideoCodecTestMediaCodec, DISABLED_ForemanCif500kbpsH264CHP) {
   auto config = CreateConfig();
-  const auto frame_checker = rtc::MakeUnique<
-      VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+  const auto frame_checker =
+      rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
 
   config.h264_codec_settings.profile = H264::kProfileConstrainedHigh;
   config.encoded_frame_checker = frame_checker.get();
@@ -39,8 +39,8 @@ VideoCodecTestFixture::Config CreateConfig() {
 }  // namespace
 
 TEST(VideoCodecTestOpenH264, ConstantHighBitrate) {
-  auto frame_checker = rtc::MakeUnique<
-      VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+  auto frame_checker =
+      rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
   auto config = CreateConfig();
   config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, true, false,
                           kCifWidth, kCifHeight);
@@ -60,8 +60,8 @@ TEST(VideoCodecTestOpenH264, ConstantHighBitrate) {
 // H264: Enable SingleNalUnit packetization mode. Encoder should split
 // large frames into multiple slices and limit length of NAL units.
 TEST(VideoCodecTestOpenH264, SingleNalUnit) {
-  auto frame_checker = rtc::MakeUnique<
-      VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+  auto frame_checker =
+      rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
   auto config = CreateConfig();
   config.h264_codec_settings.packetization_mode =
       H264PacketizationMode::SingleNalUnit;
|
@ -37,8 +37,8 @@ std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
|
||||
VideoCodecTestFixture::Config config) {
|
||||
auto decoder_factory = CreateObjCDecoderFactory();
|
||||
auto encoder_factory = CreateObjCEncoderFactory();
|
||||
return CreateVideoCodecTestFixture(
|
||||
config, std::move(decoder_factory), std::move(encoder_factory));
|
||||
return CreateVideoCodecTestFixture(config, std::move(decoder_factory),
|
||||
std::move(encoder_factory));
|
||||
}
|
||||
} // namespace
|
||||
|
||||
@@ -53,8 +53,8 @@ std::unique_ptr<VideoCodecTestFixture> CreateTestFixtureWithConfig(
 // TODO(kthelgason): Use RC Thresholds when the internal bitrateAdjuster is no
 // longer in use.
 MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CBP) {
-  const auto frame_checker = rtc::MakeUnique<
-      VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+  const auto frame_checker =
+      rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
   auto config = CreateConfig();
   config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
                           352, 288);
@@ -69,8 +69,8 @@ MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CBP) {
 }
 
 MAYBE_TEST(VideoCodecTestVideoToolbox, ForemanCif500kbpsH264CHP) {
-  const auto frame_checker = rtc::MakeUnique<
-      VideoCodecTestFixtureImpl::H264KeyframeChecker>();
+  const auto frame_checker =
+      rtc::MakeUnique<VideoCodecTestFixtureImpl::H264KeyframeChecker>();
   auto config = CreateConfig();
   config.h264_codec_settings.profile = H264::kProfileConstrainedHigh;
   config.SetCodecSettings(cricket::kH264CodecName, 1, 1, 1, false, false, false,
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #include <algorithm>
 #include <string>
 
@@ -8,7 +8,6 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
 #include <algorithm>
 #include <string>
 #include <vector>
@@ -37,8 +37,7 @@ constexpr int ScreenshareLayers::kMaxNumTemporalLayers;
 // been exceeded. This prevents needless keyframe requests.
 const int ScreenshareLayers::kMaxFrameIntervalMs = 2750;
 
-ScreenshareLayers::ScreenshareLayers(int num_temporal_layers,
-                                     Clock* clock)
+ScreenshareLayers::ScreenshareLayers(int num_temporal_layers, Clock* clock)
     : clock_(clock),
       number_of_temporal_layers_(
           std::min(kMaxNumTemporalLayers, num_temporal_layers)),
@@ -28,8 +28,7 @@ class ScreenshareLayers : public TemporalLayers {
   static const double kAcceptableTargetOvershoot;
   static const int kMaxFrameIntervalMs;
 
-  ScreenshareLayers(int num_temporal_layers,
-                    Clock* clock);
+  ScreenshareLayers(int num_temporal_layers, Clock* clock);
   virtual ~ScreenshareLayers();
 
   // Returns the recommended VP8 encode flags needed. May refresh the decoder
@@ -12,8 +12,8 @@
 
 #include <algorithm>
 #include <memory>
-#include <vector>
 #include <utility>
+#include <vector>
 
 #include "modules/video_coding/codecs/vp8/include/vp8_common_types.h"
 #include "rtc_base/checks.h"
@@ -181,12 +181,12 @@ struct RTPVideoHeaderVP9 {
   bool beginning_of_frame;  // True if this packet is the first in a VP9 layer
                             // frame.
   bool end_of_frame;  // True if this packet is the last in a VP9 layer frame.
-  bool ss_data_available;  // True if SS data is available in this payload
-                           // descriptor.
+  bool ss_data_available;   // True if SS data is available in this payload
+                            // descriptor.
   bool non_ref_for_inter_layer_pred;  // True for frame which is not used as
                                       // reference for inter-layer prediction.
-  int16_t picture_id;  // PictureID index, 15 bits;
-                       // kNoPictureId if PictureID does not exist.
+  int16_t picture_id;       // PictureID index, 15 bits;
+                            // kNoPictureId if PictureID does not exist.
   int16_t max_picture_id;   // Maximum picture ID index; either 0x7F or 0x7FFF;
   int16_t tl0_pic_idx;      // TL0PIC_IDX, 8 bits;
                             // kNoTl0PicIdx means no value provided.
@@ -85,7 +85,7 @@ std::vector<SpatialLayer> ConfigureSvcNormalVideo(size_t input_width,
     spatial_layer.minBitrate =
         std::max(static_cast<size_t>(min_bitrate), kMinVp9SvcBitrateKbps);
     spatial_layer.maxBitrate =
-      static_cast<int>((1.6 * num_pixels + 50 * 1000) / 1000);
+        static_cast<int>((1.6 * num_pixels + 50 * 1000) / 1000);
     spatial_layer.targetBitrate =
         (spatial_layer.minBitrate + spatial_layer.maxBitrate) / 2;
     spatial_layers.push_back(spatial_layer);
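As a worked example of the max-bitrate formula above (illustrative numbers, not from the patch): a 1280x720 layer has num_pixels = 921 600, so maxBitrate = (1.6 * 921 600 + 50 * 1000) / 1000 truncates to 1524 kbps, and with a minBitrate of, say, 30 kbps the targetBitrate midpoint is (30 + 1524) / 2 = 777 kbps.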
@@ -15,10 +15,10 @@
 #include <limits>
 #include <vector>
 
-#include "vpx/vpx_encoder.h"
-#include "vpx/vpx_decoder.h"
 #include "vpx/vp8cx.h"
 #include "vpx/vp8dx.h"
+#include "vpx/vpx_decoder.h"
+#include "vpx/vpx_encoder.h"
 
 #include "common_video/include/video_frame_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
@@ -164,9 +164,8 @@ bool VP9EncoderImpl::SetSvcRates(
       RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
       return false;
     }
-    rate_ratio[i] =
-        static_cast<float>(svc_params_.scaling_factor_num[i]) /
-        svc_params_.scaling_factor_den[i];
+    rate_ratio[i] = static_cast<float>(svc_params_.scaling_factor_num[i]) /
+                    svc_params_.scaling_factor_den[i];
     total += rate_ratio[i];
   }
 
@@ -409,7 +408,7 @@ int VP9EncoderImpl::NumberOfThreads(int width,
   } else if (width * height >= 640 * 360 && number_of_cores > 2) {
     return 2;
   } else {
-    // Use 2 threads for low res on ARM.
+// Use 2 threads for low res on ARM.
 #if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
     defined(WEBRTC_ANDROID)
     if (width * height >= 320 * 180 && number_of_cores > 2) {
|
||||
vpx_codec_control(encoder_, VP9E_SET_ROW_MT, 1);
|
||||
|
||||
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) && \
|
||||
!defined(ANDROID)
|
||||
!defined(ANDROID)
|
||||
// Do not enable the denoiser on ARM since optimization is pending.
|
||||
// Denoiser is on by default on other platforms.
|
||||
vpx_codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
|
||||
@ -716,11 +715,9 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
|
||||
if (vp9_info->ss_data_available) {
|
||||
vp9_info->spatial_layer_resolution_present = true;
|
||||
for (size_t i = 0; i < vp9_info->num_spatial_layers; ++i) {
|
||||
vp9_info->width[i] = codec_.width *
|
||||
svc_params_.scaling_factor_num[i] /
|
||||
vp9_info->width[i] = codec_.width * svc_params_.scaling_factor_num[i] /
|
||||
svc_params_.scaling_factor_den[i];
|
||||
vp9_info->height[i] = codec_.height *
|
||||
svc_params_.scaling_factor_num[i] /
|
||||
vp9_info->height[i] = codec_.height * svc_params_.scaling_factor_num[i] /
|
||||
svc_params_.scaling_factor_den[i];
|
||||
}
|
||||
if (!vp9_info->flexible_mode) {
|
||||
|
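To illustrate the arithmetic in this last hunk (numbers are illustrative, not from the patch): with codec_.width = 1280, codec_.height = 720 and a spatial layer whose scaling_factor_num/scaling_factor_den is 1/2, the advertised layer resolution is 1280 * 1 / 2 = 640 by 720 * 1 / 2 = 360.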