Use backticks not vertical bars to denote variables in comments for /modules/video_coding

Bug: webrtc:12338
Change-Id: Ia8a9adea291d594e4f59a6a1203a7bfb0758adac
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227165
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34684}
Author: Artem Titov
Date: 2021-08-09 13:02:57 +02:00
Committed by: WebRTC LUCI CQ
Parent: 7f854bce1f
Commit: dcd7fc7ea8
83 changed files with 268 additions and 268 deletions

@@ -69,9 +69,9 @@ ScopedAVPacket MakeScopedAVPacket() {
int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
AVFrame* av_frame,
int flags) {
// Set in |InitDecode|.
// Set in `InitDecode`.
H264DecoderImpl* decoder = static_cast<H264DecoderImpl*>(context->opaque);
// DCHECK values set in |InitDecode|.
// DCHECK values set in `InitDecode`.
RTC_DCHECK(decoder);
// Necessary capability to be allowed to provide our own buffers.
RTC_DCHECK(context->codec->capabilities | AV_CODEC_CAP_DR1);
@@ -85,12 +85,12 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
// |context->coded_width| due to reordering.
int width = av_frame->width;
int height = av_frame->height;
// See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
// See `lowres`, if used the decoder scales the image by 1/2^(lowres). This
// has implications on which resolutions are valid, but we don't use it.
RTC_CHECK_EQ(context->lowres, 0);
// Adjust the |width| and |height| to values acceptable by the decoder.
// Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
// |height| are larger than the actual image and the image has to be cropped
// Adjust the `width` and `height` to values acceptable by the decoder.
// Without this, FFmpeg may overflow the buffer. If modified, `width` and/or
// `height` are larger than the actual image and the image has to be cropped
// (top-left corner) after decoding to avoid visible borders to the right and
// bottom of the actual image.
avcodec_align_dimensions(context, &width, &height);
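
The comments above describe an align-then-crop scheme: the decoder gets a padded buffer, and the visible image is recovered by cropping from the top-left corner. A minimal standalone sketch of that idea, with a hypothetical kAlignment constant and AlignUp helper standing in for whatever padding avcodec_align_dimensions actually chooses:

    #include <cassert>

    constexpr int kAlignment = 32;  // hypothetical; FFmpeg picks its own alignment

    int AlignUp(int value) {
      return (value + kAlignment - 1) / kAlignment * kAlignment;
    }

    int main() {
      const int coded_width = 1280;
      const int coded_height = 719;  // odd height forces padding
      const int padded_width = AlignUp(coded_width);    // 1280
      const int padded_height = AlignUp(coded_height);  // 736
      assert(padded_width >= coded_width && padded_height >= coded_height);
      // Decode into the padded buffer, then crop back to coded_width x
      // coded_height from the top-left corner so the padding never shows up
      // as visible borders to the right and bottom.
      return 0;
    }
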
@@ -105,8 +105,8 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
return ret;
}
// The video frame is stored in |frame_buffer|. |av_frame| is FFmpeg's version
// of a video frame and will be set up to reference |frame_buffer|'s data.
// The video frame is stored in `frame_buffer`. `av_frame` is FFmpeg's version
// of a video frame and will be set up to reference `frame_buffer`'s data.
// FFmpeg expects the initial allocation to be zero-initialized according to
// http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
@@ -125,7 +125,7 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
av_frame->format = context->pix_fmt;
av_frame->reordered_opaque = context->reordered_opaque;
// Set |av_frame| members as required by FFmpeg.
// Set `av_frame` members as required by FFmpeg.
av_frame->data[kYPlaneIndex] = frame_buffer->MutableDataY();
av_frame->linesize[kYPlaneIndex] = frame_buffer->StrideY();
av_frame->data[kUPlaneIndex] = frame_buffer->MutableDataU();
@@ -152,8 +152,8 @@ int H264DecoderImpl::AVGetBuffer2(AVCodecContext* context,
}
void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) {
// The buffer pool recycles the buffer used by |video_frame| when there are no
// more references to it. |video_frame| is a thin buffer holder and is not
// The buffer pool recycles the buffer used by `video_frame` when there are no
// more references to it. `video_frame` is a thin buffer holder and is not
// recycled.
VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
delete video_frame;
@@ -208,8 +208,8 @@ int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
// Function used by FFmpeg to get buffers to store decoded frames in.
av_context_->get_buffer2 = AVGetBuffer2;
// |get_buffer2| is called with the context, there |opaque| can be used to get
// a pointer |this|.
// `get_buffer2` is called with the context, there `opaque` can be used to get
// a pointer `this`.
av_context_->opaque = this;
const AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
@@ -311,7 +311,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
h264_bitstream_parser_.ParseBitstream(input_image);
absl::optional<int> qp = h264_bitstream_parser_.GetLastSliceQp();
// Obtain the |video_frame| containing the decoded image.
// Obtain the `video_frame` containing the decoded image.
VideoFrame* input_frame =
static_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
RTC_DCHECK(input_frame);
@@ -377,7 +377,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
// interface to pass a VideoFrameBuffer instead of a VideoFrame?
decoded_image_callback_->Decoded(decoded_frame, absl::nullopt, qp);
// Stop referencing it, possibly freeing |input_frame|.
// Stop referencing it, possibly freeing `input_frame`.
av_frame_unref(av_frame_.get());
input_frame = nullptr;
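
The hunks above show the wiring these comments refer to: InitDecode stores `this` in the context's `opaque` field and installs the static AVGetBuffer2 callback, which casts `opaque` back to the decoder. A self-contained sketch of that trampoline pattern, with hypothetical FakeContext/FakeDecoder types standing in for the real FFmpeg structs:

    #include <cassert>
    #include <iostream>

    struct FakeContext {  // hypothetical stand-in for AVCodecContext
      void* opaque = nullptr;                                 // decoder stores `this` here
      int (*get_buffer2)(FakeContext*, int flags) = nullptr;  // allocation callback
    };

    class FakeDecoder {  // hypothetical stand-in for H264DecoderImpl
     public:
      void Init(FakeContext* context) {
        context->get_buffer2 = &FakeDecoder::GetBuffer2;  // static trampoline
        context->opaque = this;                           // recovered in the callback
      }
      static int GetBuffer2(FakeContext* context, int /*flags*/) {
        auto* decoder = static_cast<FakeDecoder*>(context->opaque);  // set in Init
        assert(decoder != nullptr);
        return decoder->AllocateFrameBuffer();
      }

     private:
      int AllocateFrameBuffer() {
        std::cout << "allocating a frame buffer from the decoder's own pool\n";
        return 0;
      }
    };

    int main() {
      FakeContext context;
      FakeDecoder decoder;
      decoder.Init(&context);
      // In real use the library, not the decoder, invokes the callback.
      return context.get_buffer2(&context, /*flags=*/0);
    }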

@@ -60,8 +60,8 @@ class H264DecoderImpl : public H264Decoder {
H264DecoderImpl();
~H264DecoderImpl() override;
// If |codec_settings| is NULL it is ignored. If it is not NULL,
// |codec_settings->codecType| must be |kVideoCodecH264|.
// If `codec_settings` is NULL it is ignored. If it is not NULL,
// |codec_settings->codecType| must be `kVideoCodecH264`.
int32_t InitDecode(const VideoCodec* codec_settings,
int32_t number_of_cores) override;
int32_t Release() override;
@@ -69,7 +69,7 @@ class H264DecoderImpl : public H264Decoder {
int32_t RegisterDecodeCompleteCallback(
DecodedImageCallback* callback) override;
// |missing_frames|, |fragmentation| and |render_time_ms| are ignored.
// `missing_frames`, `fragmentation` and `render_time_ms` are ignored.
int32_t Decode(const EncodedImage& input_image,
bool /*missing_frames*/,
int64_t render_time_ms = -1) override;
@@ -78,12 +78,12 @@ class H264DecoderImpl : public H264Decoder {
private:
// Called by FFmpeg when it needs a frame buffer to store decoded frames in.
// The |VideoFrame| returned by FFmpeg at |Decode| originate from here. Their
// buffers are reference counted and freed by FFmpeg using |AVFreeBuffer2|.
// The `VideoFrame` returned by FFmpeg at `Decode` originate from here. Their
// buffers are reference counted and freed by FFmpeg using `AVFreeBuffer2`.
static int AVGetBuffer2(AVCodecContext* context,
AVFrame* av_frame,
int flags);
// Called by FFmpeg when it is done with a video frame, see |AVGetBuffer2|.
// Called by FFmpeg when it is done with a video frame, see `AVGetBuffer2`.
static void AVFreeBuffer2(void* opaque, uint8_t* data);
bool IsInitialized() const;
@@ -92,7 +92,7 @@ class H264DecoderImpl : public H264Decoder {
void ReportInit();
void ReportError();
// Used by ffmpeg via |AVGetBuffer2()| to allocate I420 images.
// Used by ffmpeg via `AVGetBuffer2()` to allocate I420 images.
VideoFrameBufferPool ffmpeg_buffer_pool_;
// Used to allocate NV12 images if NV12 output is preferred.
VideoFrameBufferPool output_buffer_pool_;

@@ -88,11 +88,11 @@ VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
} // namespace
// Helper method used by H264EncoderImpl::Encode.
// Copies the encoded bytes from |info| to |encoded_image|. The
// Copies the encoded bytes from `info` to `encoded_image`. The
// |encoded_image->_buffer| may be deleted and reallocated if a bigger buffer is
// required.
//
// After OpenH264 encoding, the encoded bytes are stored in |info| spread out
// After OpenH264 encoding, the encoded bytes are stored in `info` spread out
// over a number of layers and "NAL units". Each NAL unit is a fragment starting
// with the four-byte start code {0,0,0,1}. All of this data (including the
// start codes) is copied to the |encoded_image->_buffer|.
@@ -104,7 +104,7 @@ static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) {
RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0);
// Ensure |required_capacity| will not overflow.
// Ensure `required_capacity` will not overflow.
RTC_CHECK_LE(layerInfo.pNalLengthInByte[nal],
std::numeric_limits<size_t>::max() - required_capacity);
required_capacity += layerInfo.pNalLengthInByte[nal];
@@ -124,8 +124,8 @@ static void RtpFragmentize(EncodedImage* encoded_image, SFrameBSInfo* info) {
// Iterate NAL units making up this layer, noting fragments.
size_t layer_len = 0;
for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++frag) {
// Because the sum of all layer lengths, |required_capacity|, fits in a
// |size_t|, we know that any indices in-between will not overflow.
// Because the sum of all layer lengths, `required_capacity`, fits in a
// `size_t`, we know that any indices in-between will not overflow.
RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 0], start_code[0]);
RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len + 1], start_code[1]);
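
The RtpFragmentize comments above boil down to a two-pass scheme: first sum the NAL unit lengths (start codes included) with an overflow guard, then copy. A compact sketch of the first pass, with a hypothetical LayerInfo standing in for OpenH264's SLayerBSInfo:

    #include <cassert>
    #include <cstddef>
    #include <limits>
    #include <vector>

    struct LayerInfo {  // hypothetical stand-in for SLayerBSInfo
      std::vector<size_t> nal_length_in_bytes;  // each length includes the 4-byte start code
    };

    size_t RequiredCapacity(const std::vector<LayerInfo>& layers) {
      size_t required_capacity = 0;
      for (const LayerInfo& layer : layers) {
        for (size_t nal_length : layer.nal_length_in_bytes) {
          // Same guard as the RTC_CHECK_LE above: the running sum must not overflow.
          assert(nal_length <=
                 std::numeric_limits<size_t>::max() - required_capacity);
          required_capacity += nal_length;
        }
      }
      return required_capacity;  // capacity to reserve before copying the bitstream
    }
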
@@ -459,7 +459,7 @@ int32_t H264EncoderImpl::Encode(
}
if (send_key_frame) {
// API doc says ForceIntraFrame(false) does nothing, but calling this
// function forces a key frame regardless of the |bIDR| argument's value.
// function forces a key frame regardless of the `bIDR` argument's value.
// (If every frame is a key frame we get lag/delays.)
encoders_[i]->ForceIntraFrame(true);
configurations_[i].key_frame_request = false;
@@ -485,7 +485,7 @@ int32_t H264EncoderImpl::Encode(
encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);
// Split encoded image up into fragments. This also updates
// |encoded_image_|.
// `encoded_image_`.
RtpFragmentize(&encoded_images_[i], &info);
// Encoder can skip frames to save bandwidth in which case
@@ -552,8 +552,8 @@ SEncParamExt H264EncoderImpl::CreateEncoderParams(size_t i) const {
// The following parameters are extension parameters (they're in SEncParamExt,
// not in SEncParamBase).
encoder_params.bEnableFrameSkip = configurations_[i].frame_dropping_on;
// |uiIntraPeriod| - multiple of GOP size
// |keyFrameInterval| - number of frames
// `uiIntraPeriod` - multiple of GOP size
// `keyFrameInterval` - number of frames
encoder_params.uiIntraPeriod = configurations_[i].key_frame_interval;
// Reuse SPS id if possible. This helps to avoid reset of chromium HW decoder
// on each key-frame.

@@ -58,7 +58,7 @@ class H264EncoderImpl : public H264Encoder {
~H264EncoderImpl() override;
// |settings.max_payload_size| is ignored.
// The following members of |codec_settings| are used. The rest are ignored.
// The following members of `codec_settings` are used. The rest are ignored.
// - codecType (must be kVideoCodecH264)
// - targetBitrate
// - maxFramerate

@@ -32,7 +32,7 @@ CreateH264Format(H264Profile profile,
const std::string& packetization_mode);
// Set to disable the H.264 encoder/decoder implementations that are provided if
// |rtc_use_h264| build flag is true (if false, this function does nothing).
// `rtc_use_h264` build flag is true (if false, this function does nothing).
// This function should only be called before or during WebRTC initialization
// and is not thread-safe.
RTC_EXPORT void DisableRtcUseH264();

@@ -24,7 +24,7 @@ namespace webrtc {
class MultiplexDecoderAdapter : public VideoDecoder {
public:
// |factory| is not owned and expected to outlive this class.
// `factory` is not owned and expected to outlive this class.
MultiplexDecoderAdapter(VideoDecoderFactory* factory,
const SdpVideoFormat& associated_format,
bool supports_augmenting_data = false);

@@ -33,7 +33,7 @@ enum AlphaCodecStream {
class MultiplexEncoderAdapter : public VideoEncoder {
public:
// |factory| is not owned and expected to outlive this class.
// `factory` is not owned and expected to outlive this class.
MultiplexEncoderAdapter(VideoEncoderFactory* factory,
const SdpVideoFormat& associated_format,
bool supports_augmenting_data = false);

@@ -25,11 +25,11 @@ namespace webrtc {
// bitstream data.
struct MultiplexImageHeader {
// The number of frame components making up the complete picture data.
// For example, |frame_count| = 2 for the case of YUV frame with Alpha frame.
// For example, `frame_count` = 2 for the case of YUV frame with Alpha frame.
uint8_t component_count;
// The increasing image ID given by the encoder. For different components
// of a single picture, they have the same |picture_index|.
// of a single picture, they have the same `picture_index`.
uint16_t image_index;
// The location of the first MultiplexImageComponentHeader in the bitstream,
@@ -111,7 +111,7 @@ class MultiplexEncodedImagePacker {
// Note: It is caller responsibility to release the buffer of the result.
static EncodedImage PackAndRelease(const MultiplexImage& image);
// Note: The image components just share the memory with |combined_image|.
// Note: The image components just share the memory with `combined_image`.
static MultiplexImage Unpack(const EncodedImage& combined_image);
};

@@ -22,7 +22,7 @@
namespace webrtc {
// Callback wrapper that helps distinguish returned results from |encoders_|
// Callback wrapper that helps distinguish returned results from `encoders_`
// instances.
class MultiplexEncoderAdapter::AdapterEncodedImageCallback
: public webrtc::EncodedImageCallback {
@@ -158,7 +158,7 @@ int MultiplexEncoderAdapter::Encode(
}
// The input image is forwarded as-is, unless it is a native buffer and
// |supports_augmented_data_| is true in which case we need to map it in order
// `supports_augmented_data_` is true in which case we need to map it in order
// to access the underlying AugmentedVideoFrameBuffer.
VideoFrame forwarded_image = input_image;
if (supports_augmented_data_ &&
@@ -216,7 +216,7 @@ int MultiplexEncoderAdapter::Encode(
encoders_[kYUVStream]->Encode(forwarded_image, &adjusted_frame_types);
// If we do not receive an alpha frame, we send a single frame for this
// |picture_index_|. The receiver will receive |frame_count| as 1 which
// `picture_index_`. The receiver will receive `frame_count` as 1 which
// specifies this case.
if (rv || !has_alpha)
return rv;
@@ -259,7 +259,7 @@ void MultiplexEncoderAdapter::SetRates(
bitrate_allocation.SetBitrate(
0, 0, parameters.bitrate.GetBitrate(0, 0) - augmenting_data_size_);
for (auto& encoder : encoders_) {
// TODO(emircan): |framerate| is used to calculate duration in encoder
// TODO(emircan): `framerate` is used to calculate duration in encoder
// instances. We report the total frame rate to keep real time for now.
// Remove this after refactoring duration logic.
encoder->SetRates(RateControlParameters(

@@ -201,7 +201,7 @@ class TestMultiplexAdapter : public VideoCodecUnitTest,
};
// TODO(emircan): Currently VideoCodecUnitTest tests do a complete setup
// step that goes beyond constructing |decoder_|. Simplify these tests to do
// step that goes beyond constructing `decoder_`. Simplify these tests to do
// less.
TEST_P(TestMultiplexAdapter, ConstructAndDestructDecoder) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());

@@ -83,7 +83,7 @@ class VideoCodecUnitTest : public ::testing::Test {
CodecSpecificInfo* codec_specific_info);
// Helper methods for waiting for multiple encoded frames. Caller must
// define how many frames are to be waited for via |num_frames| before calling
// define how many frames are to be waited for via `num_frames` before calling
// Encode(). Then, they can expect to retrive them via WaitForEncodedFrames().
void SetWaitForEncodedFramesThreshold(size_t num_frames);
bool WaitForEncodedFrames(

@@ -168,7 +168,7 @@ void VideoCodecTestFixtureImpl::Config::SetCodecSettings(
VideoCodecType codec_type = PayloadStringToCodecType(codec_name);
webrtc::test::CodecSettings(codec_type, &codec_settings);
// TODO(brandtr): Move the setting of |width| and |height| to the tests, and
// TODO(brandtr): Move the setting of `width` and `height` to the tests, and
// DCHECK that they are set before initializing the codec instead.
codec_settings.width = static_cast<uint16_t>(width);
codec_settings.height = static_cast<uint16_t>(height);

@@ -202,7 +202,7 @@ VideoStatistics VideoCodecTestStatsImpl::SliceAndCalcVideoStatistic(
const size_t target_bitrate_kbps =
CalcLayerTargetBitrateKbps(first_frame_num, last_frame_num, spatial_idx,
temporal_idx, aggregate_independent_layers);
RTC_CHECK_GT(target_bitrate_kbps, 0); // We divide by |target_bitrate_kbps|.
RTC_CHECK_GT(target_bitrate_kbps, 0); // We divide by `target_bitrate_kbps`.
for (size_t frame_num = first_frame_num; frame_num <= last_frame_num;
++frame_num) {

@@ -31,7 +31,7 @@ class VideoCodecTestStatsImpl : public VideoCodecTestStats {
// Creates a FrameStatistics for the next frame to be processed.
void AddFrame(const FrameStatistics& frame_stat);
// Returns the FrameStatistics corresponding to |frame_number| or |timestamp|.
// Returns the FrameStatistics corresponding to `frame_number` or `timestamp`.
FrameStatistics* GetFrame(size_t frame_number, size_t spatial_idx);
FrameStatistics* GetFrameWithTimestamp(size_t timestamp, size_t spatial_idx);

@@ -591,7 +591,7 @@ void VideoProcessor::FrameDecoded(const VideoFrame& decoded_frame,
// Erase all buffered input frames that we have moved past for all
// simulcast/spatial layers. Never buffer more than
// |kMaxBufferedInputFrames| frames, to protect against long runs of
// `kMaxBufferedInputFrames` frames, to protect against long runs of
// consecutive frame drops for a particular layer.
const auto min_last_decoded_frame_num = std::min_element(
last_decoded_frame_num_.cbegin(), last_decoded_frame_num_.cend());

@@ -219,7 +219,7 @@ class VideoProcessor {
std::vector<std::unique_ptr<VideoProcessorDecodeCompleteCallback>>
decode_callback_;
// Each call to ProcessFrame() will read one frame from |input_frame_reader_|.
// Each call to ProcessFrame() will read one frame from `input_frame_reader_`.
FrameReader* const input_frame_reader_;
// Input frames are used as reference for frame quality evaluations.

@@ -310,7 +310,7 @@ void DefaultTemporalLayers::OnRatesUpdated(
RTC_DCHECK_LT(stream_index, StreamCount());
RTC_DCHECK_GT(bitrates_bps.size(), 0);
RTC_DCHECK_LE(bitrates_bps.size(), num_layers_);
// |bitrates_bps| uses individual rate per layer, but Vp8EncoderConfig wants
// `bitrates_bps` uses individual rate per layer, but Vp8EncoderConfig wants
// the accumulated rate, so sum them up.
new_bitrates_bps_ = bitrates_bps;
new_bitrates_bps_->resize(num_layers_);
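
The conversion that comment describes, individual per-layer rates summed into the accumulated rates Vp8EncoderConfig expects, can be sketched as a small helper (illustrative only, not the WebRTC function itself):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> AccumulateLayerRates(std::vector<uint32_t> bitrates_bps) {
      for (std::size_t i = 1; i < bitrates_bps.size(); ++i) {
        bitrates_bps[i] += bitrates_bps[i - 1];  // entry i now includes all lower layers
      }
      return bitrates_bps;
    }
    // Example: per-layer {100000, 50000, 50000} becomes {100000, 150000, 200000}.
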
@@ -419,11 +419,11 @@ Vp8FrameConfig DefaultTemporalLayers::NextFrameConfig(size_t stream_index,
// base-layer references).
tl_config.layer_sync = IsSyncFrame(tl_config);
// Increment frame age, this needs to be in sync with |pattern_idx_|,
// Increment frame age, this needs to be in sync with `pattern_idx_`,
// so must update it here. Resetting age to 0 must be done when encoding is
// complete though, and so in the case of pipelining encoder it might lag.
// To prevent this data spill over into the next iteration,
// the |pedning_frames_| map is reset in loops. If delay is constant,
// the `pedning_frames_` map is reset in loops. If delay is constant,
// the relative age should still be OK for the search order.
for (size_t& n : frames_since_buffer_refresh_) {
++n;
@@ -444,7 +444,7 @@ Vp8FrameConfig DefaultTemporalLayers::NextFrameConfig(size_t stream_index,
void DefaultTemporalLayers::ValidateReferences(BufferFlags* flags,
Vp8BufferReference ref) const {
// Check if the buffer specified by |ref| is actually referenced, and if so
// Check if the buffer specified by `ref` is actually referenced, and if so
// if it also a dynamically updating one (buffers always just containing
// keyframes are always safe to reference).
if ((*flags & BufferFlags::kReference) &&
@@ -552,7 +552,7 @@ void DefaultTemporalLayers::OnEncodeDone(size_t stream_index,
for (Vp8BufferReference buffer : kAllBuffers) {
if (is_static_buffer_[BufferToIndex(buffer)]) {
// Update frame count of all kf-only buffers, regardless of state of
// |pending_frames_|.
// `pending_frames_`.
ResetNumFramesSinceBufferRefresh(buffer);
} else {
// Key-frames update all buffers, this should be reflected when

@@ -653,8 +653,8 @@ TEST_F(TemporalLayersTest, KeyFrame) {
uint32_t timestamp = 0;
for (int i = 0; i < 7; ++i) {
// Temporal pattern starts from 0 after key frame. Let the first |i| - 1
// frames be delta frames, and the |i|th one key frame.
// Temporal pattern starts from 0 after key frame. Let the first `i` - 1
// frames be delta frames, and the `i`th one key frame.
for (int j = 1; j <= i; ++j) {
// Since last frame was always a keyframe and thus index 0 in the pattern,
// this loop starts at index 1.
@@ -780,7 +780,7 @@ TEST_P(TemporalLayersReferenceTest, ValidFrameConfigs) {
// of the buffer state; which buffers references which temporal layers (if
// (any). If a given buffer is never updated, it is legal to reference it
// even for sync frames. In order to be general, don't assume TL0 always
// updates |last|.
// updates `last`.
std::vector<Vp8FrameConfig> tl_configs(kMaxPatternLength);
for (int i = 0; i < kMaxPatternLength; ++i) {
Vp8FrameConfig tl_config = tl.NextFrameConfig(0, timestamp_);

@@ -46,7 +46,7 @@ class LibvpxVp8Decoder : public VideoDecoder {
DeblockParams(int max_level, int degrade_qp, int min_qp)
: max_level(max_level), degrade_qp(degrade_qp), min_qp(min_qp) {}
int max_level; // Deblocking strength: [0, 16].
int degrade_qp; // If QP value is below, start lowering |max_level|.
int degrade_qp; // If QP value is below, start lowering `max_level`.
int min_qp; // If QP value is below, turn off deblocking.
};

@@ -107,10 +107,10 @@ bool MaybeSetNewValue(const absl::optional<T>& new_value,
}
}
// Adds configuration from |new_config| to |base_config|. Both configs consist
// of optionals, and only optionals which are set in |new_config| can have
// an effect. (That is, set values in |base_config| cannot be unset.)
// Returns |true| iff any changes were made to |base_config|.
// Adds configuration from `new_config` to `base_config`. Both configs consist
// of optionals, and only optionals which are set in `new_config` can have
// an effect. (That is, set values in `base_config` cannot be unset.)
// Returns `true` iff any changes were made to `base_config`.
bool MaybeExtendVp8EncoderConfig(const Vp8EncoderConfig& new_config,
Vp8EncoderConfig* base_config) {
bool changes_made = false;
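
A minimal sketch of the merge rule those comments describe, using std::optional and a hypothetical TinyConfig in place of absl::optional and Vp8EncoderConfig: a field set in the new config overrides the base config, a set field can never be unset, and the return value reports whether anything actually changed.

    #include <optional>

    template <typename T>
    bool MaybeSetNewValue(const std::optional<T>& new_value,
                          std::optional<T>* base_value) {
      if (new_value.has_value() && new_value != *base_value) {
        *base_value = new_value;  // override, never unset
        return true;
      }
      return false;
    }

    struct TinyConfig {  // hypothetical stand-in for Vp8EncoderConfig
      std::optional<unsigned int> rc_target_bitrate;
      std::optional<unsigned int> g_threads;
    };

    bool MaybeExtendTinyConfig(const TinyConfig& new_config, TinyConfig* base_config) {
      bool changes_made = false;
      changes_made |= MaybeSetNewValue(new_config.rc_target_bitrate,
                                       &base_config->rc_target_bitrate);
      changes_made |= MaybeSetNewValue(new_config.g_threads, &base_config->g_threads);
      return changes_made;  // true iff any field of `base_config` changed
    }
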
@@ -711,7 +711,7 @@ int LibvpxVp8Encoder::GetCpuSpeed(int width, int height) {
#else
// For non-ARM, increase encoding complexity (i.e., use lower speed setting)
// if resolution is below CIF. Otherwise, keep the default/user setting
// (|cpu_speed_default_|) set on InitEncode via VP8().complexity.
// (`cpu_speed_default_`) set on InitEncode via VP8().complexity.
if (width * height < 352 * 288)
return (cpu_speed_default_ < -4) ? -4 : cpu_speed_default_;
else
@@ -976,8 +976,8 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
flags[i] = send_key_frame ? VPX_EFLAG_FORCE_KF : EncodeFlags(tl_configs[i]);
}
// Scale and map buffers and set |raw_images_| to hold pointers to the result.
// Because |raw_images_| are set to hold pointers to the prepared buffers, we
// Scale and map buffers and set `raw_images_` to hold pointers to the result.
// Because `raw_images_` are set to hold pointers to the prepared buffers, we
// need to keep these buffers alive through reference counting until after
// encoding is complete.
std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers =
@@ -1017,7 +1017,7 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
// Set the encoder frame flags and temporal layer_id for each spatial stream.
// Note that streams are defined starting from lowest resolution at
// position 0 to highest resolution at position |encoders_.size() - 1|,
// whereas |encoder_| is from highest to lowest resolution.
// whereas `encoder_` is from highest to lowest resolution.
for (size_t i = 0; i < encoders_.size(); ++i) {
const size_t stream_idx = encoders_.size() - 1 - i;
@@ -1048,7 +1048,7 @@ int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
(num_tries == 1 &&
error == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT)) {
++num_tries;
// Note we must pass 0 for |flags| field in encode call below since they are
// Note we must pass 0 for `flags` field in encode call below since they are
// set above in |libvpx_interface_->vpx_codec_control_| function for each
// encoder/spatial layer.
error = libvpx_->codec_encode(&encoders_[0], &raw_images_[0], timestamp_,
@@ -1237,8 +1237,8 @@ VideoEncoder::EncoderInfo LibvpxVp8Encoder::GetEncoderInfo() const {
VideoFrameBuffer::Type::kNV12};
if (inited_) {
// |encoder_idx| is libvpx index where 0 is highest resolution.
// |si| is simulcast index, where 0 is lowest resolution.
// `encoder_idx` is libvpx index where 0 is highest resolution.
// `si` is simulcast index, where 0 is lowest resolution.
for (size_t si = 0, encoder_idx = encoders_.size() - 1;
si < encoders_.size(); ++si, --encoder_idx) {
info.fps_allocation[si].clear();
@@ -1308,7 +1308,7 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
if (buffer->type() != VideoFrameBuffer::Type::kNative) {
// |buffer| is already mapped.
// `buffer` is already mapped.
mapped_buffer = buffer;
} else {
// Attempt to map to one of the supported formats.
@@ -1330,7 +1330,7 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
// Because |buffer| had to be converted, use |converted_buffer| instead...
// Because `buffer` had to be converted, use `converted_buffer` instead...
buffer = mapped_buffer = converted_buffer;
}
@@ -1349,15 +1349,15 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
RTC_NOTREACHED();
}
// Prepare |raw_images_| from |mapped_buffer| and, if simulcast, scaled
// versions of |buffer|.
// Prepare `raw_images_` from `mapped_buffer` and, if simulcast, scaled
// versions of `buffer`.
std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers;
SetRawImagePlanes(&raw_images_[0], mapped_buffer);
prepared_buffers.push_back(mapped_buffer);
for (size_t i = 1; i < encoders_.size(); ++i) {
// Native buffers should implement optimized scaling and is the preferred
// buffer to scale. But if the buffer isn't native, it should be cheaper to
// scale from the previously prepared buffer which is smaller than |buffer|.
// scale from the previously prepared buffer which is smaller than `buffer`.
VideoFrameBuffer* buffer_to_scale =
buffer->type() == VideoFrameBuffer::Type::kNative
? buffer.get()

@@ -83,7 +83,7 @@ class LibvpxVp8Encoder : public VideoEncoder {
int GetEncodedPartitions(const VideoFrame& input_image,
bool retransmission_allowed);
// Set the stream state for stream |stream_idx|.
// Set the stream state for stream `stream_idx`.
void SetStreamState(bool send_stream, int stream_idx);
uint32_t MaxIntraTarget(uint32_t optimal_buffer_size);
@@ -95,8 +95,8 @@ class LibvpxVp8Encoder : public VideoEncoder {
bool UpdateVpxConfiguration(size_t stream_index);
void MaybeUpdatePixelFormat(vpx_img_fmt fmt);
// Prepares |raw_image_| to reference image data of |buffer|, or of mapped or
// scaled versions of |buffer|. Returns a list of buffers that got referenced
// Prepares `raw_image_` to reference image data of `buffer`, or of mapped or
// scaled versions of `buffer`. Returns a list of buffers that got referenced
// as a result, allowing the caller to keep references to them until after
// encoding has finished. On failure to convert the buffer, an empty list is
// returned.

@@ -255,7 +255,7 @@ void ScreenshareLayers::OnRatesUpdated(
RTC_DCHECK_GE(bitrates_bps.size(), 1);
RTC_DCHECK_LE(bitrates_bps.size(), 2);
// |bitrates_bps| uses individual rates per layer, but we want to use the
// `bitrates_bps` uses individual rates per layer, but we want to use the
// accumulated rate here.
uint32_t tl0_kbps = bitrates_bps[0] / 1000;
uint32_t tl1_kbps = tl0_kbps;
@@ -354,7 +354,7 @@ void ScreenshareLayers::OnEncodeDone(size_t stream_index,
RTC_DCHECK_EQ(vp8_info.referencedBuffersCount, 0u);
RTC_DCHECK_EQ(vp8_info.updatedBuffersCount, 0u);
// Note that |frame_config| is not derefernced if |is_keyframe|,
// Note that `frame_config` is not derefernced if `is_keyframe`,
// meaning it's never dereferenced if the optional may be unset.
for (int i = 0; i < static_cast<int>(Vp8FrameConfig::Buffer::kCount); ++i) {
bool references = false;

@@ -34,7 +34,7 @@ class VP9Encoder : public VideoEncoder {
// Deprecated. Returns default implementation using VP9 Profile 0.
// TODO(emircan): Remove once this is no longer used.
static std::unique_ptr<VP9Encoder> Create();
// Parses VP9 Profile from |codec| and returns the appropriate implementation.
// Parses VP9 Profile from `codec` and returns the appropriate implementation.
static std::unique_ptr<VP9Encoder> Create(const cricket::VideoCodec& codec);
~VP9Encoder() override {}

@@ -240,7 +240,7 @@ int LibvpxVp9Decoder::Decode(const EncodedImage& input_image,
buffer = nullptr; // Triggers full frame concealment.
}
// During decode libvpx may get and release buffers from
// |libvpx_buffer_pool_|. In practice libvpx keeps a few (~3-4) buffers alive
// `libvpx_buffer_pool_`. In practice libvpx keeps a few (~3-4) buffers alive
// at a time.
if (vpx_codec_decode(decoder_, buffer,
static_cast<unsigned int>(input_image.size()), 0,
@@ -273,7 +273,7 @@ int LibvpxVp9Decoder::ReturnFrame(
return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
}
// This buffer contains all of |img|'s image data, a reference counted
// This buffer contains all of `img`'s image data, a reference counted
// Vp9FrameBuffer. (libvpx is done with the buffers after a few
// vpx_codec_decode calls or vpx_codec_destroy).
rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer> img_buffer =
@@ -310,7 +310,7 @@ int LibvpxVp9Decoder::ReturnFrame(
img->stride[VPX_PLANE_V],
// WrappedI420Buffer's mechanism for allowing the release of its
// frame buffer is through a callback function. This is where we
// should release |img_buffer|.
// should release `img_buffer`.
[img_buffer] {});
}
} else if (img->fmt == VPX_IMG_FMT_I444) {
@@ -321,7 +321,7 @@ int LibvpxVp9Decoder::ReturnFrame(
img->stride[VPX_PLANE_V],
// WrappedI444Buffer's mechanism for allowing the release of its
// frame buffer is through a callback function. This is where we
// should release |img_buffer|.
// should release `img_buffer`.
[img_buffer] {});
} else {
RTC_LOG(LS_ERROR)
@@ -373,7 +373,7 @@ int LibvpxVp9Decoder::Release() {
if (decoder_ != nullptr) {
if (inited_) {
// When a codec is destroyed libvpx will release any buffers of
// |libvpx_buffer_pool_| it is currently using.
// `libvpx_buffer_pool_` it is currently using.
if (vpx_codec_destroy(decoder_)) {
ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
}

@@ -1041,7 +1041,7 @@ int LibvpxVp9Encoder::Encode(const VideoFrame& input_image,
// doing this.
input_image_ = &input_image;
// In case we need to map the buffer, |mapped_buffer| is used to keep it alive
// In case we need to map the buffer, `mapped_buffer` is used to keep it alive
// through reference counting until after encoding has finished.
rtc::scoped_refptr<const VideoFrameBuffer> mapped_buffer;
const I010BufferInterface* i010_buffer;
@@ -1888,7 +1888,7 @@ rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
if (buffer->type() != VideoFrameBuffer::Type::kNative) {
// |buffer| is already mapped.
// `buffer` is already mapped.
mapped_buffer = buffer;
} else {
// Attempt to map to one of the supported formats.
@@ -1910,11 +1910,11 @@ rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
// Because |buffer| had to be converted, use |converted_buffer| instead.
// Because `buffer` had to be converted, use `converted_buffer` instead.
buffer = mapped_buffer = converted_buffer;
}
// Prepare |raw_| from |mapped_buffer|.
// Prepare `raw_` from `mapped_buffer`.
switch (mapped_buffer->type()) {
case VideoFrameBuffer::Type::kI420:
case VideoFrameBuffer::Type::kI420A: {

@@ -103,8 +103,8 @@ class LibvpxVp9Encoder : public VP9Encoder {
size_t SteadyStateSize(int sid, int tid);
void MaybeRewrapRawWithFormat(const vpx_img_fmt fmt);
// Prepares |raw_| to reference image data of |buffer|, or of mapped or scaled
// versions of |buffer|. Returns the buffer that got referenced as a result,
// Prepares `raw_` to reference image data of `buffer`, or of mapped or scaled
// versions of `buffer`. Returns the buffer that got referenced as a result,
// allowing the caller to keep a reference to it until after encoding has
// finished. On failure to convert the buffer, null is returned.
rtc::scoped_refptr<VideoFrameBuffer> PrepareBufferForProfile0(
@@ -202,9 +202,9 @@ class LibvpxVp9Encoder : public VP9Encoder {
// Flags that can affect speed vs quality tradeoff, and are configureable per
// resolution ranges.
struct PerformanceFlags {
// If false, a lookup will be made in |settings_by_resolution| base on the
// If false, a lookup will be made in `settings_by_resolution` base on the
// highest currently active resolution, and the overall speed then set to
// to the |base_layer_speed| matching that entry.
// to the `base_layer_speed` matching that entry.
// If true, each active resolution will have it's speed and deblock_mode set
// based on it resolution, and the high layer speed configured for non
// base temporal layer frames.
@@ -223,9 +223,9 @@ class LibvpxVp9Encoder : public VP9Encoder {
// setting B at wvga and above, you'd use map {{0, A}, {230400, B}}.
std::map<int, ParameterSet> settings_by_resolution;
};
// Performance flags, ordered by |min_pixel_count|.
// Performance flags, ordered by `min_pixel_count`.
const PerformanceFlags performance_flags_;
// Caching of of |speed_configs_|, where index i maps to the resolution as
// Caching of of `speed_configs_`, where index i maps to the resolution as
// specified in |codec_.spatialLayer[i]|.
std::vector<PerformanceFlags::ParameterSet>
performance_flags_by_spatial_index_;

@@ -538,7 +538,7 @@ TEST(Vp9ImplTest, EnableDisableSpatialLayersWithSvcController) {
bitrate_allocation, codec_settings.maxFramerate));
frames = producer.SetNumInputFrames(num_frames_to_encode).Encode();
// With |sl_idx| spatial layer disabled, there are |sl_idx| spatial layers
// With `sl_idx` spatial layer disabled, there are `sl_idx` spatial layers
// left.
ASSERT_THAT(frames, SizeIs(num_frames_to_encode * sl_idx));
for (size_t i = 0; i < frames.size(); ++i) {

@@ -44,7 +44,7 @@ bool Vp9FrameBufferPool::InitializeVpxUsePool(
&Vp9FrameBufferPool::VpxGetFrameBuffer,
// Called by libvpx when it no longer uses a frame buffer.
&Vp9FrameBufferPool::VpxReleaseFrameBuffer,
// |this| will be passed as |user_priv| to VpxGetFrameBuffer.
// `this` will be passed as `user_priv` to VpxGetFrameBuffer.
this)) {
// Failed to configure libvpx to use Vp9FrameBufferPool.
return false;
@@ -152,11 +152,11 @@ int32_t Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
fb->data = buffer->GetData();
fb->size = buffer->GetDataSize();
// Store Vp9FrameBuffer* in |priv| for use in VpxReleaseFrameBuffer.
// This also makes vpx_codec_get_frame return images with their |fb_priv| set
// to |buffer| which is important for external reference counting.
// Release from refptr so that the buffer's |ref_count_| remains 1 when
// |buffer| goes out of scope.
// Store Vp9FrameBuffer* in `priv` for use in VpxReleaseFrameBuffer.
// This also makes vpx_codec_get_frame return images with their `fb_priv` set
// to `buffer` which is important for external reference counting.
// Release from refptr so that the buffer's `ref_count_` remains 1 when
// `buffer` goes out of scope.
fb->priv = static_cast<void*>(buffer.release());
return 0;
}
@@ -171,7 +171,7 @@ int32_t Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
buffer->Release();
// When libvpx fails to decode and you continue to try to decode (and fail)
// libvpx can for some reason try to release the same buffer multiple times.
// Setting |priv| to null protects against trying to Release multiple times.
// Setting `priv` to null protects against trying to Release multiple times.
fb->priv = nullptr;
}
return 0;
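
Taken together, the two callbacks implement the handshake these comments describe: the pool parks a pointer to its own buffer in `fb->priv` when handing data to libvpx and finds it again on release. A self-contained sketch of just that `priv` round trip, with a hypothetical FakeFrameBuffer standing in for vpx_codec_frame_buffer and plain ownership instead of the pool's reference counting and recycling:

    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct FakeFrameBuffer {  // hypothetical stand-in for vpx_codec_frame_buffer
      uint8_t* data = nullptr;
      size_t size = 0;
      void* priv = nullptr;  // the pool's handle to its own buffer
    };

    class FakePool {
     public:
      // Hand the codec a buffer of at least `min_size` and remember it via `priv`.
      static int GetFrameBuffer(size_t min_size, FakeFrameBuffer* fb) {
        auto buffer = std::make_unique<std::vector<uint8_t>>(min_size);
        fb->data = buffer->data();
        fb->size = buffer->size();
        fb->priv = buffer.release();  // ownership travels through `priv`
        return 0;
      }
      // Called when the codec is done; nulling `priv` makes a double release
      // harmless, mirroring the protection noted above.
      static int ReleaseFrameBuffer(FakeFrameBuffer* fb) {
        delete static_cast<std::vector<uint8_t>*>(fb->priv);
        fb->priv = nullptr;
        return 0;
      }
    };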

@@ -83,7 +83,7 @@ class Vp9FrameBufferPool {
// buffers used to decompress frames. This is only supported for VP9.
bool InitializeVpxUsePool(vpx_codec_ctx* vpx_codec_context);
// Gets a frame buffer of at least |min_size|, recycling an available one or
// Gets a frame buffer of at least `min_size`, recycling an available one or
// creating a new one. When no longer referenced from the outside the buffer
// becomes recyclable.
rtc::scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
@@ -99,10 +99,10 @@ class Vp9FrameBufferPool {
// InitializeVpxUsePool configures libvpx to call this function when it needs
// a new frame buffer. Parameters:
// |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it up
// `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
// to be a pointer to the pool.
// |min_size| Minimum size needed by libvpx (to decompress a frame).
// |fb| Pointer to the libvpx frame buffer object, this is updated to
// `min_size` Minimum size needed by libvpx (to decompress a frame).
// `fb` Pointer to the libvpx frame buffer object, this is updated to
// use the pool's buffer.
// Returns 0 on success. Returns < 0 on failure.
static int32_t VpxGetFrameBuffer(void* user_priv,
@@ -111,15 +111,15 @@ class Vp9FrameBufferPool {
// InitializeVpxUsePool configures libvpx to call this function when it has
// finished using one of the pool's frame buffer. Parameters:
// |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it up
// `user_priv` Private data passed to libvpx, InitializeVpxUsePool sets it up
// to be a pointer to the pool.
// |fb| Pointer to the libvpx frame buffer object, its |priv| will be
// `fb` Pointer to the libvpx frame buffer object, its `priv` will be
// a pointer to one of the pool's Vp9FrameBuffer.
static int32_t VpxReleaseFrameBuffer(void* user_priv,
vpx_codec_frame_buffer* fb);
private:
// Protects |allocated_buffers_|.
// Protects `allocated_buffers_`.
mutable Mutex buffers_lock_;
// All buffers, in use or ready to be recycled.
std::vector<rtc::scoped_refptr<Vp9FrameBuffer>> allocated_buffers_