Replace rtc::Optional with absl::optional
This is a no-op change because rtc::Optional is an alias for absl::optional.
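For reference, the shim that makes this a pure rename looks roughly like the following sketch (illustrative only, not the verbatim contents of api/optional.h):

// Sketch of api/optional.h: rtc::Optional and rtc::nullopt simply forward
// to the Abseil types, so replacing the names does not change behavior.
#include "absl/types/optional.h"

namespace rtc {
template <typename T>
using Optional = absl::optional<T>;
using absl::nullopt;
}  // namespace rtc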
This CL was generated by running the script below from the modules directory with the parameters
'pacing video_coding congestion_controller remote_bitrate_estimator':
find $@ -type f \( -name \*.h -o -name \*.cc \) \
-exec sed -i 's|rtc::Optional|absl::optional|g' {} \+ \
-exec sed -i 's|rtc::nullopt|absl::nullopt|g' {} \+ \
-exec sed -i 's|#include "api/optional.h"|#include "absl/types/optional.h"|' {} \+
find $@ -type f -name BUILD.gn \
-exec sed -r -i 's|"(../)*api:optional"|"//third_party/abseil-cpp/absl/types:optional"|' {} \+;
git cl format
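As an illustration, if the commands above (including the final git cl format) are saved as replace_optional.sh (a hypothetical name), the run that produced this CL would be roughly:

cd modules
sh replace_optional.sh pacing video_coding congestion_controller remote_bitrate_estimator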
Bug: webrtc:9078
Change-Id: I8ea501d7f1ee36e8d8cd3ed37e6b763c7fe29118
Reviewed-on: https://webrtc-review.googlesource.com/83900
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23640}
@@ -17,7 +17,6 @@ rtc_static_library("encoded_frame") {
deps = [
":video_codec_interface",
"../../:webrtc_common",
-"../../api:optional",
"../../api/video:video_frame_i420",
"../../common_video:common_video",
"../../modules:module_api",
@@ -28,6 +27,7 @@ rtc_static_library("encoded_frame") {
"../../rtc_base/experiments:alr_experiment",
"../../system_wrappers:field_trial_api",
"../../system_wrappers:system_wrappers",
+"//third_party/abseil-cpp/absl/types:optional",
]

if (!build_with_chromium && is_clang) {
@@ -165,7 +165,6 @@ rtc_static_library("video_coding") {
"../..:webrtc_common",
"../../:typedefs",
"../../api:fec_controller_api",
-"../../api:optional",
"../../api/video:encoded_frame",
"../../api/video:video_frame",
"../../api/video:video_frame_i420",
@@ -185,6 +184,7 @@ rtc_static_library("video_coding") {
"../../system_wrappers:metrics_api",
"../rtp_rtcp:rtp_rtcp_format",
"../utility:utility",
+"//third_party/abseil-cpp/absl/types:optional",
]
}

@@ -258,7 +258,6 @@ rtc_source_set("video_coding_utility") {
"..:module_api",
"../..:webrtc_common",
"../../:typedefs",
-"../../api:optional",
"../../api/video_codecs:video_codecs_api",
"../../common_video",
"../../modules/rtp_rtcp",
@@ -270,6 +269,7 @@ rtc_source_set("video_coding_utility") {
"../../rtc_base/experiments:quality_scaling_experiment",
"../../system_wrappers",
"../rtp_rtcp:rtp_rtcp_format",
+"//third_party/abseil-cpp/absl/types:optional",
]
}

@@ -412,7 +412,6 @@ rtc_static_library("webrtc_vp8_helpers") {
"..:module_api",
"../..:webrtc_common",
"../../:typedefs",
-"../../api:optional",
"../../api/video:video_frame",
"../../api/video_codecs:video_codecs_api",
"../../common_video",
@@ -422,6 +421,7 @@ rtc_static_library("webrtc_vp8_helpers") {
"../../system_wrappers",
"../../system_wrappers:field_trial_api",
"../../system_wrappers:metrics_api",
+"//third_party/abseil-cpp/absl/types:optional",
"//third_party/libyuv",
]
}
@@ -454,7 +454,6 @@ rtc_static_library("webrtc_vp8") {
"..:module_api",
"../..:webrtc_common",
"../../:typedefs",
-"../../api:optional",
"../../api/video:video_frame",
"../../api/video_codecs:video_codecs_api",
"../../common_video",
@@ -464,6 +463,7 @@ rtc_static_library("webrtc_vp8") {
"../../system_wrappers",
"../../system_wrappers:field_trial_api",
"../../system_wrappers:metrics_api",
+"//third_party/abseil-cpp/absl/types:optional",
"//third_party/libyuv",
]
if (rtc_build_libvpx) {
@@ -758,7 +758,6 @@ if (rtc_include_tests) {
"../..:webrtc_common",
"../../api:create_videocodec_test_fixture_api",
"../../api:mock_video_codec_factory",
-"../../api:optional",
"../../api:videocodec_test_fixture_api",
"../../api/video:video_frame_i420",
"../../api/video_codecs:rtc_software_fallback_wrappers",
@@ -773,6 +772,7 @@ if (rtc_include_tests) {
"../../test:test_support",
"../../test:video_test_common",
"../rtp_rtcp:rtp_rtcp_format",
+"//third_party/abseil-cpp/absl/types:optional",
]

data = video_coding_modules_tests_resources
@@ -43,7 +43,7 @@ bool IsH264CodecSupported() {
SdpVideoFormat CreateH264Format(H264::Profile profile,
H264::Level level,
const std::string& packetization_mode) {
-const rtc::Optional<std::string> profile_string =
+const absl::optional<std::string> profile_string =
H264::ProfileLevelIdToString(H264::ProfileLevelId(profile, level));
RTC_CHECK(profile_string);
return SdpVideoFormat(

@@ -325,7 +325,7 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
RTC_CHECK_EQ(av_frame_->data[kVPlaneIndex], i420_buffer->DataV());
video_frame->set_timestamp(input_image._timeStamp);

-rtc::Optional<uint8_t> qp;
+absl::optional<uint8_t> qp;
// TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
h264_bitstream_parser_.ParseBitstream(input_image._buffer,
input_image._length);
@@ -351,10 +351,10 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
video_frame->rotation());
// TODO(nisse): Timestamp and rotation are all zero here. Change decoder
// interface to pass a VideoFrameBuffer instead of a VideoFrame?
-decoded_image_callback_->Decoded(cropped_frame, rtc::nullopt, qp);
+decoded_image_callback_->Decoded(cropped_frame, absl::nullopt, qp);
} else {
// Return decoded frame.
-decoded_image_callback_->Decoded(*video_frame, rtc::nullopt, qp);
+decoded_image_callback_->Decoded(*video_frame, absl::nullopt, qp);
}
// Stop referencing it, possibly freeing |video_frame|.
av_frame_unref(av_frame_.get());
@@ -50,7 +50,7 @@ TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
EXPECT_GT(I420PSNR(input_frame, decoded_frame.get()), 36);
@@ -67,7 +67,7 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
ASSERT_TRUE(decoded_qp);
@@ -42,8 +42,8 @@ class MultiplexDecoderAdapter : public VideoDecoder {

void Decoded(AlphaCodecStream stream_idx,
VideoFrame* decoded_image,
-rtc::Optional<int32_t> decode_time_ms,
-rtc::Optional<uint8_t> qp);
+absl::optional<int32_t> decode_time_ms,
+absl::optional<uint8_t> qp);

private:
// Wrapper class that redirects Decoded() calls.
@@ -53,11 +53,11 @@ class MultiplexDecoderAdapter : public VideoDecoder {
struct DecodedImageData;

void MergeAlphaImages(VideoFrame* decoded_image,
-const rtc::Optional<int32_t>& decode_time_ms,
-const rtc::Optional<uint8_t>& qp,
+const absl::optional<int32_t>& decode_time_ms,
+const absl::optional<uint8_t>& qp,
VideoFrame* multiplex_decoded_image,
-const rtc::Optional<int32_t>& multiplex_decode_time_ms,
-const rtc::Optional<uint8_t>& multiplex_qp);
+const absl::optional<int32_t>& multiplex_decode_time_ms,
+const absl::optional<uint8_t>& multiplex_qp);

VideoDecoderFactory* const factory_;
const SdpVideoFormat associated_format_;

@@ -33,8 +33,8 @@ class MultiplexDecoderAdapter::AdapterDecodedImageCallback
: adapter_(adapter), stream_idx_(stream_idx) {}

void Decoded(VideoFrame& decoded_image,
-rtc::Optional<int32_t> decode_time_ms,
-rtc::Optional<uint8_t> qp) override {
+absl::optional<int32_t> decode_time_ms,
+absl::optional<uint8_t> qp) override {
if (!adapter_)
return;
adapter_->Decoded(stream_idx_, &decoded_image, decode_time_ms, qp);
@@ -64,16 +64,16 @@ struct MultiplexDecoderAdapter::DecodedImageData {
}
DecodedImageData(AlphaCodecStream stream_idx,
const VideoFrame& decoded_image,
-const rtc::Optional<int32_t>& decode_time_ms,
-const rtc::Optional<uint8_t>& qp)
+const absl::optional<int32_t>& decode_time_ms,
+const absl::optional<uint8_t>& qp)
: stream_idx_(stream_idx),
decoded_image_(decoded_image),
decode_time_ms_(decode_time_ms),
qp_(qp) {}
const AlphaCodecStream stream_idx_;
VideoFrame decoded_image_;
-const rtc::Optional<int32_t> decode_time_ms_;
-const rtc::Optional<uint8_t> qp_;
+const absl::optional<int32_t> decode_time_ms_;
+const absl::optional<uint8_t> qp_;

private:
RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DecodedImageData);
@@ -153,8 +153,8 @@ int32_t MultiplexDecoderAdapter::Release() {

void MultiplexDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
VideoFrame* decoded_image,
-rtc::Optional<int32_t> decode_time_ms,
-rtc::Optional<uint8_t> qp) {
+absl::optional<int32_t> decode_time_ms,
+absl::optional<uint8_t> qp) {
const auto& other_decoded_data_it =
decoded_data_.find(decoded_image->timestamp());
if (other_decoded_data_it != decoded_data_.end()) {
@@ -184,11 +184,11 @@ void MultiplexDecoderAdapter::Decoded(AlphaCodecStream stream_idx,

void MultiplexDecoderAdapter::MergeAlphaImages(
VideoFrame* decoded_image,
-const rtc::Optional<int32_t>& decode_time_ms,
-const rtc::Optional<uint8_t>& qp,
+const absl::optional<int32_t>& decode_time_ms,
+const absl::optional<uint8_t>& qp,
VideoFrame* alpha_decoded_image,
-const rtc::Optional<int32_t>& alpha_decode_time_ms,
-const rtc::Optional<uint8_t>& alpha_qp) {
+const absl::optional<int32_t>& alpha_decode_time_ms,
+const absl::optional<uint8_t>& alpha_qp) {
if (!alpha_decoded_image->timestamp()) {
decoded_complete_callback_->Decoded(*decoded_image, decode_time_ms, qp);
return;
@@ -132,7 +132,7 @@ TEST_F(TestMultiplexAdapter, EncodeDecodeI420Frame) {
WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, &codec_specific_info, -1));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
EXPECT_GT(I420PSNR(input_frame, decoded_frame.get()), 36);
@@ -150,7 +150,7 @@ TEST_F(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
EXPECT_GT(I420PSNR(yuva_frame.get(), decoded_frame.get()), 36);
@@ -50,8 +50,8 @@ VideoCodecUnitTest::FakeEncodeCompleteCallback::OnEncodedImage(

void VideoCodecUnitTest::FakeDecodeCompleteCallback::Decoded(
VideoFrame& frame,
-rtc::Optional<int32_t> decode_time_ms,
-rtc::Optional<uint8_t> qp) {
+absl::optional<int32_t> decode_time_ms,
+absl::optional<uint8_t> qp) {
rtc::CritScope lock(&test_->decoded_frame_section_);
test_->decoded_frame_.emplace(frame);
test_->decoded_qp_ = qp;
@@ -71,7 +71,8 @@ void VideoCodecUnitTest::SetUp() {

input_frame_generator_ = test::FrameGenerator::CreateSquareGenerator(
codec_settings_.width, codec_settings_.height,
-rtc::Optional<test::FrameGenerator::OutputType>(), rtc::Optional<int>());
+absl::optional<test::FrameGenerator::OutputType>(),
+absl::optional<int>());

encoder_ = CreateEncoder();
decoder_ = CreateDecoder();
@@ -141,7 +142,7 @@ bool VideoCodecUnitTest::WaitForEncodedFrames(
}

bool VideoCodecUnitTest::WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
-rtc::Optional<uint8_t>* qp) {
+absl::optional<uint8_t>* qp) {
bool ret = decoded_frame_event_.Wait(kDecodeTimeoutMs);
EXPECT_TRUE(ret) << "Timed out while waiting for a decoded frame.";
// This becomes unsafe if there are multiple threads waiting for frames.

@@ -67,8 +67,8 @@ class VideoCodecUnitTest : public ::testing::Test {
return -1;
}
void Decoded(VideoFrame& frame,
-rtc::Optional<int32_t> decode_time_ms,
-rtc::Optional<uint8_t> qp) override;
+absl::optional<int32_t> decode_time_ms,
+absl::optional<uint8_t> qp) override;

private:
VideoCodecUnitTest* const test_;
@@ -97,7 +97,7 @@ class VideoCodecUnitTest : public ::testing::Test {

// Helper method for waiting a single decoded frame.
bool WaitForDecodedFrame(std::unique_ptr<VideoFrame>* frame,
-rtc::Optional<uint8_t>* qp);
+absl::optional<uint8_t>* qp);

size_t GetNumEncodedFrames();

@@ -120,9 +120,9 @@ class VideoCodecUnitTest : public ::testing::Test {

rtc::Event decoded_frame_event_;
rtc::CriticalSection decoded_frame_section_;
-rtc::Optional<VideoFrame> decoded_frame_
+absl::optional<VideoFrame> decoded_frame_
RTC_GUARDED_BY(decoded_frame_section_);
-rtc::Optional<uint8_t> decoded_qp_ RTC_GUARDED_BY(decoded_frame_section_);
+absl::optional<uint8_t> decoded_qp_ RTC_GUARDED_BY(decoded_frame_section_);

std::unique_ptr<test::FrameGenerator> input_frame_generator_;
uint32_t last_input_frame_timestamp_;
@@ -151,8 +151,8 @@ class VideoProcessor {
}

void Decoded(webrtc::VideoFrame& image,
-rtc::Optional<int32_t> decode_time_ms,
-rtc::Optional<uint8_t> qp) override {
+absl::optional<int32_t> decode_time_ms,
+absl::optional<uint8_t> qp) override {
Decoded(image);
}

@@ -17,7 +17,7 @@

#include "modules/video_coding/codecs/vp8/temporal_layers.h"

-#include "api/optional.h"
+#include "absl/types/optional.h"

namespace webrtc {

@@ -52,7 +52,7 @@ class DefaultTemporalLayers : public TemporalLayers {
uint8_t pattern_idx_;
bool last_base_layer_sync_;
// Updated cumulative bitrates, per temporal layer.
-rtc::Optional<std::vector<uint32_t>> new_bitrates_bps_;
+absl::optional<std::vector<uint32_t>> new_bitrates_bps_;
};

class DefaultTemporalLayersChecker : public TemporalLayersChecker {
@@ -308,7 +308,7 @@ int LibvpxVp8Decoder::ReturnFrame(const vpx_image_t* img,

VideoFrame decoded_image(buffer, timestamp, 0, kVideoRotation_0);
decoded_image.set_ntp_time_ms(ntp_time_ms);
-decode_complete_callback_->Decoded(decoded_image, rtc::nullopt, qp);
+decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);

return WEBRTC_VIDEO_CODEC_OK;
}

@@ -72,9 +72,9 @@ class ScreenshareLayers : public TemporalLayers {
uint32_t max_debt_bytes_;

// Configured max framerate.
-rtc::Optional<uint32_t> target_framerate_;
+absl::optional<uint32_t> target_framerate_;
// Incoming framerate from capturer.
-rtc::Optional<uint32_t> capture_framerate_;
+absl::optional<uint32_t> capture_framerate_;
// Tracks what framerate we actually encode, and drops frames on overshoot.
RateStatistics encode_framerate_;
bool bitrate_updated_;

@@ -138,11 +138,11 @@ class ScreenshareLayerTest : public ::testing::Test {
// FrameEncoded() call will be omitted and needs to be done by the caller.
// Returns the flags for the last frame.
int SkipUntilTl(int layer) {
-return SkipUntilTlAndSync(layer, rtc::nullopt);
+return SkipUntilTlAndSync(layer, absl::nullopt);
}

// Same as SkipUntilTl, but also waits until the sync bit condition is met.
-int SkipUntilTlAndSync(int layer, rtc::Optional<bool> sync) {
+int SkipUntilTlAndSync(int layer, absl::optional<bool> sync) {
int flags = 0;
const int kMaxFramesToSkip =
1 + (sync.value_or(false) ? kMaxSyncPeriodSeconds : 1) * kFrameRate;
@@ -150,8 +150,8 @@ class SimulcastTestFixtureImpl::Vp8TestDecodedImageCallback
return -1;
}
void Decoded(VideoFrame& decoded_image,
-rtc::Optional<int32_t> decode_time_ms,
-rtc::Optional<uint8_t> qp) override {
+absl::optional<int32_t> decode_time_ms,
+absl::optional<uint8_t> qp) override {
Decoded(decoded_image);
}
int DecodedFrames() { return decoded_frames_; }

@@ -162,7 +162,7 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, -1));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
ASSERT_TRUE(decoded_qp);
@@ -249,7 +249,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
decoder_->Decode(encoded_frame, false, nullptr, -1));

std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
// Compute PSNR on all planes (faster than SSIM).
@@ -283,7 +283,7 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, -1));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
EXPECT_GT(I420PSNR(input_frame, decoded_frame.get()), 36);
@@ -80,7 +80,7 @@ TEST_F(TestVp9Impl, EncodeDecode) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
EXPECT_GT(I420PSNR(input_frame, decoded_frame.get()), 36);
@@ -118,7 +118,7 @@ TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
decoder_->Decode(encoded_frame, false, nullptr, 0));
std::unique_ptr<VideoFrame> decoded_frame;
-rtc::Optional<uint8_t> decoded_qp;
+absl::optional<uint8_t> decoded_qp;
ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
ASSERT_TRUE(decoded_frame);
ASSERT_TRUE(decoded_qp);

@@ -1101,7 +1101,7 @@ int VP9DecoderImpl::ReturnFrame(const vpx_image_t* img,
0 /* render_time_ms */, webrtc::kVideoRotation_0);
decoded_image.set_ntp_time_ms(ntp_time_ms);

-decode_complete_callback_->Decoded(decoded_image, rtc::nullopt, qp);
+decode_complete_callback_->Decoded(decoded_image, absl::nullopt, qp);
return WEBRTC_VIDEO_CODEC_OK;
}

@@ -113,7 +113,7 @@ class VP9EncoderImpl : public VP9Encoder {
InterLayerPredMode inter_layer_pred_;

// Framerate controller.
-rtc::Optional<float> target_framerate_fps_;
+absl::optional<float> target_framerate_fps_;
RateStatistics output_framerate_;
uint32_t last_encoded_frame_rtp_timestamp_;
@@ -599,7 +599,7 @@ void FrameBuffer::UpdateJitterDelay() {

void FrameBuffer::UpdateTimingFrameInfo() {
TRACE_EVENT0("webrtc", "FrameBuffer::UpdateTimingFrameInfo");
-rtc::Optional<TimingFrameInfo> info = timing_->GetTimingFrameInfo();
+absl::optional<TimingFrameInfo> info = timing_->GetTimingFrameInfo();
if (info && stats_callback_)
stats_callback_->OnTimingFrameInfoUpdated(*info);
}

@@ -156,11 +156,11 @@ bool RtpFrameObject::delayed_by_retransmission() const {
return times_nacked() > 0;
}

-rtc::Optional<RTPVideoTypeHeader> RtpFrameObject::GetCodecHeader() const {
+absl::optional<RTPVideoTypeHeader> RtpFrameObject::GetCodecHeader() const {
rtc::CritScope lock(&packet_buffer_->crit_);
VCMPacket* packet = packet_buffer_->GetPacket(first_seq_num_);
if (!packet)
-return rtc::nullopt;
+return absl::nullopt;
return packet->video_header.codecHeader;
}

@@ -11,7 +11,7 @@
#ifndef MODULES_VIDEO_CODING_FRAME_OBJECT_H_
#define MODULES_VIDEO_CODING_FRAME_OBJECT_H_

-#include "api/optional.h"
+#include "absl/types/optional.h"
#include "api/video/encoded_frame.h"
#include "common_types.h" // NOLINT(build/include)
#include "modules/include/module_common_types.h"
@@ -41,7 +41,7 @@ class RtpFrameObject : public EncodedFrame {
int64_t ReceivedTime() const override;
int64_t RenderTime() const override;
bool delayed_by_retransmission() const override;
-rtc::Optional<RTPVideoTypeHeader> GetCodecHeader() const;
+absl::optional<RTPVideoTypeHeader> GetCodecHeader() const;

private:
rtc::scoped_refptr<PacketBuffer> packet_buffer_;
|
||||
int32_t VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
|
||||
int64_t decode_time_ms) {
|
||||
Decoded(decodedImage,
|
||||
decode_time_ms >= 0 ? rtc::Optional<int32_t>(decode_time_ms)
|
||||
: rtc::nullopt,
|
||||
rtc::nullopt);
|
||||
decode_time_ms >= 0 ? absl::optional<int32_t>(decode_time_ms)
|
||||
: absl::nullopt,
|
||||
absl::nullopt);
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
|
||||
rtc::Optional<int32_t> decode_time_ms,
|
||||
rtc::Optional<uint8_t> qp) {
|
||||
absl::optional<int32_t> decode_time_ms,
|
||||
absl::optional<uint8_t> qp) {
|
||||
RTC_DCHECK(_receiveCallback) << "Callback must not be null at this point";
|
||||
TRACE_EVENT_INSTANT1("webrtc", "VCMDecodedFrameCallback::Decoded",
|
||||
"timestamp", decodedImage.timestamp());
|
||||
|
||||
@ -46,8 +46,8 @@ class VCMDecodedFrameCallback : public DecodedImageCallback {
|
||||
int32_t Decoded(VideoFrame& decodedImage) override;
|
||||
int32_t Decoded(VideoFrame& decodedImage, int64_t decode_time_ms) override;
|
||||
void Decoded(VideoFrame& decodedImage,
|
||||
rtc::Optional<int32_t> decode_time_ms,
|
||||
rtc::Optional<uint8_t> qp) override;
|
||||
absl::optional<int32_t> decode_time_ms,
|
||||
absl::optional<uint8_t> qp) override;
|
||||
int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId) override;
|
||||
int32_t ReceivedDecodedFrame(const uint64_t pictureId) override;
|
||||
|
||||
|
||||
@ -12,7 +12,7 @@
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "api/optional.h"
|
||||
#include "absl/types/optional.h"
|
||||
#include "api/video/i420_buffer.h"
|
||||
#include "modules/include/module_common_types_public.h"
|
||||
#include "modules/video_coding/encoded_frame.h"
|
||||
@ -178,7 +178,7 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
|
||||
incorrect_capture_time_logged_messages_(0),
|
||||
reordered_frames_logged_messages_(0),
|
||||
stalled_encoder_logged_messages_(0) {
|
||||
rtc::Optional<AlrExperimentSettings> experiment_settings =
|
||||
absl::optional<AlrExperimentSettings> experiment_settings =
|
||||
AlrExperimentSettings::CreateFromFieldTrial(
|
||||
AlrExperimentSettings::kStrictPacingAndProbingExperimentName);
|
||||
if (experiment_settings) {
|
||||
@ -249,10 +249,10 @@ void VCMEncodedFrameCallback::OnEncodeStarted(uint32_t rtp_timestamp,
|
||||
rtp_timestamp, capture_time_ms, rtc::TimeMillis());
|
||||
}
|
||||
|
||||
rtc::Optional<int64_t> VCMEncodedFrameCallback::ExtractEncodeStartTime(
|
||||
absl::optional<int64_t> VCMEncodedFrameCallback::ExtractEncodeStartTime(
|
||||
size_t simulcast_svc_idx,
|
||||
EncodedImage* encoded_image) {
|
||||
rtc::Optional<int64_t> result;
|
||||
absl::optional<int64_t> result;
|
||||
size_t num_simulcast_svc_streams = timing_frames_info_.size();
|
||||
if (simulcast_svc_idx < num_simulcast_svc_streams) {
|
||||
auto encode_start_list =
|
||||
@ -308,8 +308,8 @@ rtc::Optional<int64_t> VCMEncodedFrameCallback::ExtractEncodeStartTime(
|
||||
|
||||
void VCMEncodedFrameCallback::FillTimingInfo(size_t simulcast_svc_idx,
|
||||
EncodedImage* encoded_image) {
|
||||
rtc::Optional<size_t> outlier_frame_size;
|
||||
rtc::Optional<int64_t> encode_start_ms;
|
||||
absl::optional<size_t> outlier_frame_size;
|
||||
absl::optional<int64_t> encode_start_ms;
|
||||
uint8_t timing_flags = VideoSendTiming::kNotTriggered;
|
||||
{
|
||||
rtc::CritScope crit(&timing_params_lock_);
|
||||
|
||||
@ -79,8 +79,8 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
|
||||
private:
|
||||
// For non-internal-source encoders, returns encode started time and fixes
|
||||
// capture timestamp for the frame, if corrupted by the encoder.
|
||||
rtc::Optional<int64_t> ExtractEncodeStartTime(size_t simulcast_svc_idx,
|
||||
EncodedImage* encoded_image)
|
||||
absl::optional<int64_t> ExtractEncodeStartTime(size_t simulcast_svc_idx,
|
||||
EncodedImage* encoded_image)
|
||||
RTC_EXCLUSIVE_LOCKS_REQUIRED(timing_params_lock_);
|
||||
|
||||
void FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image);
|
||||
|
||||
@ -219,9 +219,9 @@ void H264SpsPpsTracker::InsertSpsPpsNalus(const std::vector<uint8_t>& sps,
|
||||
RTC_LOG(LS_WARNING) << "SPS Nalu header missing";
|
||||
return;
|
||||
}
|
||||
rtc::Optional<SpsParser::SpsState> parsed_sps = SpsParser::ParseSps(
|
||||
absl::optional<SpsParser::SpsState> parsed_sps = SpsParser::ParseSps(
|
||||
sps.data() + kNaluHeaderOffset, sps.size() - kNaluHeaderOffset);
|
||||
rtc::Optional<PpsParser::PpsState> parsed_pps = PpsParser::ParsePps(
|
||||
absl::optional<PpsParser::PpsState> parsed_pps = PpsParser::ParsePps(
|
||||
pps.data() + kNaluHeaderOffset, pps.size() - kNaluHeaderOffset);
|
||||
|
||||
if (!parsed_sps) {
|
||||
|
||||
@ -34,7 +34,7 @@ class MockVCMReceiveCallback : public VCMReceiveCallback {
|
||||
virtual ~MockVCMReceiveCallback() {}
|
||||
|
||||
MOCK_METHOD3(FrameToRender,
|
||||
int32_t(VideoFrame&, rtc::Optional<uint8_t>, VideoContentType));
|
||||
int32_t(VideoFrame&, absl::optional<uint8_t>, VideoContentType));
|
||||
MOCK_METHOD1(ReceivedDecodedReferenceFrame, int32_t(const uint64_t));
|
||||
MOCK_METHOD1(OnIncomingPayloadType, void(int));
|
||||
MOCK_METHOD1(OnDecoderImplementationName, void(const char*));
|
||||
|
||||
@ -58,8 +58,8 @@ class MockDecodedImageCallback : public DecodedImageCallback {
|
||||
int64_t decode_time_ms));
|
||||
MOCK_METHOD3(Decoded,
|
||||
void(VideoFrame& decodedImage, // NOLINT
|
||||
rtc::Optional<int32_t> decode_time_ms,
|
||||
rtc::Optional<uint8_t> qp));
|
||||
absl::optional<int32_t> decode_time_ms,
|
||||
absl::optional<uint8_t> qp));
|
||||
MOCK_METHOD1(ReceivedDecodedReferenceFrame,
|
||||
int32_t(const uint64_t pictureId));
|
||||
MOCK_METHOD1(ReceivedDecodedFrame, int32_t(const uint64_t pictureId));
|
||||
|
||||
@ -70,7 +70,7 @@ struct VCMFrameCount {
|
||||
class VCMReceiveCallback {
|
||||
public:
|
||||
virtual int32_t FrameToRender(VideoFrame& videoFrame, // NOLINT
|
||||
rtc::Optional<uint8_t> qp,
|
||||
absl::optional<uint8_t> qp,
|
||||
VideoContentType content_type) = 0;
|
||||
|
||||
virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId);
|
||||
|
||||
@ -199,12 +199,12 @@ void PacketBuffer::PaddingReceived(uint16_t seq_num) {
|
||||
received_frame_callback_->OnReceivedFrame(std::move(frame));
|
||||
}
|
||||
|
||||
rtc::Optional<int64_t> PacketBuffer::LastReceivedPacketMs() const {
|
||||
absl::optional<int64_t> PacketBuffer::LastReceivedPacketMs() const {
|
||||
rtc::CritScope lock(&crit_);
|
||||
return last_received_packet_ms_;
|
||||
}
|
||||
|
||||
rtc::Optional<int64_t> PacketBuffer::LastReceivedKeyframePacketMs() const {
|
||||
absl::optional<int64_t> PacketBuffer::LastReceivedKeyframePacketMs() const {
|
||||
rtc::CritScope lock(&crit_);
|
||||
return last_received_keyframe_packet_ms_;
|
||||
}
|
||||
|
||||
@@ -58,8 +58,8 @@ class PacketBuffer {
void PaddingReceived(uint16_t seq_num);

// Timestamp (not RTP timestamp) of the last received packet/keyframe packet.
-rtc::Optional<int64_t> LastReceivedPacketMs() const;
-rtc::Optional<int64_t> LastReceivedKeyframePacketMs() const;
+absl::optional<int64_t> LastReceivedPacketMs() const;
+absl::optional<int64_t> LastReceivedKeyframePacketMs() const;

// Returns number of different frames seen in the packet buffer
int GetUniqueFramesSeen() const;
@@ -159,13 +159,13 @@ class PacketBuffer {
OnReceivedFrameCallback* const received_frame_callback_;

// Timestamp (not RTP timestamp) of the last received packet/keyframe packet.
-rtc::Optional<int64_t> last_received_packet_ms_ RTC_GUARDED_BY(crit_);
-rtc::Optional<int64_t> last_received_keyframe_packet_ms_
+absl::optional<int64_t> last_received_packet_ms_ RTC_GUARDED_BY(crit_);
+absl::optional<int64_t> last_received_keyframe_packet_ms_
RTC_GUARDED_BY(crit_);

int unique_frames_seen_ RTC_GUARDED_BY(crit_);

-rtc::Optional<uint16_t> newest_inserted_seq_num_ RTC_GUARDED_BY(crit_);
+absl::optional<uint16_t> newest_inserted_seq_num_ RTC_GUARDED_BY(crit_);
std::set<uint16_t, DescendingSeqNumComp<uint16_t>> missing_packets_
RTC_GUARDED_BY(crit_);
@@ -240,7 +240,7 @@ RtpFrameReferenceFinder::ManageFrameGeneric(RtpFrameObject* frame,

RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp8(
RtpFrameObject* frame) {
-rtc::Optional<RTPVideoTypeHeader> rtp_codec_header = frame->GetCodecHeader();
+absl::optional<RTPVideoTypeHeader> rtp_codec_header = frame->GetCodecHeader();
if (!rtp_codec_header) {
RTC_LOG(LS_WARNING)
<< "Failed to get codec header from frame, dropping frame.";
@@ -393,7 +393,7 @@ void RtpFrameReferenceFinder::UpdateLayerInfoVp8(

RtpFrameReferenceFinder::FrameDecision RtpFrameReferenceFinder::ManageFrameVp9(
RtpFrameObject* frame) {
-rtc::Optional<RTPVideoTypeHeader> rtp_codec_header = frame->GetCodecHeader();
+absl::optional<RTPVideoTypeHeader> rtp_codec_header = frame->GetCodecHeader();
if (!rtp_codec_header) {
RTC_LOG(LS_WARNING)
<< "Failed to get codec header from frame, dropping frame.";

@@ -239,7 +239,7 @@ void VCMTiming::SetTimingFrameInfo(const TimingFrameInfo& info) {
timing_frame_info_.emplace(info);
}

-rtc::Optional<TimingFrameInfo> VCMTiming::GetTimingFrameInfo() {
+absl::optional<TimingFrameInfo> VCMTiming::GetTimingFrameInfo() {
rtc::CritScope cs(&crit_sect_);
return timing_frame_info_;
}

@@ -95,7 +95,7 @@ class VCMTiming {
int* render_delay_ms) const;

void SetTimingFrameInfo(const TimingFrameInfo& info);
-rtc::Optional<TimingFrameInfo> GetTimingFrameInfo();
+absl::optional<TimingFrameInfo> GetTimingFrameInfo();

enum { kDefaultRenderDelayMs = 10 };
enum { kDelayMaxChangeMsPerS = 100 };
@@ -124,7 +124,7 @@ class VCMTiming {
int current_delay_ms_ RTC_GUARDED_BY(crit_sect_);
int last_decode_ms_ RTC_GUARDED_BY(crit_sect_);
uint32_t prev_frame_timestamp_ RTC_GUARDED_BY(crit_sect_);
-rtc::Optional<TimingFrameInfo> timing_frame_info_ RTC_GUARDED_BY(crit_sect_);
+absl::optional<TimingFrameInfo> timing_frame_info_ RTC_GUARDED_BY(crit_sect_);
size_t num_decoded_frames_ RTC_GUARDED_BY(crit_sect_);
};
} // namespace webrtc
@@ -23,13 +23,13 @@ void MovingAverage::AddSample(int sample) {
sum_history_[count_ % sum_history_.size()] = sum_;
}

-rtc::Optional<int> MovingAverage::GetAverage() const {
+absl::optional<int> MovingAverage::GetAverage() const {
return GetAverage(size());
}

-rtc::Optional<int> MovingAverage::GetAverage(size_t num_samples) const {
+absl::optional<int> MovingAverage::GetAverage(size_t num_samples) const {
if (num_samples > size() || num_samples == 0)
-return rtc::nullopt;
+return absl::nullopt;
int sum = sum_ - sum_history_[(count_ - num_samples) % sum_history_.size()];
return sum / static_cast<int>(num_samples);
}

@@ -13,7 +13,7 @@

#include <vector>

-#include "api/optional.h"
+#include "absl/types/optional.h"

namespace webrtc {
class MovingAverage {
@@ -21,8 +21,8 @@ class MovingAverage {
explicit MovingAverage(size_t s);
~MovingAverage();
void AddSample(int sample);
-rtc::Optional<int> GetAverage() const;
-rtc::Optional<int> GetAverage(size_t num_samples) const;
+absl::optional<int> GetAverage() const;
+absl::optional<int> GetAverage(size_t num_samples) const;
void Reset();
size_t size() const;
@@ -44,10 +44,10 @@ class QualityScaler::QpSmoother {
explicit QpSmoother(float alpha)
: alpha_(alpha), last_sample_ms_(rtc::TimeMillis()), smoother_(alpha) {}

-rtc::Optional<int> GetAvg() const {
+absl::optional<int> GetAvg() const {
float value = smoother_.filtered();
if (value == rtc::ExpFilter::kValueUndefined) {
-return rtc::nullopt;
+return absl::nullopt;
}
return static_cast<int>(value);
}
@@ -182,7 +182,7 @@ void QualityScaler::CheckQp() {
observed_enough_frames_ = true;

// Check if we should scale down due to high frame drop.
-const rtc::Optional<int> drop_rate =
+const absl::optional<int> drop_rate =
config_.use_all_drop_reasons ? framedrop_percent_all_.GetAverage()
: framedrop_percent_media_opt_.GetAverage();
if (drop_rate && *drop_rate >= kFramedropPercentThreshold) {
@@ -192,10 +192,10 @@ void QualityScaler::CheckQp() {
}

// Check if we should scale up or down based on QP.
-const rtc::Optional<int> avg_qp_high = qp_smoother_high_
-? qp_smoother_high_->GetAvg()
-: average_qp_.GetAverage();
-const rtc::Optional<int> avg_qp_low =
+const absl::optional<int> avg_qp_high = qp_smoother_high_
+? qp_smoother_high_->GetAvg()
+: average_qp_.GetAverage();
+const absl::optional<int> avg_qp_low =
qp_smoother_low_ ? qp_smoother_low_->GetAvg() : average_qp_.GetAverage();
if (avg_qp_high && avg_qp_low) {
RTC_LOG(LS_INFO) << "Checking average QP " << *avg_qp_high << " ("

@@ -14,7 +14,7 @@
#include <memory>
#include <utility>

-#include "api/optional.h"
+#include "absl/types/optional.h"
#include "api/video_codecs/video_encoder.h"
#include "common_types.h" // NOLINT(build/include)
#include "modules/video_coding/utility/moving_average.h"
@@ -711,8 +711,8 @@ TEST_F(TestPacketBuffer, ContinuousSeqNumDoubleMarkerBit) {
}

TEST_F(TestPacketBuffer, PacketTimestamps) {
-rtc::Optional<int64_t> packet_ms;
-rtc::Optional<int64_t> packet_keyframe_ms;
+absl::optional<int64_t> packet_ms;
+absl::optional<int64_t> packet_keyframe_ms;

packet_ms = packet_buffer_->LastReceivedPacketMs();
packet_keyframe_ms = packet_buffer_->LastReceivedKeyframePacketMs();