Revert "Reland "Copy video frames metadata between encoded and plain frames in one place""
This reverts commit 4fb12b0caec9faa57cfbceb0f86b0e10c32a0cc2.

Reason for revert: breaks some ASan Chromium bots.

Original change's description:
> Reland "Copy video frames metadata between encoded and plain frames in one place"
>
> Reland with fixes.
>
> Currently, some video frame metadata, like rotation or NTP timestamps, is
> copied in every encoder and decoder separately. This CL makes the copying
> happen in a single place for the send or receive side. This will make it
> easier to add new metadata in the future.
>
> Also, added some missing tests.
>
> Original Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/133346
>
> Bug: webrtc:10460
> Change-Id: Ia71198685de7fbd990704b575231cdce94dc0645
> Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/134961
> Reviewed-by: Johannes Kron <kron@webrtc.org>
> Reviewed-by: Niels Moller <nisse@webrtc.org>
> Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
> Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
> Cr-Commit-Position: refs/heads/master@{#27828}

TBR=ilnik@webrtc.org,nisse@webrtc.org,sprang@webrtc.org,kron@webrtc.org,artit@webrtc.org

# Not skipping CQ checks because the original CL landed > 1 day ago.

Bug: webrtc:10460
Change-Id: I9c87a43a716622b389974cb8377f973573fc29a5
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/135747
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Ilya Nikolaevskiy <ilnik@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27895}
committed by Commit Bot
parent e42246aea3
commit de20b9683c
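For context on what is being reverted: the original CL replaced per-codec copying of frame metadata (rotation, NTP timestamp, color space) with a single copy on each of the send and receive sides. The sketch below illustrates the send-side idea. It is a simplification of the FrameEncodeMetadataWriter code removed in the diff that follows; the free functions and the single queue are illustrative assumptions, not the exact WebRTC implementation.

// Sketch: capture metadata once per input frame and restore it once per
// encoded image, instead of copying it inside every encoder.
#include <deque>
#include "absl/types/optional.h"
#include "api/video/color_space.h"
#include "api/video/encoded_image.h"
#include "api/video/video_frame.h"
#include "api/video/video_rotation.h"

namespace {

// Simplified stand-in for the reverted CL's per-layer FrameMetadata list.
struct FrameMetadata {
  uint32_t rtp_timestamp;
  int64_t timestamp_us;
  int64_t ntp_time_ms;
  webrtc::VideoRotation rotation;
  absl::optional<webrtc::ColorSpace> color_space;
};
std::deque<FrameMetadata> metadata_queue;

// Before encoding: snapshot every piece of frame metadata in one place.
void OnEncodeStarted(const webrtc::VideoFrame& frame) {
  metadata_queue.push_back({frame.timestamp(), frame.timestamp_us(),
                            frame.ntp_time_ms(), frame.rotation(),
                            frame.color_space()});
}

// After encoding: write the metadata back onto the encoded image, matched
// by RTP timestamp, no matter which codec produced it.
void FillMetadata(webrtc::EncodedImage* image) {
  while (!metadata_queue.empty() &&
         metadata_queue.front().rtp_timestamp != image->Timestamp()) {
    metadata_queue.pop_front();  // Frame was dropped inside the encoder.
  }
  if (metadata_queue.empty())
    return;
  const FrameMetadata& md = metadata_queue.front();
  image->capture_time_ms_ = md.timestamp_us / 1000;
  image->ntp_time_ms_ = md.ntp_time_ms;
  image->rotation_ = md.rotation;
  image->SetColorSpace(md.color_space);
  metadata_queue.pop_front();
}

}  // namespace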
@@ -28,13 +28,6 @@ void FakeVideoRenderer::OnFrame(const webrtc::VideoFrame& frame) {
   height_ = frame.height();
   rotation_ = frame.rotation();
   timestamp_us_ = frame.timestamp_us();
-  ntp_timestamp_ms_ = frame.ntp_time_ms();
-  color_space_ = frame.color_space();
-  frame_rendered_event_.Set();
-}
-
-bool FakeVideoRenderer::WaitForRenderedFrame(int64_t timeout_ms) {
-  return frame_rendered_event_.Wait(timeout_ms);
 }
 
 }  // namespace cricket
@@ -19,7 +19,6 @@
 #include "api/video/video_rotation.h"
 #include "api/video/video_sink_interface.h"
 #include "rtc_base/critical_section.h"
-#include "rtc_base/event.h"
 
 namespace cricket {
 
@@ -31,7 +30,6 @@ class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
   void OnFrame(const webrtc::VideoFrame& frame) override;
 
   int errors() const { return errors_; }
-
   int width() const {
     rtc::CritScope cs(&crit_);
     return width_;
@@ -40,7 +38,6 @@ class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
     rtc::CritScope cs(&crit_);
     return height_;
   }
-
   webrtc::VideoRotation rotation() const {
     rtc::CritScope cs(&crit_);
     return rotation_;
@@ -50,29 +47,15 @@ class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
     rtc::CritScope cs(&crit_);
     return timestamp_us_;
   }
-
   int num_rendered_frames() const {
     rtc::CritScope cs(&crit_);
     return num_rendered_frames_;
   }
-
   bool black_frame() const {
     rtc::CritScope cs(&crit_);
     return black_frame_;
   }
-
-  int64_t ntp_time_ms() const {
-    rtc::CritScope cs(&crit_);
-    return ntp_timestamp_ms_;
-  }
-
-  absl::optional<webrtc::ColorSpace> color_space() const {
-    rtc::CritScope cs(&crit_);
-    return color_space_;
-  }
-
-  bool WaitForRenderedFrame(int64_t timeout_ms);
 
  private:
   static bool CheckFrameColorYuv(uint8_t y_min,
                                  uint8_t y_max,
@@ -133,11 +116,8 @@ class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
   webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0;
   int64_t timestamp_us_ = 0;
   int num_rendered_frames_ = 0;
-  int64_t ntp_timestamp_ms_ = 0;
   bool black_frame_ = false;
   rtc::CriticalSection crit_;
-  rtc::Event frame_rendered_event_;
-  absl::optional<webrtc::ColorSpace> color_space_;
 };
 
 }  // namespace cricket
@@ -19,6 +19,7 @@
 #include "api/video_codecs/video_decoder.h"
 #include "api/video_codecs/video_encoder.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "common_video/test/utilities.h"
 #include "media/base/codec.h"
 #include "media/base/media_constants.h"
 #include "modules/video_coding/codecs/h264/include/h264.h"
@@ -48,9 +49,17 @@ class TestH264Impl : public VideoCodecUnitTest {
 #ifdef WEBRTC_USE_H264
 #define MAYBE_EncodeDecode EncodeDecode
 #define MAYBE_DecodedQpEqualsEncodedQp DecodedQpEqualsEncodedQp
+#define MAYBE_EncodedColorSpaceEqualsInputColorSpace \
+  EncodedColorSpaceEqualsInputColorSpace
+#define MAYBE_DecodedColorSpaceEqualsEncodedColorSpace \
+  DecodedColorSpaceEqualsEncodedColorSpace
 #else
 #define MAYBE_EncodeDecode DISABLED_EncodeDecode
 #define MAYBE_DecodedQpEqualsEncodedQp DISABLED_DecodedQpEqualsEncodedQp
+#define MAYBE_EncodedColorSpaceEqualsInputColorSpace \
+  DISABLED_EncodedColorSpaceEqualsInputColorSpace
+#define MAYBE_DecodedColorSpaceEqualsEncodedColorSpace \
+  DISABLED_DecodedColorSpaceEqualsEncodedColorSpace
 #endif
 
 TEST_F(TestH264Impl, MAYBE_EncodeDecode) {
@@ -96,4 +105,45 @@ TEST_F(TestH264Impl, MAYBE_DecodedQpEqualsEncodedQp) {
   EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
 }
 
+TEST_F(TestH264Impl, MAYBE_EncodedColorSpaceEqualsInputColorSpace) {
+  VideoFrame* input_frame = NextInputFrame();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_FALSE(encoded_frame.ColorSpace());
+
+  // Video frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  VideoFrame input_frame_w_color_space =
+      VideoFrame::Builder()
+          .set_video_frame_buffer(input_frame->video_frame_buffer())
+          .set_color_space(color_space)
+          .build();
+
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(input_frame_w_color_space, nullptr));
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  ASSERT_TRUE(encoded_frame.ColorSpace());
+  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
+}
+
+TEST_F(TestH264Impl, MAYBE_DecodedColorSpaceEqualsEncodedColorSpace) {
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(*NextInputFrame(), nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  // Add color space to encoded frame.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  encoded_frame.SetColorSpace(color_space);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+  std::unique_ptr<VideoFrame> decoded_frame;
+  absl::optional<uint8_t> decoded_qp;
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  ASSERT_TRUE(decoded_frame->color_space());
+  EXPECT_EQ(color_space, *decoded_frame->color_space());
+}
+
 }  // namespace webrtc
@@ -227,10 +227,51 @@ TEST_F(TestVp8Impl, OnEncodedImageReportsInfo) {
   EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
 
   EXPECT_EQ(kInitialTimestampRtp, encoded_frame.Timestamp());
+  EXPECT_EQ(kInitialTimestampMs, encoded_frame.capture_time_ms_);
   EXPECT_EQ(kWidth, static_cast<int>(encoded_frame._encodedWidth));
   EXPECT_EQ(kHeight, static_cast<int>(encoded_frame._encodedHeight));
 }
 
+// We only test the encoder here, since the decoded frame rotation is set based
+// on the CVO RTP header extension in VCMDecodedFrameCallback::Decoded.
+// TODO(brandtr): Consider passing through the rotation flag through the decoder
+// in the same way as done in the encoder.
+TEST_F(TestVp8Impl, EncodedRotationEqualsInputRotation) {
+  VideoFrame* input_frame = NextInputFrame();
+  input_frame->set_rotation(kVideoRotation_0);
+
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+  EXPECT_EQ(kVideoRotation_0, encoded_frame.rotation_);
+
+  input_frame->set_rotation(kVideoRotation_90);
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+  EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
+}
+
+TEST_F(TestVp8Impl, EncodedColorSpaceEqualsInputColorSpace) {
+  // Video frame without explicit color space information.
+  VideoFrame* input_frame = NextInputFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+  EXPECT_FALSE(encoded_frame.ColorSpace());
+
+  // Video frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  VideoFrame input_frame_w_color_space =
+      VideoFrame::Builder()
+          .set_video_frame_buffer(input_frame->video_frame_buffer())
+          .set_color_space(color_space)
+          .build();
+
+  EncodeAndWaitForFrame(input_frame_w_color_space, &encoded_frame,
+                        &codec_specific_info);
+  ASSERT_TRUE(encoded_frame.ColorSpace());
+  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
+}
+
 TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   VideoFrame* input_frame = NextInputFrame();
   EncodedImage encoded_frame;
@@ -249,6 +290,24 @@ TEST_F(TestVp8Impl, DecodedQpEqualsEncodedQp) {
   EXPECT_EQ(encoded_frame.qp_, *decoded_qp);
 }
 
+TEST_F(TestVp8Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
+  VideoFrame* input_frame = NextInputFrame();
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  EncodeAndWaitForFrame(*input_frame, &encoded_frame, &codec_specific_info);
+
+  // Encoded frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/false);
+  encoded_frame.SetColorSpace(color_space);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, -1));
+  std::unique_ptr<VideoFrame> decoded_frame;
+  absl::optional<uint8_t> decoded_qp;
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  ASSERT_TRUE(decoded_frame->color_space());
+  EXPECT_EQ(color_space, *decoded_frame->color_space());
+}
+
 TEST_F(TestVp8Impl, ChecksSimulcastSettings) {
   codec_settings_.numberOfSimulcastStreams = 2;
   // Resolutions are not in ascending order, temporal layers do not match.
@@ -343,6 +402,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   // Compute PSNR on all planes (faster than SSIM).
   EXPECT_GT(I420PSNR(input_frame, decoded_frame.get()), 36);
   EXPECT_EQ(kInitialTimestampRtp, decoded_frame->timestamp());
+  EXPECT_EQ(kTestNtpTimeMs, decoded_frame->ntp_time_ms());
 }
 
 #if defined(WEBRTC_ANDROID)
@@ -11,6 +11,7 @@
 #include "api/video/color_space.h"
 #include "api/video/i420_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "common_video/test/utilities.h"
 #include "media/base/vp9_profile.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "modules/video_coding/codecs/test/video_codec_unittest.h"
@@ -145,7 +146,50 @@ TEST_F(TestVp9Impl, EncodeDecode) {
             color_space.chroma_siting_vertical());
 }
 
-TEST_F(TestVp9Impl, DecodedColorSpaceFromBitstream) {
+// We only test the encoder here, since the decoded frame rotation is set based
+// on the CVO RTP header extension in VCMDecodedFrameCallback::Decoded.
+// TODO(brandtr): Consider passing through the rotation flag through the decoder
+// in the same way as done in the encoder.
+TEST_F(TestVp9Impl, EncodedRotationEqualsInputRotation) {
+  VideoFrame* input_frame = NextInputFrame();
+  input_frame->set_rotation(kVideoRotation_0);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_EQ(kVideoRotation_0, encoded_frame.rotation_);
+
+  input_frame = NextInputFrame();
+  input_frame->set_rotation(kVideoRotation_90);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_EQ(kVideoRotation_90, encoded_frame.rotation_);
+}
+
+TEST_F(TestVp9Impl, EncodedColorSpaceEqualsInputColorSpace) {
+  // Video frame without explicit color space information.
+  VideoFrame* input_frame = NextInputFrame();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
+  EncodedImage encoded_frame;
+  CodecSpecificInfo codec_specific_info;
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  EXPECT_FALSE(encoded_frame.ColorSpace());
+
+  // Video frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
+  VideoFrame input_frame_w_hdr =
+      VideoFrame::Builder()
+          .set_video_frame_buffer(input_frame->video_frame_buffer())
+          .set_color_space(color_space)
+          .build();
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
+            encoder_->Encode(input_frame_w_hdr, nullptr));
+  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
+  ASSERT_TRUE(encoded_frame.ColorSpace());
+  EXPECT_EQ(*encoded_frame.ColorSpace(), color_space);
+}
+
+TEST_F(TestVp9Impl, DecodedColorSpaceEqualsEncodedColorSpace) {
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             encoder_->Encode(*NextInputFrame(), nullptr));
   EncodedImage encoded_frame;
@@ -162,6 +206,15 @@ TEST_F(TestVp9Impl, DecodedColorSpaceFromBitstream) {
   ASSERT_TRUE(decoded_frame->color_space());
   // No HDR metadata present.
   EXPECT_FALSE(decoded_frame->color_space()->hdr_metadata());
+
+  // Encoded frame with explicit color space information.
+  ColorSpace color_space = CreateTestColorSpace(/*with_hdr_metadata=*/true);
+  encoded_frame.SetColorSpace(color_space);
+  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, false, 0));
+  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
+  ASSERT_TRUE(decoded_frame);
+  ASSERT_TRUE(decoded_frame->color_space());
+  EXPECT_EQ(color_space, *decoded_frame->color_space());
 }
 
 TEST_F(TestVp9Impl, DecodedQpEqualsEncodedQp) {
@@ -52,10 +52,8 @@ class VCMEncodedFrame : protected EncodedImage {
     return static_cast<const webrtc::EncodedImage&>(*this);
   }
 
-  using EncodedImage::ColorSpace;
   using EncodedImage::data;
   using EncodedImage::set_size;
-  using EncodedImage::SetColorSpace;
   using EncodedImage::SetSpatialIndex;
   using EncodedImage::SetTimestamp;
   using EncodedImage::size;
@@ -80,12 +80,6 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
     return;
   }
 
-  decodedImage.set_ntp_time_ms(frameInfo->ntp_time_ms);
-  if (frameInfo->color_space) {
-    decodedImage.set_color_space(*frameInfo->color_space);
-  }
-  decodedImage.set_rotation(frameInfo->rotation);
-
   const int64_t now_ms = _clock->TimeInMilliseconds();
   if (!decode_time_ms) {
     decode_time_ms = now_ms - frameInfo->decodeStartTimeMs;
@@ -146,6 +140,7 @@ void VCMDecodedFrameCallback::Decoded(VideoFrame& decodedImage,
 
   decodedImage.set_timestamp_us(frameInfo->renderTimeMs *
                                 rtc::kNumMicrosecsPerMillisec);
+  decodedImage.set_rotation(frameInfo->rotation);
   _receiveCallback->FrameToRender(decodedImage, qp, frameInfo->content_type);
 }
 
@@ -204,9 +199,6 @@ int32_t VCMGenericDecoder::Decode(const VCMEncodedFrame& frame, int64_t nowMs) {
   _frameInfos[_nextFrameInfoIdx].renderTimeMs = frame.RenderTimeMs();
   _frameInfos[_nextFrameInfoIdx].rotation = frame.rotation();
   _frameInfos[_nextFrameInfoIdx].timing = frame.video_timing();
-  _frameInfos[_nextFrameInfoIdx].ntp_time_ms =
-      frame.EncodedImage().ntp_time_ms_;
-  _frameInfos[_nextFrameInfoIdx].color_space = frame.ColorSpace();
   // Set correctly only for key frames. Thus, use latest key frame
   // content type. If the corresponding key frame was lost, decode will fail
   // and content type will be ignored.
@@ -34,8 +34,6 @@ struct VCMFrameInformation {
   VideoRotation rotation;
   VideoContentType content_type;
   EncodedImage::Timing timing;
-  int64_t ntp_time_ms;
-  const ColorSpace* color_space;
 };
 
 class VCMDecodedFrameCallback : public DecodedImageCallback {
@@ -45,13 +45,12 @@ int32_t FakeDecoder::Decode(const EncodedImage& input,
     height_ = input._encodedHeight;
   }
 
-  rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(width_, height_);
-  I420Buffer::SetBlack(buffer);
-  VideoFrame frame = VideoFrame::Builder()
-                         .set_video_frame_buffer(buffer)
-                         .set_rotation(webrtc::kVideoRotation_0)
-                         .set_timestamp_ms(render_time_ms)
-                         .build();
+  VideoFrame frame =
+      VideoFrame::Builder()
+          .set_video_frame_buffer(I420Buffer::Create(width_, height_))
+          .set_rotation(webrtc::kVideoRotation_0)
+          .set_timestamp_ms(render_time_ms)
+          .build();
   frame.set_timestamp(input.Timestamp());
   frame.set_ntp_time_ms(input.ntp_time_ms_);
 
@@ -171,8 +171,8 @@ rtc_source_set("video_stream_encoder_impl") {
     "encoder_bitrate_adjuster.h",
     "encoder_overshoot_detector.cc",
    "encoder_overshoot_detector.h",
-    "frame_encode_metadata_writer.cc",
-    "frame_encode_metadata_writer.h",
+    "frame_encode_timer.cc",
+    "frame_encode_timer.h",
     "overuse_frame_detector.cc",
     "overuse_frame_detector.h",
     "video_stream_encoder.cc",
@@ -492,7 +492,7 @@ if (rtc_include_tests) {
     "end_to_end_tests/ssrc_tests.cc",
     "end_to_end_tests/stats_tests.cc",
     "end_to_end_tests/transport_feedback_tests.cc",
-    "frame_encode_metadata_writer_unittest.cc",
+    "frame_encode_timer_unittest.cc",
     "overuse_frame_detector_unittest.cc",
     "picture_id_tests.cc",
     "quality_scaling_tests.cc",
@@ -8,7 +8,7 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "video/frame_encode_metadata_writer.h"
+#include "video/frame_encode_timer.h"
 
 #include <algorithm>
 
@@ -23,31 +23,29 @@ const int kMessagesThrottlingThreshold = 2;
 const int kThrottleRatio = 100000;
 }  // namespace
 
-FrameEncodeMetadataWriter::TimingFramesLayerInfo::TimingFramesLayerInfo() =
-    default;
-FrameEncodeMetadataWriter::TimingFramesLayerInfo::~TimingFramesLayerInfo() =
-    default;
+FrameEncodeTimer::TimingFramesLayerInfo::TimingFramesLayerInfo() = default;
+FrameEncodeTimer::TimingFramesLayerInfo::~TimingFramesLayerInfo() = default;
 
-FrameEncodeMetadataWriter::FrameEncodeMetadataWriter(
-    EncodedImageCallback* frame_drop_callback)
+FrameEncodeTimer::FrameEncodeTimer(EncodedImageCallback* frame_drop_callback)
     : frame_drop_callback_(frame_drop_callback),
       internal_source_(false),
      framerate_fps_(0),
       last_timing_frame_time_ms_(-1),
+      incorrect_capture_time_logged_messages_(0),
       reordered_frames_logged_messages_(0),
       stalled_encoder_logged_messages_(0) {
   codec_settings_.timing_frame_thresholds = {-1, 0};
 }
-FrameEncodeMetadataWriter::~FrameEncodeMetadataWriter() {}
+FrameEncodeTimer::~FrameEncodeTimer() {}
 
-void FrameEncodeMetadataWriter::OnEncoderInit(const VideoCodec& codec,
-                                              bool internal_source) {
+void FrameEncodeTimer::OnEncoderInit(const VideoCodec& codec,
                                      bool internal_source) {
   rtc::CritScope cs(&lock_);
   codec_settings_ = codec;
   internal_source_ = internal_source;
 }
 
-void FrameEncodeMetadataWriter::OnSetRates(
+void FrameEncodeTimer::OnSetRates(
     const VideoBitrateAllocation& bitrate_allocation,
     uint32_t framerate_fps) {
   rtc::CritScope cs(&lock_);
@@ -62,7 +60,8 @@ void FrameEncodeMetadataWriter::OnSetRates(
   }
 }
 
-void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) {
+void FrameEncodeTimer::OnEncodeStarted(uint32_t rtp_timestamp,
+                                       int64_t capture_time_ms) {
   rtc::CritScope cs(&lock_);
   if (internal_source_) {
     return;
@@ -70,24 +69,19 @@ void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) {
 
   const size_t num_spatial_layers = NumSpatialLayers();
   timing_frames_info_.resize(num_spatial_layers);
-  FrameMetadata metadata;
-  metadata.rtp_timestamp = frame.timestamp();
-  metadata.encode_start_time_ms = rtc::TimeMillis();
-  metadata.ntp_time_ms = frame.ntp_time_ms();
-  metadata.timestamp_us = frame.timestamp_us();
-  metadata.rotation = frame.rotation();
-  metadata.color_space = frame.color_space();
   for (size_t si = 0; si < num_spatial_layers; ++si) {
-    RTC_DCHECK(timing_frames_info_[si].frames.empty() ||
-               rtc::TimeDiff(
-                   frame.render_time_ms(),
-                   timing_frames_info_[si].frames.back().timestamp_us / 1000) >=
-                   0);
+    RTC_DCHECK(
+        timing_frames_info_[si].encode_start_list.empty() ||
+        rtc::TimeDiff(
+            capture_time_ms,
+            timing_frames_info_[si].encode_start_list.back().capture_time_ms) >=
+            0);
     // If stream is disabled due to low bandwidth OnEncodeStarted still will be
     // called and have to be ignored.
     if (timing_frames_info_[si].target_bitrate_bytes_per_sec == 0)
       return;
-    if (timing_frames_info_[si].frames.size() == kMaxEncodeStartTimeListSize) {
+    if (timing_frames_info_[si].encode_start_list.size() ==
+        kMaxEncodeStartTimeListSize) {
       ++stalled_encoder_logged_messages_;
       if (stalled_encoder_logged_messages_ <= kMessagesThrottlingThreshold ||
           stalled_encoder_logged_messages_ % kThrottleRatio == 0) {
@@ -101,26 +95,25 @@ void FrameEncodeMetadataWriter::OnEncodeStarted(const VideoFrame& frame) {
       }
       frame_drop_callback_->OnDroppedFrame(
          EncodedImageCallback::DropReason::kDroppedByEncoder);
-      timing_frames_info_[si].frames.pop_front();
+      timing_frames_info_[si].encode_start_list.pop_front();
     }
-    timing_frames_info_[si].frames.emplace_back(metadata);
+    timing_frames_info_[si].encode_start_list.emplace_back(
+        rtp_timestamp, capture_time_ms, rtc::TimeMillis());
   }
 }
 
-void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx,
-                                               EncodedImage* encoded_image) {
+void FrameEncodeTimer::FillTimingInfo(size_t simulcast_svc_idx,
+                                      EncodedImage* encoded_image,
+                                      int64_t encode_done_ms) {
   rtc::CritScope cs(&lock_);
   absl::optional<size_t> outlier_frame_size;
   absl::optional<int64_t> encode_start_ms;
   uint8_t timing_flags = VideoSendTiming::kNotTriggered;
 
-  int64_t encode_done_ms = rtc::TimeMillis();
-
   // Encoders with internal sources do not call OnEncodeStarted
   // |timing_frames_info_| may be not filled here.
   if (!internal_source_) {
-    encode_start_ms =
-        ExtractEncodeStartTimeAndFillMetadata(simulcast_svc_idx, encoded_image);
+    encode_start_ms = ExtractEncodeStartTime(simulcast_svc_idx, encoded_image);
   }
 
   if (timing_frames_info_.size() > simulcast_svc_idx) {
@@ -183,7 +176,7 @@ void FrameEncodeMetadataWriter::FillTimingInfo(size_t simulcast_svc_idx,
   }
 }
 
-void FrameEncodeMetadataWriter::Reset() {
+void FrameEncodeTimer::Reset() {
   rtc::CritScope cs(&lock_);
   timing_frames_info_.clear();
   last_timing_frame_time_ms_ = -1;
@@ -191,40 +184,48 @@ void FrameEncodeMetadataWriter::Reset() {
   stalled_encoder_logged_messages_ = 0;
 }
 
-absl::optional<int64_t>
-FrameEncodeMetadataWriter::ExtractEncodeStartTimeAndFillMetadata(
+absl::optional<int64_t> FrameEncodeTimer::ExtractEncodeStartTime(
     size_t simulcast_svc_idx,
     EncodedImage* encoded_image) {
   absl::optional<int64_t> result;
   size_t num_simulcast_svc_streams = timing_frames_info_.size();
   if (simulcast_svc_idx < num_simulcast_svc_streams) {
-    auto metadata_list = &timing_frames_info_[simulcast_svc_idx].frames;
+    auto encode_start_list =
+        &timing_frames_info_[simulcast_svc_idx].encode_start_list;
     // Skip frames for which there was OnEncodeStarted but no OnEncodedImage
     // call. These are dropped by encoder internally.
     // Because some hardware encoders don't preserve capture timestamp we
    // use RTP timestamps here.
-    while (!metadata_list->empty() &&
+    while (!encode_start_list->empty() &&
            IsNewerTimestamp(encoded_image->Timestamp(),
-                            metadata_list->front().rtp_timestamp)) {
+                            encode_start_list->front().rtp_timestamp)) {
       frame_drop_callback_->OnDroppedFrame(
           EncodedImageCallback::DropReason::kDroppedByEncoder);
-      metadata_list->pop_front();
+      encode_start_list->pop_front();
    }
-    if (!metadata_list->empty() &&
-        metadata_list->front().rtp_timestamp == encoded_image->Timestamp()) {
-      result.emplace(metadata_list->front().encode_start_time_ms);
-      encoded_image->capture_time_ms_ =
-          metadata_list->front().timestamp_us / 1000;
-      encoded_image->ntp_time_ms_ = metadata_list->front().ntp_time_ms;
-      encoded_image->rotation_ = metadata_list->front().rotation;
-      encoded_image->SetColorSpace(metadata_list->front().color_space);
-      encoded_image->content_type_ =
-          (codec_settings_.mode == VideoCodecMode::kScreensharing)
-              ? VideoContentType::SCREENSHARE
-              : VideoContentType::UNSPECIFIED;
-      metadata_list->pop_front();
+    if (!encode_start_list->empty() &&
+        encode_start_list->front().rtp_timestamp ==
+            encoded_image->Timestamp()) {
+      result.emplace(encode_start_list->front().encode_start_time_ms);
+      if (encoded_image->capture_time_ms_ !=
+          encode_start_list->front().capture_time_ms) {
+        // Force correct capture timestamp.
+        encoded_image->capture_time_ms_ =
+            encode_start_list->front().capture_time_ms;
+        ++incorrect_capture_time_logged_messages_;
+        if (incorrect_capture_time_logged_messages_ <=
+                kMessagesThrottlingThreshold ||
+            incorrect_capture_time_logged_messages_ % kThrottleRatio == 0) {
+          RTC_LOG(LS_WARNING)
+              << "Encoder is not preserving capture timestamps.";
+          if (incorrect_capture_time_logged_messages_ ==
+              kMessagesThrottlingThreshold) {
+            RTC_LOG(LS_WARNING) << "Too many log messages. Further incorrect "
+                                   "timestamps warnings will be throttled.";
+          }
+        }
+      }
+      encode_start_list->pop_front();
     } else {
      ++reordered_frames_logged_messages_;
       if (reordered_frames_logged_messages_ <= kMessagesThrottlingThreshold ||
@@ -242,7 +243,7 @@ FrameEncodeMetadataWriter::ExtractEncodeStartTimeAndFillMetadata(
   return result;
 }
 
-size_t FrameEncodeMetadataWriter::NumSpatialLayers() const {
+size_t FrameEncodeTimer::NumSpatialLayers() const {
   size_t num_spatial_layers = codec_settings_.numberOfSimulcastStreams;
   if (codec_settings_.codecType == kVideoCodecVP9) {
     num_spatial_layers = std::max(
@@ -8,8 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
-#define VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
+#ifndef VIDEO_FRAME_ENCODE_TIMER_H_
+#define VIDEO_FRAME_ENCODE_TIMER_H_
 
 #include <list>
 #include <vector>
@@ -22,18 +22,20 @@
 
 namespace webrtc {
 
-class FrameEncodeMetadataWriter {
+class FrameEncodeTimer {
  public:
-  explicit FrameEncodeMetadataWriter(EncodedImageCallback* frame_drop_callback);
-  ~FrameEncodeMetadataWriter();
+  explicit FrameEncodeTimer(EncodedImageCallback* frame_drop_callback);
+  ~FrameEncodeTimer();
 
   void OnEncoderInit(const VideoCodec& codec, bool internal_source);
   void OnSetRates(const VideoBitrateAllocation& bitrate_allocation,
                   uint32_t framerate_fps);
 
-  void OnEncodeStarted(const VideoFrame& frame);
+  void OnEncodeStarted(uint32_t rtp_timestamp, int64_t capture_time_ms);
 
-  void FillTimingInfo(size_t simulcast_svc_idx, EncodedImage* encoded_image);
+  void FillTimingInfo(size_t simulcast_svc_idx,
+                      EncodedImage* encoded_image,
+                      int64_t encode_done_ms);
   void Reset();
 
  private:
@@ -41,23 +43,26 @@ class FrameEncodeMetadataWriter {
 
   // For non-internal-source encoders, returns encode started time and fixes
   // capture timestamp for the frame, if corrupted by the encoder.
-  absl::optional<int64_t> ExtractEncodeStartTimeAndFillMetadata(
-      size_t simulcast_svc_idx,
-      EncodedImage* encoded_image) RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  absl::optional<int64_t> ExtractEncodeStartTime(size_t simulcast_svc_idx,
+                                                 EncodedImage* encoded_image)
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
-  struct FrameMetadata {
+  struct EncodeStartTimeRecord {
+    EncodeStartTimeRecord(uint32_t timestamp,
+                          int64_t capture_time,
+                          int64_t encode_start_time)
+        : rtp_timestamp(timestamp),
+          capture_time_ms(capture_time),
+          encode_start_time_ms(encode_start_time) {}
     uint32_t rtp_timestamp;
+    int64_t capture_time_ms;
     int64_t encode_start_time_ms;
-    int64_t ntp_time_ms = 0;
-    int64_t timestamp_us = 0;
-    VideoRotation rotation = kVideoRotation_0;
-    absl::optional<ColorSpace> color_space;
   };
   struct TimingFramesLayerInfo {
     TimingFramesLayerInfo();
    ~TimingFramesLayerInfo();
     size_t target_bitrate_bytes_per_sec = 0;
-    std::list<FrameMetadata> frames;
+    std::list<EncodeStartTimeRecord> encode_start_list;
   };
 
   rtc::CriticalSection lock_;
@@ -69,10 +74,11 @@ class FrameEncodeMetadataWriter {
   // Separate instance for each simulcast stream or spatial layer.
   std::vector<TimingFramesLayerInfo> timing_frames_info_ RTC_GUARDED_BY(&lock_);
   int64_t last_timing_frame_time_ms_ RTC_GUARDED_BY(&lock_);
+  size_t incorrect_capture_time_logged_messages_ RTC_GUARDED_BY(&lock_);
   size_t reordered_frames_logged_messages_ RTC_GUARDED_BY(&lock_);
   size_t stalled_encoder_logged_messages_ RTC_GUARDED_BY(&lock_);
 };
 
 }  // namespace webrtc
 
-#endif  // VIDEO_FRAME_ENCODE_METADATA_WRITER_H_
+#endif  // VIDEO_FRAME_ENCODE_TIMER_H_
@ -8,25 +8,18 @@
|
|||||||
* be found in the AUTHORS file in the root of the source tree.
|
* be found in the AUTHORS file in the root of the source tree.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include "video/frame_encode_metadata_writer.h"
|
|
||||||
|
|
||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
#include "api/video/i420_buffer.h"
|
|
||||||
#include "api/video/video_frame.h"
|
|
||||||
#include "api/video/video_timing.h"
|
#include "api/video/video_timing.h"
|
||||||
#include "common_video/test/utilities.h"
|
|
||||||
#include "modules/video_coding/include/video_coding_defines.h"
|
#include "modules/video_coding/include/video_coding_defines.h"
|
||||||
#include "rtc_base/time_utils.h"
|
#include "rtc_base/time_utils.h"
|
||||||
#include "test/gtest.h"
|
#include "test/gtest.h"
|
||||||
|
#include "video/frame_encode_timer.h"
|
||||||
|
|
||||||
namespace webrtc {
|
namespace webrtc {
|
||||||
namespace test {
|
namespace test {
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
const rtc::scoped_refptr<I420Buffer> kFrameBuffer = I420Buffer::Create(4, 4);
|
|
||||||
|
|
||||||
inline size_t FrameSize(const size_t& min_frame_size,
|
inline size_t FrameSize(const size_t& min_frame_size,
|
||||||
const size_t& max_frame_size,
|
const size_t& max_frame_size,
|
||||||
const int& s,
|
const int& s,
|
||||||
@ -72,7 +65,7 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
|
|||||||
const int num_streams,
|
const int num_streams,
|
||||||
const int num_frames) {
|
const int num_frames) {
|
||||||
FakeEncodedImageCallback sink;
|
FakeEncodedImageCallback sink;
|
||||||
FrameEncodeMetadataWriter encode_timer(&sink);
|
FrameEncodeTimer encode_timer(&sink);
|
||||||
VideoCodec codec_settings;
|
VideoCodec codec_settings;
|
||||||
codec_settings.numberOfSimulcastStreams = num_streams;
|
codec_settings.numberOfSimulcastStreams = num_streams;
|
||||||
codec_settings.timing_frame_thresholds = {delay_ms,
|
codec_settings.timing_frame_thresholds = {delay_ms,
|
||||||
@ -90,12 +83,8 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
|
|||||||
int64_t current_timestamp = 0;
|
int64_t current_timestamp = 0;
|
||||||
for (int i = 0; i < num_frames; ++i) {
|
for (int i = 0; i < num_frames; ++i) {
|
||||||
current_timestamp += 1;
|
current_timestamp += 1;
|
||||||
VideoFrame frame = VideoFrame::Builder()
|
encode_timer.OnEncodeStarted(static_cast<uint32_t>(current_timestamp * 90),
|
||||||
.set_timestamp_rtp(current_timestamp * 90)
|
current_timestamp);
|
||||||
.set_timestamp_ms(current_timestamp)
|
|
||||||
.set_video_frame_buffer(kFrameBuffer)
|
|
||||||
.build();
|
|
||||||
encode_timer.OnEncodeStarted(frame);
|
|
||||||
for (int si = 0; si < num_streams; ++si) {
|
for (int si = 0; si < num_streams; ++si) {
|
||||||
// every (5+s)-th frame is dropped on s-th stream by design.
|
// every (5+s)-th frame is dropped on s-th stream by design.
|
||||||
bool dropped = i % (5 + si) == 0;
|
bool dropped = i % (5 + si) == 0;
|
||||||
@ -112,7 +101,7 @@ std::vector<std::vector<FrameType>> GetTimingFrames(
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
encode_timer.FillTimingInfo(si, &image);
|
encode_timer.FillTimingInfo(si, &image, current_timestamp);
|
||||||
|
|
||||||
if (IsTimingFrame(image)) {
|
if (IsTimingFrame(image)) {
|
||||||
result[si].push_back(FrameType::kTiming);
|
result[si].push_back(FrameType::kTiming);
|
||||||
@ -201,7 +190,7 @@ TEST(FrameEncodeTimerTest, NoTimingFrameIfNoEncodeStartTime) {
|
|||||||
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
|
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
|
||||||
|
|
||||||
FakeEncodedImageCallback sink;
|
FakeEncodedImageCallback sink;
|
||||||
FrameEncodeMetadataWriter encode_timer(&sink);
|
FrameEncodeTimer encode_timer(&sink);
|
||||||
VideoCodec codec_settings;
|
VideoCodec codec_settings;
|
||||||
// Make all frames timing frames.
|
// Make all frames timing frames.
|
||||||
codec_settings.timing_frame_thresholds.delay_ms = 1;
|
codec_settings.timing_frame_thresholds.delay_ms = 1;
|
||||||
@ -211,20 +200,16 @@ TEST(FrameEncodeTimerTest, NoTimingFrameIfNoEncodeStartTime) {
|
|||||||
encode_timer.OnSetRates(bitrate_allocation, 30);
|
encode_timer.OnSetRates(bitrate_allocation, 30);
|
||||||
|
|
||||||
// Verify a single frame works with encode start time set.
|
// Verify a single frame works with encode start time set.
|
||||||
VideoFrame frame = VideoFrame::Builder()
|
encode_timer.OnEncodeStarted(static_cast<uint32_t>(timestamp * 90),
|
||||||
.set_timestamp_ms(timestamp)
|
timestamp);
|
||||||
.set_timestamp_rtp(timestamp * 90)
|
encode_timer.FillTimingInfo(0, &image, timestamp);
|
||||||
.set_video_frame_buffer(kFrameBuffer)
|
|
||||||
.build();
|
|
||||||
encode_timer.OnEncodeStarted(frame);
|
|
||||||
encode_timer.FillTimingInfo(0, &image);
|
|
||||||
EXPECT_TRUE(IsTimingFrame(image));
|
EXPECT_TRUE(IsTimingFrame(image));
|
||||||
|
|
||||||
// New frame, now skip OnEncodeStarted. Should not result in timing frame.
|
// New frame, now skip OnEncodeStarted. Should not result in timing frame.
|
||||||
image.capture_time_ms_ = ++timestamp;
|
image.capture_time_ms_ = ++timestamp;
|
||||||
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
|
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
|
||||||
image.timing_ = EncodedImage::Timing();
|
image.timing_ = EncodedImage::Timing();
|
||||||
encode_timer.FillTimingInfo(0, &image);
|
encode_timer.FillTimingInfo(0, &image, timestamp);
|
||||||
EXPECT_FALSE(IsTimingFrame(image));
|
EXPECT_FALSE(IsTimingFrame(image));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -241,7 +226,7 @@ TEST(FrameEncodeTimerTest, AdjustsCaptureTimeForInternalSourceEncoder) {
|
|||||||
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
|
image.SetTimestamp(static_cast<uint32_t>(timestamp * 90));
|
||||||
|
|
||||||
FakeEncodedImageCallback sink;
|
FakeEncodedImageCallback sink;
|
||||||
FrameEncodeMetadataWriter encode_timer(&sink);
|
FrameEncodeTimer encode_timer(&sink);
|
||||||
|
|
||||||
VideoCodec codec_settings;
|
VideoCodec codec_settings;
|
||||||
// Make all frames timing frames.
|
// Make all frames timing frames.
|
||||||
@ -253,7 +238,7 @@ TEST(FrameEncodeTimerTest, AdjustsCaptureTimeForInternalSourceEncoder) {
|
|||||||
encode_timer.OnSetRates(bitrate_allocation, 30);
|
encode_timer.OnSetRates(bitrate_allocation, 30);
|
||||||
|
|
||||||
// Verify a single frame without encode timestamps isn't a timing frame.
|
// Verify a single frame without encode timestamps isn't a timing frame.
|
||||||
encode_timer.FillTimingInfo(0, &image);
|
encode_timer.FillTimingInfo(0, &image, timestamp);
|
||||||
EXPECT_FALSE(IsTimingFrame(image));
|
EXPECT_FALSE(IsTimingFrame(image));
|
||||||
|
|
||||||
// New frame, but this time with encode timestamps set in timing_.
|
// New frame, but this time with encode timestamps set in timing_.
|
||||||
@ -263,14 +248,14 @@ TEST(FrameEncodeTimerTest, AdjustsCaptureTimeForInternalSourceEncoder) {
|
|||||||
image.timing_ = EncodedImage::Timing();
|
image.timing_ = EncodedImage::Timing();
|
||||||
image.timing_.encode_start_ms = timestamp + kEncodeStartDelayMs;
|
image.timing_.encode_start_ms = timestamp + kEncodeStartDelayMs;
|
||||||
image.timing_.encode_finish_ms = timestamp + kEncodeFinishDelayMs;
|
image.timing_.encode_finish_ms = timestamp + kEncodeFinishDelayMs;
|
||||||
|
const int64_t kEncodeDoneTimestamp = 1234567;
|
||||||
encode_timer.FillTimingInfo(0, &image);
|
encode_timer.FillTimingInfo(0, &image, kEncodeDoneTimestamp);
|
||||||
EXPECT_TRUE(IsTimingFrame(image));
|
EXPECT_TRUE(IsTimingFrame(image));
|
||||||
|
|
||||||
// Frame is captured kEncodeFinishDelayMs before it's encoded, so restored
|
// Frame is captured kEncodeFinishDelayMs before it's encoded, so restored
|
||||||
// capture timestamp should be kEncodeFinishDelayMs in the past.
|
// capture timestamp should be kEncodeFinishDelayMs in the past.
|
||||||
EXPECT_NEAR(image.capture_time_ms_, rtc::TimeMillis() - kEncodeFinishDelayMs,
|
EXPECT_EQ(image.capture_time_ms_,
|
||||||
1);
|
kEncodeDoneTimestamp - kEncodeFinishDelayMs);
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
|
TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
|
||||||
@ -280,7 +265,7 @@ TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
|
|||||||
const int64_t kTimestampMs4 = 47721870;
|
const int64_t kTimestampMs4 = 47721870;
|
||||||
|
|
||||||
FakeEncodedImageCallback sink;
|
FakeEncodedImageCallback sink;
|
||||||
FrameEncodeMetadataWriter encode_timer(&sink);
|
FrameEncodeTimer encode_timer(&sink);
|
||||||
encode_timer.OnEncoderInit(VideoCodec(), false);
|
encode_timer.OnEncoderInit(VideoCodec(), false);
|
||||||
// Any non-zero bitrate needed to be set before the first frame.
|
// Any non-zero bitrate needed to be set before the first frame.
|
||||||
VideoBitrateAllocation bitrate_allocation;
|
VideoBitrateAllocation bitrate_allocation;
|
||||||
@ -288,27 +273,17 @@ TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
|
|||||||
encode_timer.OnSetRates(bitrate_allocation, 30);
|
encode_timer.OnSetRates(bitrate_allocation, 30);
|
||||||
|
|
||||||
EncodedImage image;
|
EncodedImage image;
|
||||||
VideoFrame frame = VideoFrame::Builder()
|
|
||||||
.set_timestamp_rtp(kTimestampMs1 * 90)
|
|
||||||
.set_timestamp_ms(kTimestampMs1)
|
|
||||||
.set_video_frame_buffer(kFrameBuffer)
|
|
||||||
.build();
|
|
||||||
|
|
||||||
image.capture_time_ms_ = kTimestampMs1;
|
image.capture_time_ms_ = kTimestampMs1;
|
||||||
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
||||||
frame.set_timestamp(image.capture_time_ms_ * 90);
|
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
|
||||||
frame.set_timestamp_us(image.capture_time_ms_ * 1000);
|
|
||||||
encode_timer.OnEncodeStarted(frame);
|
|
||||||
|
|
||||||
EXPECT_EQ(0u, sink.GetNumFramesDropped());
|
EXPECT_EQ(0u, sink.GetNumFramesDropped());
|
||||||
encode_timer.FillTimingInfo(0, &image);
|
encode_timer.FillTimingInfo(0, &image, kTimestampMs1);
|
||||||
|
|
||||||
image.capture_time_ms_ = kTimestampMs2;
|
image.capture_time_ms_ = kTimestampMs2;
|
||||||
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
||||||
image.timing_ = EncodedImage::Timing();
|
image.timing_ = EncodedImage::Timing();
|
||||||
frame.set_timestamp(image.capture_time_ms_ * 90);
|
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
|
||||||
frame.set_timestamp_us(image.capture_time_ms_ * 1000);
|
|
||||||
encode_timer.OnEncodeStarted(frame);
|
|
||||||
// No OnEncodedImageCall for timestamp2. Yet, at this moment it's not known
|
// No OnEncodedImageCall for timestamp2. Yet, at this moment it's not known
|
||||||
// that frame with timestamp2 was dropped.
|
// that frame with timestamp2 was dropped.
|
||||||
EXPECT_EQ(0u, sink.GetNumFramesDropped());
|
EXPECT_EQ(0u, sink.GetNumFramesDropped());
|
||||||
@ -316,19 +291,15 @@ TEST(FrameEncodeTimerTest, NotifiesAboutDroppedFrames) {
|
|||||||
image.capture_time_ms_ = kTimestampMs3;
|
image.capture_time_ms_ = kTimestampMs3;
|
||||||
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
||||||
image.timing_ = EncodedImage::Timing();
|
image.timing_ = EncodedImage::Timing();
|
||||||
frame.set_timestamp(image.capture_time_ms_ * 90);
|
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
|
||||||
frame.set_timestamp_us(image.capture_time_ms_ * 1000);
|
encode_timer.FillTimingInfo(0, &image, kTimestampMs3);
|
||||||
encode_timer.OnEncodeStarted(frame);
|
|
||||||
encode_timer.FillTimingInfo(0, &image);
|
|
||||||
EXPECT_EQ(1u, sink.GetNumFramesDropped());
|
EXPECT_EQ(1u, sink.GetNumFramesDropped());
|
||||||
|
|
||||||
image.capture_time_ms_ = kTimestampMs4;
|
image.capture_time_ms_ = kTimestampMs4;
|
||||||
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
|
||||||
image.timing_ = EncodedImage::Timing();
|
image.timing_ = EncodedImage::Timing();
|
||||||
frame.set_timestamp(image.capture_time_ms_ * 90);
|
encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
|
||||||
frame.set_timestamp_us(image.capture_time_ms_ * 1000);
|
encode_timer.FillTimingInfo(0, &image, kTimestampMs4);
|
||||||
encode_timer.OnEncodeStarted(frame);
|
|
||||||
encode_timer.FillTimingInfo(0, &image);
|
|
||||||
EXPECT_EQ(1u, sink.GetNumFramesDropped());
|
EXPECT_EQ(1u, sink.GetNumFramesDropped());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -337,7 +308,7 @@ TEST(FrameEncodeTimerTest, RestoresCaptureTimestamps) {
|
|||||||
const int64_t kTimestampMs = 123456;
|
const int64_t kTimestampMs = 123456;
|
||||||
FakeEncodedImageCallback sink;
|
FakeEncodedImageCallback sink;
|
||||||
|
|
||||||
FrameEncodeMetadataWriter encode_timer(&sink);
|
FrameEncodeTimer encode_timer(&sink);
|
||||||
encode_timer.OnEncoderInit(VideoCodec(), false);
|
encode_timer.OnEncoderInit(VideoCodec(), false);
|
||||||
// Any non-zero bitrate needed to be set before the first frame.
|
// Any non-zero bitrate needed to be set before the first frame.
|
||||||
VideoBitrateAllocation bitrate_allocation;
|
VideoBitrateAllocation bitrate_allocation;
|
||||||
@@ -346,93 +317,11 @@ TEST(FrameEncodeTimerTest, RestoresCaptureTimestamps) {

   image.capture_time_ms_ = kTimestampMs;  // Correct timestamp.
   image.SetTimestamp(static_cast<uint32_t>(image.capture_time_ms_ * 90));
-  VideoFrame frame = VideoFrame::Builder()
-                         .set_timestamp_ms(image.capture_time_ms_)
-                         .set_timestamp_rtp(image.capture_time_ms_ * 90)
-                         .set_video_frame_buffer(kFrameBuffer)
-                         .build();
-  encode_timer.OnEncodeStarted(frame);
+  encode_timer.OnEncodeStarted(image.Timestamp(), image.capture_time_ms_);
   image.capture_time_ms_ = 0;  // Incorrect timestamp.
-  encode_timer.FillTimingInfo(0, &image);
+  encode_timer.FillTimingInfo(0, &image, kTimestampMs);
   EXPECT_EQ(kTimestampMs, image.capture_time_ms_);
 }

-TEST(FrameEncodeTimerTest, CopiesRotation) {
-  EncodedImage image;
-  const int64_t kTimestampMs = 123456;
-  FakeEncodedImageCallback sink;
-
-  FrameEncodeMetadataWriter encode_timer(&sink);
-  encode_timer.OnEncoderInit(VideoCodec(), false);
-  // Any non-zero bitrate needed to be set before the first frame.
-  VideoBitrateAllocation bitrate_allocation;
-  bitrate_allocation.SetBitrate(0, 0, 500000);
-  encode_timer.OnSetRates(bitrate_allocation, 30);
-
-  image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
-  VideoFrame frame = VideoFrame::Builder()
-                         .set_timestamp_ms(kTimestampMs)
-                         .set_timestamp_rtp(kTimestampMs * 90)
-                         .set_rotation(kVideoRotation_180)
-                         .set_video_frame_buffer(kFrameBuffer)
-                         .build();
-  encode_timer.OnEncodeStarted(frame);
-  encode_timer.FillTimingInfo(0, &image);
-  EXPECT_EQ(kVideoRotation_180, image.rotation_);
-}
-
-TEST(FrameEncodeTimerTest, SetsContentType) {
-  EncodedImage image;
-  const int64_t kTimestampMs = 123456;
-  FakeEncodedImageCallback sink;
-
-  FrameEncodeMetadataWriter encode_timer(&sink);
-  VideoCodec codec;
-  codec.mode = VideoCodecMode::kScreensharing;
-  encode_timer.OnEncoderInit(codec, false);
-  // Any non-zero bitrate needed to be set before the first frame.
-  VideoBitrateAllocation bitrate_allocation;
-  bitrate_allocation.SetBitrate(0, 0, 500000);
-  encode_timer.OnSetRates(bitrate_allocation, 30);
-
-  image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
-  VideoFrame frame = VideoFrame::Builder()
-                         .set_timestamp_ms(kTimestampMs)
-                         .set_timestamp_rtp(kTimestampMs * 90)
-                         .set_rotation(kVideoRotation_180)
-                         .set_video_frame_buffer(kFrameBuffer)
-                         .build();
-  encode_timer.OnEncodeStarted(frame);
-  encode_timer.FillTimingInfo(0, &image);
-  EXPECT_EQ(VideoContentType::SCREENSHARE, image.content_type_);
-}
-
-TEST(FrameEncodeTimerTest, CopiesColorSpace) {
-  EncodedImage image;
-  const int64_t kTimestampMs = 123456;
-  FakeEncodedImageCallback sink;
-
-  FrameEncodeMetadataWriter encode_timer(&sink);
-  encode_timer.OnEncoderInit(VideoCodec(), false);
-  // Any non-zero bitrate needed to be set before the first frame.
-  VideoBitrateAllocation bitrate_allocation;
-  bitrate_allocation.SetBitrate(0, 0, 500000);
-  encode_timer.OnSetRates(bitrate_allocation, 30);
-
-  webrtc::ColorSpace color_space =
-      CreateTestColorSpace(/*with_hdr_metadata=*/true);
-  image.SetTimestamp(static_cast<uint32_t>(kTimestampMs * 90));
-  VideoFrame frame = VideoFrame::Builder()
-                         .set_timestamp_ms(kTimestampMs)
-                         .set_timestamp_rtp(kTimestampMs * 90)
-                         .set_color_space(color_space)
-                         .set_video_frame_buffer(kFrameBuffer)
-                         .build();
-  encode_timer.OnEncodeStarted(frame);
-  encode_timer.FillTimingInfo(0, &image);
-  ASSERT_NE(image.ColorSpace(), nullptr);
-  EXPECT_EQ(color_space, *image.ColorSpace());
-}
-
 }  // namespace test
 }  // namespace webrtc
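Taken together, the call sites restored in this unittest file pin down the interface of the FrameEncodeTimer class the revert brings back. A declaration-only sketch inferred from those call sites; argument names and exact parameter types are illustrative, not copied from the real video/frame_encode_timer.h header:

// Declaration-only sketch inferred from the call sites above; argument
// names are illustrative, not copied from the real header.
#include <cstddef>
#include <cstdint>

class EncodedImageCallback;  // Sink notified, e.g., about dropped frames.
class VideoCodec;
class VideoBitrateAllocation;
class EncodedImage;

class FrameEncodeTimer {
 public:
  explicit FrameEncodeTimer(EncodedImageCallback* sink);
  void OnEncoderInit(const VideoCodec& codec, bool internal_source);
  void OnSetRates(const VideoBitrateAllocation& allocation,
                  uint32_t framerate_fps);
  void OnEncodeStarted(uint32_t rtp_timestamp, int64_t capture_time_ms);
  void FillTimingInfo(size_t simulcast_svc_idx,
                      EncodedImage* image,
                      int64_t encode_done_ms);
  void Reset();
};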
@@ -14,12 +14,9 @@
 #include "test/gmock.h"
 #include "test/gtest.h"

-#include "absl/memory/memory.h"
 #include "api/task_queue/default_task_queue_factory.h"
-#include "api/test/video/function_video_decoder_factory.h"
 #include "api/video_codecs/video_decoder.h"
 #include "call/rtp_stream_receiver_controller.h"
-#include "common_video/test/utilities.h"
 #include "media/base/fake_video_renderer.h"
 #include "modules/pacing/packet_router.h"
 #include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
@@ -27,7 +24,6 @@
 #include "rtc_base/critical_section.h"
 #include "rtc_base/event.h"
 #include "system_wrappers/include/clock.h"
-#include "test/fake_decoder.h"
 #include "test/field_trial.h"
 #include "test/video_decoder_proxy_factory.h"
 #include "video/call_stats.h"
@@ -66,12 +62,6 @@ class MockVideoDecoder : public VideoDecoder {

 class FrameObjectFake : public video_coding::EncodedFrame {
  public:
-  void SetPayloadType(uint8_t payload_type) { _payloadType = payload_type; }
-
-  void SetRotation(const VideoRotation& rotation) { rotation_ = rotation; }
-
-  void SetNtpTime(int64_t ntp_time_ms) { ntp_time_ms_ = ntp_time_ms; }
-
   int64_t ReceivedTime() const override { return 0; }

   int64_t RenderTime() const override { return _renderTimeMs; }
@@ -222,90 +212,4 @@ TEST_F(VideoReceiveStreamTest, PlayoutDelayPreservesDefaultMinValue) {
   EXPECT_EQ(default_min_playout_latency, timing_->min_playout_delay());
 }

-class VideoReceiveStreamTestWithFakeDecoder : public ::testing::Test {
- public:
-  VideoReceiveStreamTestWithFakeDecoder()
-      : fake_decoder_factory_(
-            []() { return absl::make_unique<test::FakeDecoder>(); }),
-        process_thread_(ProcessThread::Create("TestThread")),
-        task_queue_factory_(CreateDefaultTaskQueueFactory()),
-        config_(&mock_transport_),
-        call_stats_(Clock::GetRealTimeClock(), process_thread_.get()) {}
-
-  void SetUp() {
-    constexpr int kDefaultNumCpuCores = 2;
-    config_.rtp.remote_ssrc = 1111;
-    config_.rtp.local_ssrc = 2222;
-    config_.renderer = &fake_renderer_;
-    VideoReceiveStream::Decoder fake_decoder;
-    fake_decoder.payload_type = 99;
-    fake_decoder.video_format = SdpVideoFormat("VP8");
-    fake_decoder.decoder_factory = &fake_decoder_factory_;
-    config_.decoders.push_back(fake_decoder);
-    Clock* clock = Clock::GetRealTimeClock();
-    timing_ = new VCMTiming(clock);
-
-    video_receive_stream_.reset(new webrtc::internal::VideoReceiveStream(
-        task_queue_factory_.get(), &rtp_stream_receiver_controller_,
-        kDefaultNumCpuCores, &packet_router_, config_.Copy(),
-        process_thread_.get(), &call_stats_, clock, timing_));
-  }
-
- protected:
-  test::FunctionVideoDecoderFactory fake_decoder_factory_;
-  std::unique_ptr<ProcessThread> process_thread_;
-  const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
-  VideoReceiveStream::Config config_;
-  CallStats call_stats_;
-  cricket::FakeVideoRenderer fake_renderer_;
-  MockTransport mock_transport_;
-  PacketRouter packet_router_;
-  RtpStreamReceiverController rtp_stream_receiver_controller_;
-  std::unique_ptr<webrtc::internal::VideoReceiveStream> video_receive_stream_;
-  VCMTiming* timing_;
-};
-
-TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesNtpTime) {
-  const int64_t kNtpTimestamp = 12345;
-  std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
-  test_frame->SetPayloadType(99);
-  test_frame->id.picture_id = 0;
-  test_frame->SetNtpTime(kNtpTimestamp);
-
-  video_receive_stream_->Start();
-  video_receive_stream_->OnCompleteFrame(std::move(test_frame));
-  EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
-  EXPECT_EQ(kNtpTimestamp, fake_renderer_.ntp_time_ms());
-}
-
-TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesRotation) {
-  const webrtc::VideoRotation kRotation = webrtc::kVideoRotation_180;
-  std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
-  test_frame->SetPayloadType(99);
-  test_frame->id.picture_id = 0;
-  test_frame->SetRotation(kRotation);
-
-  video_receive_stream_->Start();
-  video_receive_stream_->OnCompleteFrame(std::move(test_frame));
-  EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
-
-  EXPECT_EQ(kRotation, fake_renderer_.rotation());
-}
-
-TEST_F(VideoReceiveStreamTestWithFakeDecoder, PassesColorSpace) {
-  std::unique_ptr<FrameObjectFake> test_frame(new FrameObjectFake());
-  test_frame->SetPayloadType(99);
-  test_frame->id.picture_id = 0;
-  webrtc::ColorSpace color_space =
-      CreateTestColorSpace(/*with_hdr_metadata=*/true);
-  test_frame->SetColorSpace(color_space);
-
-  video_receive_stream_->Start();
-  video_receive_stream_->OnCompleteFrame(std::move(test_frame));
-  EXPECT_TRUE(fake_renderer_.WaitForRenderedFrame(kDefaultTimeOutMs));
-
-  ASSERT_TRUE(fake_renderer_.color_space().has_value());
-  EXPECT_EQ(color_space, *fake_renderer_.color_space());
-}
-
 }  // namespace webrtc
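The deleted receive-side tests above, in what appears to be video/video_receive_stream_unittest.cc, all follow the same pattern: inject a FrameObjectFake carrying one piece of metadata, then block on FakeVideoRenderer::WaitForRenderedFrame() before inspecting what reached the renderer. A self-contained sketch of such a wait-for-event helper, using standard-library primitives rather than the rtc:: equivalents WebRTC actually uses:

// Illustrative wait-for-rendered-frame helper using standard-library
// primitives; WebRTC's own renderer uses rtc:: equivalents instead.
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

class FrameRenderedEvent {
 public:
  // Signalled from OnFrame() once a frame reaches the renderer.
  void Set() {
    std::lock_guard<std::mutex> lock(mutex_);
    rendered_ = true;
    cv_.notify_all();
  }

  // Returns true if a frame was rendered within the timeout.
  bool WaitForMs(int64_t timeout_ms) {
    std::unique_lock<std::mutex> lock(mutex_);
    return cv_.wait_for(lock, std::chrono::milliseconds(timeout_ms),
                        [this] { return rendered_; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool rendered_ = false;
};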
@@ -490,7 +490,7 @@ VideoStreamEncoder::VideoStreamEncoder(
       input_framerate_(kFrameRateAvergingWindowSizeMs, 1000),
       pending_frame_drops_(0),
       next_frame_types_(1, VideoFrameType::kVideoFrameDelta),
-      frame_encode_metadata_writer_(this),
+      frame_encoder_timer_(this),
       experiment_groups_(GetExperimentGroups()),
       next_frame_id_(0),
       encoder_queue_(task_queue_factory->CreateTaskQueue(
@@ -745,11 +745,10 @@ void VideoStreamEncoder::ReconfigureEncoder() {
   } else {
     encoder_initialized_ = true;
     encoder_->RegisterEncodeCompleteCallback(this);
-    frame_encode_metadata_writer_.OnEncoderInit(send_codec_,
-                                                HasInternalSource());
+    frame_encoder_timer_.OnEncoderInit(send_codec_, HasInternalSource());
   }

-  frame_encode_metadata_writer_.Reset();
+  frame_encoder_timer_.Reset();
   last_encode_info_ms_ = absl::nullopt;
 }

@@ -1075,7 +1074,7 @@ void VideoStreamEncoder::SetEncoderRates(

   if (settings_changes) {
     encoder_->SetRates(rate_settings);
-    frame_encode_metadata_writer_.OnSetRates(
+    frame_encoder_timer_.OnSetRates(
         rate_settings.bitrate,
         static_cast<uint32_t>(rate_settings.framerate_fps + 0.5));
   }
@@ -1335,7 +1334,8 @@ void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
   TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
                out_frame.timestamp());

-  frame_encode_metadata_writer_.OnEncodeStarted(out_frame);
+  frame_encoder_timer_.OnEncodeStarted(out_frame.timestamp(),
+                                       out_frame.render_time_ms());

   const int32_t encode_status = encoder_->Encode(out_frame, &next_frame_types_);

@@ -1405,7 +1405,9 @@ EncodedImageCallback::Result VideoStreamEncoder::OnEncodedImage(
   const size_t spatial_idx = encoded_image.SpatialIndex().value_or(0);
   EncodedImage image_copy(encoded_image);

-  frame_encode_metadata_writer_.FillTimingInfo(spatial_idx, &image_copy);
+  frame_encoder_timer_.FillTimingInfo(
+      spatial_idx, &image_copy,
+      rtc::TimeMicros() / rtc::kNumMicrosecsPerMillisec);

   // Piggyback ALR experiment group id and simulcast id into the content type.
   const uint8_t experiment_id =
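These two hunks are the send-side halves of one measurement: OnEncodeStarted() runs just before encoder_->Encode(), and FillTimingInfo() runs when the encoded image comes back, now with the completion time passed in explicitly as rtc::TimeMicros() / rtc::kNumMicrosecsPerMillisec rather than, presumably, sampled inside the writer as the two-argument version did. A minimal sketch of the arithmetic this enables; the struct and field names are illustrative, not the real internals:

#include <cstdint>

// Names illustrative; only the arithmetic is the point.
struct PendingFrameInfo {
  uint32_t rtp_timestamp;   // From out_frame.timestamp().
  int64_t capture_time_ms;  // From out_frame.render_time_ms().
  int64_t encode_start_ms;  // Wall clock when OnEncodeStarted() ran.
};

// encode_done_ms is what the caller now supplies, e.g.
// rtc::TimeMicros() / rtc::kNumMicrosecsPerMillisec (microseconds
// truncated to milliseconds).
int64_t EncodeDurationMs(const PendingFrameInfo& info, int64_t encode_done_ms) {
  return encode_done_ms - info.encode_start_ms;
}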
@@ -37,7 +37,7 @@
 #include "rtc_base/synchronization/sequence_checker.h"
 #include "rtc_base/task_queue.h"
 #include "video/encoder_bitrate_adjuster.h"
-#include "video/frame_encode_metadata_writer.h"
+#include "video/frame_encode_timer.h"
 #include "video/overuse_frame_detector.h"

 namespace webrtc {
@@ -345,7 +345,7 @@ class VideoStreamEncoder : public VideoStreamEncoderInterface,
   // turn this into a simple bool |pending_keyframe_request_|.
   std::vector<VideoFrameType> next_frame_types_ RTC_GUARDED_BY(&encoder_queue_);

-  FrameEncodeMetadataWriter frame_encode_metadata_writer_;
+  FrameEncodeTimer frame_encoder_timer_;

   // Experiment groups parsed from field trials for realtime video ([0]) and
   // screenshare ([1]). 0 means no group specified. Positive values are
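One structural note on the constructor hunk earlier: the timer is initialized with `this` because VideoStreamEncoder is itself an EncodedImageCallback, which the timer uses to report events back, such as dropped frames (compare FakeEncodedImageCallback and GetNumFramesDropped() in the unittest hunks). A compilable sketch of that wiring with stand-in types; the real WebRTC interfaces carry more arguments and a Result return type:

// Stand-in types so the wiring pattern compiles on its own; the real
// WebRTC interfaces carry more arguments and a Result return type.
struct EncodedImage {};

class EncodedImageCallback {
 public:
  virtual ~EncodedImageCallback() = default;
  virtual void OnEncodedImage(const EncodedImage& image) = 0;
};

class FrameEncodeTimer {
 public:
  explicit FrameEncodeTimer(EncodedImageCallback* sink) : sink_(sink) {}

 private:
  EncodedImageCallback* sink_;  // Notified, e.g., about dropped frames.
};

// VideoStreamEncoder passes `this`: the same object that receives the
// encoded images also owns the timer that annotates them.
class EncoderSketch : public EncodedImageCallback {
 public:
  EncoderSketch() : frame_encoder_timer_(this) {}
  void OnEncodedImage(const EncodedImage&) override {}

 private:
  FrameEncodeTimer frame_encoder_timer_;
};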