Get frame type, width and height from the generic descriptor.
Bug: webrtc:9361
Change-Id: I5558ba02f921880f9c4677b85830c7c18faffea4
Reviewed-on: https://webrtc-review.googlesource.com/c/106382
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Reviewed-by: Niels Moller <nisse@webrtc.org>
Commit-Queue: Philip Eliasson <philipel@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#25231}
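In short: the sender writes the encoded width and height into the generic frame descriptor only for key frames, and the receiver derives the frame type from the descriptor itself, treating a frame with no listed dependencies as a key frame and copying the signalled resolution into the video header. The minimal sketch below illustrates that receive-side rule; ParsedDescriptor and DeriveFrameType are hypothetical stand-ins for the parsed RtpGenericFrameDescriptor, not the actual WebRTC API.

// Minimal sketch of the receive-side rule introduced by this change. The
// ParsedDescriptor struct and DeriveFrameType() helper are hypothetical
// stand-ins for the parsed RtpGenericFrameDescriptor, not real WebRTC types.
#include <cstdint>
#include <iostream>
#include <vector>

enum class SketchFrameType { kKey, kDelta };

struct ParsedDescriptor {
  std::vector<uint16_t> frame_dependency_diffs;  // empty for key frames
  uint16_t width = 0;   // resolution is only signalled on key frames
  uint16_t height = 0;
};

// A frame whose descriptor lists no dependencies is treated as a key frame;
// any frame that references earlier frames is a delta frame.
SketchFrameType DeriveFrameType(const ParsedDescriptor& descriptor) {
  return descriptor.frame_dependency_diffs.empty() ? SketchFrameType::kKey
                                                   : SketchFrameType::kDelta;
}

int main() {
  ParsedDescriptor key_frame;
  key_frame.width = 480;   // the sender wrote the resolution for the key frame
  key_frame.height = 360;

  ParsedDescriptor delta_frame;
  delta_frame.frame_dependency_diffs = {1, 2};  // depends on two earlier frames

  std::cout << (DeriveFrameType(key_frame) == SketchFrameType::kKey) << "\n";
  std::cout << (DeriveFrameType(delta_frame) == SketchFrameType::kDelta) << "\n";
  std::cout << key_frame.width << "x" << key_frame.height << "\n";  // 480x360
}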
@@ -144,6 +144,8 @@ RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader(
   rtp_video_header.rotation = image.rotation_;
   rtp_video_header.content_type = image.content_type_;
   rtp_video_header.playout_delay = image.playout_delay_;
+  rtp_video_header.width = image._encodedWidth;
+  rtp_video_header.height = image._encodedHeight;
 
   SetVideoTiming(image, &rtp_video_header.video_timing);
 
@@ -309,9 +309,13 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test {
                        int64_t shared_frame_id,
                        FrameType frame_type,
                        LayerSync layer_sync,
-                       const std::set<int64_t>& expected_deps) {
+                       const std::set<int64_t>& expected_deps,
+                       uint16_t width = 0,
+                       uint16_t height = 0) {
     EncodedImage encoded_image;
     encoded_image._frameType = frame_type;
+    encoded_image._encodedWidth = width;
+    encoded_image._encodedHeight = height;
 
     CodecSpecificInfo codec_info{};
     codec_info.codecType = kVideoCodecVP8;
@@ -330,6 +334,9 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test {
     std::set<int64_t> actual_deps(header.generic->dependencies.begin(),
                                   header.generic->dependencies.end());
     EXPECT_EQ(expected_deps, actual_deps);
+
+    EXPECT_EQ(header.width, width);
+    EXPECT_EQ(header.height, height);
   }
 
  protected:
@@ -339,13 +346,13 @@ class RtpPayloadParamsVp8ToGenericTest : public ::testing::Test {
 };
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, Keyframe) {
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {});
+  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
   ConvertAndCheck(0, 1, kVideoFrameDelta, kNoSync, {0});
-  ConvertAndCheck(0, 2, kVideoFrameKey, kNoSync, {});
+  ConvertAndCheck(0, 2, kVideoFrameKey, kNoSync, {}, 480, 360);
 }
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, TooHighTemporalIndex) {
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {});
+  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
 
   EncodedImage encoded_image;
   encoded_image._frameType = kVideoFrameDelta;
@@ -362,7 +369,7 @@ TEST_F(RtpPayloadParamsVp8ToGenericTest, TooHighTemporalIndex) {
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, LayerSync) {
   // 02120212 pattern
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {});
+  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
   ConvertAndCheck(2, 1, kVideoFrameDelta, kNoSync, {0});
   ConvertAndCheck(1, 2, kVideoFrameDelta, kNoSync, {0});
   ConvertAndCheck(2, 3, kVideoFrameDelta, kNoSync, {0, 1, 2});
@@ -375,7 +382,7 @@ TEST_F(RtpPayloadParamsVp8ToGenericTest, LayerSync) {
 
 TEST_F(RtpPayloadParamsVp8ToGenericTest, FrameIdGaps) {
   // 0101 pattern
-  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {});
+  ConvertAndCheck(0, 0, kVideoFrameKey, kNoSync, {}, 480, 360);
   ConvertAndCheck(1, 1, kVideoFrameDelta, kNoSync, {0});
 
   ConvertAndCheck(0, 5, kVideoFrameDelta, kNoSync, {0});
@@ -91,6 +91,11 @@ void AddRtpHeaderExtensions(const RTPVideoHeader& video_header,
       generic_descriptor.SetSpatialLayersBitmask(spatial_bimask);
 
       generic_descriptor.SetTemporalLayer(video_header.generic->temporal_index);
+
+      if (frame_type == kVideoFrameKey) {
+        generic_descriptor.SetResolution(video_header.width,
+                                         video_header.height);
+      }
     }
     packet->SetExtension<RtpGenericFrameDescriptorExtension>(
         generic_descriptor);
@@ -486,6 +486,16 @@ void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
         webrtc_rtp_header.header.markerBit ||
         (generic_descriptor_wire->LastSubFrameInFrame() &&
          generic_descriptor_wire->LastPacketInSubFrame());
+
+    if (generic_descriptor_wire->FirstPacketInSubFrame()) {
+      webrtc_rtp_header.frameType =
+          generic_descriptor_wire->FrameDependenciesDiffs().empty()
+              ? kVideoFrameKey
+              : kVideoFrameDelta;
+    }
+
+    webrtc_rtp_header.video_header().width = generic_descriptor_wire->Width();
+    webrtc_rtp_header.video_header().height = generic_descriptor_wire->Height();
   } else {
     generic_descriptor_wire.reset();
   }
@@ -568,10 +568,8 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorTwoPackets) {
   first_packet_descriptor.SetFirstSubFrameInFrame(true);
   first_packet_descriptor.SetLastSubFrameInFrame(true);
   first_packet_descriptor.SetFrameId(100);
-  first_packet_descriptor.SetTemporalLayer(1);
   first_packet_descriptor.SetSpatialLayersBitmask(1 << kSpatialIndex);
-  first_packet_descriptor.AddFrameDependencyDiff(90);
-  first_packet_descriptor.AddFrameDependencyDiff(80);
+  first_packet_descriptor.SetResolution(480, 360);
   EXPECT_TRUE(first_packet.SetExtension<RtpGenericFrameDescriptorExtension>(
       first_packet_descriptor));
 
@@ -606,10 +604,10 @@ TEST_F(RtpVideoStreamReceiverTest, ParseGenericDescriptorTwoPackets) {
 
   EXPECT_CALL(mock_on_complete_frame_callback_, DoOnCompleteFrame)
       .WillOnce(Invoke([kSpatialIndex](video_coding::EncodedFrame* frame) {
-        EXPECT_EQ(frame->num_references, 2U);
-        EXPECT_EQ(frame->references[0], frame->id.picture_id - 90);
-        EXPECT_EQ(frame->references[1], frame->id.picture_id - 80);
+        EXPECT_EQ(frame->num_references, 0U);
         EXPECT_EQ(frame->id.spatial_layer, kSpatialIndex);
+        EXPECT_EQ(frame->EncodedImage()._encodedWidth, 480u);
+        EXPECT_EQ(frame->EncodedImage()._encodedHeight, 360u);
       }));
 
   rtp_video_stream_receiver_->OnRtpPacket(second_packet);