Add experimental extension RtpVideoLayersAllocation

The extension is intended for signaling the per-layer target bitrate, resolution
and frame rate to an SFU, so that the SFU knows which video layers a client is
currently targeting.
It is hoped that it can replace the Target Bitrate RTCP XR message, which is
currently used only for screen share.

Bug: webrtc:12000
Change-Id: Id7b55e7ddaf6304e31839fd0482b096e1dbe8925
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/185980
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Commit-Queue: Per Kjellander <perkj@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#32313}
Per Kjellander
2020-10-05 13:51:47 +02:00
committed by Commit Bot
parent 3c65a2b5d4
commit 6556ed2402
12 changed files with 529 additions and 1 deletion

View File

@@ -21,6 +21,7 @@ rtc_library("video_rtp_headers") {
"hdr_metadata.h",
"video_content_type.cc",
"video_content_type.h",
"video_layers_allocation.h",
"video_rotation.h",
"video_timing.cc",
"video_timing.h",
@@ -30,8 +31,12 @@ rtc_library("video_rtp_headers") {
"..:array_view",
"../../rtc_base:rtc_base_approved",
"../../rtc_base/system:rtc_export",
"../units:data_rate",
]
absl_deps = [
"//third_party/abseil-cpp/absl/container:inlined_vector",
"//third_party/abseil-cpp/absl/types:optional",
]
absl_deps = [ "//third_party/abseil-cpp/absl/types:optional" ]
}
rtc_library("video_frame") {

View File

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_LAYERS_ALLOCATION_H_
#define API_VIDEO_VIDEO_LAYERS_ALLOCATION_H_
#include <cstdint>
#include "absl/container/inlined_vector.h"
#include "api/units/data_rate.h"
namespace webrtc {
// This struct contains additional stream-level information needed by a
// Selective Forwarding Middlebox to make relay decisions for RTP streams.
struct VideoLayersAllocation {
static constexpr int kMaxSpatialIds = 4;
static constexpr int kMaxTemporalIds = 4;
friend bool operator==(const VideoLayersAllocation& lhs,
const VideoLayersAllocation& rhs) {
return lhs.rtp_stream_index == rhs.rtp_stream_index &&
lhs.resolution_and_frame_rate_is_valid ==
rhs.resolution_and_frame_rate_is_valid &&
lhs.active_spatial_layers == rhs.active_spatial_layers;
}
friend bool operator!=(const VideoLayersAllocation& lhs,
const VideoLayersAllocation& rhs) {
return !(lhs == rhs);
}
struct SpatialLayer {
friend bool operator==(const SpatialLayer& lhs, const SpatialLayer& rhs) {
return lhs.rtp_stream_index == rhs.rtp_stream_index &&
lhs.spatial_id == rhs.spatial_id &&
lhs.target_bitrate_per_temporal_layer ==
rhs.target_bitrate_per_temporal_layer &&
lhs.width == rhs.width && lhs.height == rhs.height &&
lhs.frame_rate_fps == rhs.frame_rate_fps;
}
friend bool operator!=(const SpatialLayer& lhs, const SpatialLayer& rhs) {
return !(lhs == rhs);
}
int rtp_stream_index = 0;
// Index of the spatial layer per `rtp_stream_index`.
int spatial_id = 0;
// Target bitrate per decode target.
absl::InlinedVector<DataRate, kMaxTemporalIds>
target_bitrate_per_temporal_layer;
// These fields are only valid if `resolution_and_frame_rate_is_valid` is
// true.
uint16_t width = 0;
uint16_t height = 0;
// Max frame rate used in any temporal layer of this spatial layer.
uint8_t frame_rate_fps = 0;
};
// Index of the RTP stream this allocation is sent on. Used for mapping
// a SpatialLayer to an RTP stream.
int rtp_stream_index = 0;
bool resolution_and_frame_rate_is_valid = false;
absl::InlinedVector<SpatialLayer, kMaxSpatialIds> active_spatial_layers;
};
} // namespace webrtc
#endif // API_VIDEO_VIDEO_LAYERS_ALLOCATION_H_
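For illustration only (not part of this change), a minimal sketch of how a sender
might populate this struct for a two-stream simulcast send with two temporal
layers per stream; the helper name and all values are made up for the example:

#include "api/units/data_rate.h"
#include "api/video/video_layers_allocation.h"

namespace {
// Builds an allocation describing two simulcast streams, each with two
// temporal layers. Field values are illustrative only.
webrtc::VideoLayersAllocation MakeExampleAllocation() {
  webrtc::VideoLayersAllocation allocation;
  // This instance of the allocation is sent on the first simulcast stream.
  allocation.rtp_stream_index = 0;
  allocation.resolution_and_frame_rate_is_valid = true;

  webrtc::VideoLayersAllocation::SpatialLayer low;
  low.rtp_stream_index = 0;
  low.spatial_id = 0;
  // Target bitrate per temporal layer, lowest first.
  low.target_bitrate_per_temporal_layer = {
      webrtc::DataRate::KilobitsPerSec(150),
      webrtc::DataRate::KilobitsPerSec(300)};
  low.width = 320;
  low.height = 180;
  low.frame_rate_fps = 30;

  webrtc::VideoLayersAllocation::SpatialLayer high = low;
  high.rtp_stream_index = 1;
  high.target_bitrate_per_temporal_layer = {
      webrtc::DataRate::KilobitsPerSec(600),
      webrtc::DataRate::KilobitsPerSec(1200)};
  high.width = 640;
  high.height = 360;

  allocation.active_spatial_layers = {low, high};
  return allocation;
}
}  // namespace

The same struct is what RtpVideoLayersAllocationExtension::Write consumes further
down in this change.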

View File

@@ -52,6 +52,7 @@ rtc_library("rtp_rtcp_format") {
"source/rtp_packet.h",
"source/rtp_packet_received.h",
"source/rtp_packet_to_send.h",
"source/rtp_video_layers_allocation_extension.h",
]
sources = [
"include/report_block_data.cc",
@@ -95,6 +96,7 @@ rtc_library("rtp_rtcp_format") {
"source/rtp_packet.cc",
"source/rtp_packet_received.cc",
"source/rtp_packet_to_send.cc",
"source/rtp_video_layers_allocation_extension.cc",
]
deps = [
@@ -500,6 +502,7 @@ if (rtc_include_tests) {
"source/rtp_sender_video_unittest.cc",
"source/rtp_sequence_number_map_unittest.cc",
"source/rtp_utility_unittest.cc",
"source/rtp_video_layers_allocation_extension_unittest.cc",
"source/source_tracker_unittest.cc",
"source/time_util_unittest.cc",
"source/ulpfec_generator_unittest.cc",

View File

@@ -65,6 +65,7 @@ enum RTPExtensionType : int {
kRtpExtensionTransportSequenceNumber02,
kRtpExtensionPlayoutDelay,
kRtpExtensionVideoContentType,
kRtpExtensionVideoLayersAllocation,
kRtpExtensionVideoTiming,
kRtpExtensionRtpStreamId,
kRtpExtensionRepairedRtpStreamId,

View File

@@ -13,6 +13,7 @@
#include "modules/rtp_rtcp/source/rtp_dependency_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
@@ -40,6 +41,7 @@ constexpr ExtensionInfo kExtensions[] = {
CreateExtensionInfo<TransportSequenceNumberV2>(),
CreateExtensionInfo<PlayoutDelayLimits>(),
CreateExtensionInfo<VideoContentTypeExtension>(),
CreateExtensionInfo<RtpVideoLayersAllocationExtension>(),
CreateExtensionInfo<VideoTimingExtension>(),
CreateExtensionInfo<RtpStreamId>(),
CreateExtensionInfo<RepairedRtpStreamId>(),

View File

@@ -196,6 +196,7 @@ void RtpPacket::ZeroMutableExtensions() {
case RTPExtensionType::kRtpExtensionRepairedRtpStreamId:
case RTPExtensionType::kRtpExtensionRtpStreamId:
case RTPExtensionType::kRtpExtensionVideoContentType:
case RTPExtensionType::kRtpExtensionVideoLayersAllocation:
case RTPExtensionType::kRtpExtensionVideoRotation:
case RTPExtensionType::kRtpExtensionInbandComfortNoise: {
// Non-mutable extension. Don't change it.

View File

@@ -118,6 +118,7 @@ bool IsNonVolatile(RTPExtensionType type) {
case kRtpExtensionVideoRotation:
case kRtpExtensionPlayoutDelay:
case kRtpExtensionVideoContentType:
case kRtpExtensionVideoLayersAllocation:
case kRtpExtensionVideoTiming:
case kRtpExtensionRepairedRtpStreamId:
case kRtpExtensionColorSpace:

View File

@@ -492,6 +492,10 @@ void RtpHeaderParser::ParseOneByteExtensionHeader(
&header->extension.video_timing);
break;
}
case kRtpExtensionVideoLayersAllocation:
RTC_LOG(WARNING) << "VideoLayersAllocation extension unsupported by "
"rtp header parser.";
break;
case kRtpExtensionRtpStreamId: {
std::string name(reinterpret_cast<const char*>(ptr), len + 1);
if (IsLegalRsidName(name)) {

View File

@@ -0,0 +1,259 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
#include <limits>
#include "api/video/video_layers_allocation.h"
#include "rtc_base/bit_buffer.h"
namespace webrtc {
constexpr RTPExtensionType RtpVideoLayersAllocationExtension::kId;
constexpr const char RtpVideoLayersAllocationExtension::kUri[];
namespace {
// Counts the number of bits used in the binary representation of val.
size_t CountBits(uint64_t val) {
size_t bit_count = 0;
while (val != 0) {
bit_count++;
val >>= 1;
}
return bit_count;
}
// Counts the number of bits used if `val` is encoded using unsigned exponential
// Golomb encoding.
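// For example, val = 200 is encoded as 201, which has CountBits(201) = 8 bits
// and therefore uses 2 * 8 - 1 = 15 bits.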
// TODO(bugs.webrtc.org/12000): Move to bit_buffer.cc if Golomb encoding is used
// in the final version.
size_t SizeExponentialGolomb(uint32_t val) {
if (val == std::numeric_limits<uint32_t>::max()) {
return 0;
}
uint64_t val_to_encode = static_cast<uint64_t>(val) + 1;
return CountBits(val_to_encode) * 2 - 1;
}
} // namespace
// TODO(bugs.webrtc.org/12000): Review and revise the content and encoding of
// this extension. This is an experimental first version.
// 0 1 2
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | NS|RSID|T|X|Res| Bit encoded data...
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// NS: Number of spatial layers/simulcast streams - 1. 2 bits, allowing up to
// 4 layers/streams to be signaled.
// RSID: RTP stream id this allocation is sent on, numbered from 0. 2 bits.
// T: indicates if all spatial layers have the same number of temporal layers.
// X: indicates if resolution and frame rate per spatial layer are present.
// Res: 2 bits reserved for future use.
// Bit encoded data: consists of the following fields, written in order:
// 1) T=1: a single 2-bit value Nt = number of temporal layers - 1.
//    T=0: NS 2-bit values, one per spatial layer from lower to higher, each
//    equal to that layer's number of temporal layers - 1.
// 2) Bitrates:
// One value for each spatial x temporal layer.
// Format: RSID (2-bit), SID (2-bit), followed by the bitrates of all temporal
// layers for that RSID,SID tuple. All bitrates are in kbps. All bitrates are
// total required bitrate to receive the corresponding layer, i.e. in
// simulcast mode they include only corresponding spatial layer, in full-svc
// all lower spatial layers are included. All lower temporal layers are also
// included. All bitrates are written using unsigned Exponential Golomb
// encoding.
// 3) [only if the X bit is set] Width (16-bit), height (16-bit) and max frame
//    rate (8-bit) per spatial layer, in order from lower to higher.
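//
// Worked example (illustrative, not normative): an allocation with two active
// entries, two temporal layers each, target bitrates {25, 50} kbps and
// {100, 200} kbps, no resolution/frame rate info, sent on rtp stream 1,
// encodes as:
//   first byte:  NS=01 RSID=01 T=1 X=0 Res=00
//   next 2 bits: number of temporal layers - 1 = 01
//   per entry:   RSID (2 bits), SID (2 bits), then each kbps value written
//                with unsigned Exponential Golomb (9+11 bits and 13+15 bits).
// In total 66 bits, which ValueSize() below rounds up to 9 bytes.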
bool RtpVideoLayersAllocationExtension::Write(
rtc::ArrayView<uint8_t> data,
const VideoLayersAllocation& allocation) {
RTC_DCHECK_LT(allocation.rtp_stream_index,
VideoLayersAllocation::kMaxSpatialIds);
RTC_DCHECK_GE(data.size(), ValueSize(allocation));
rtc::BitBufferWriter writer(data.data(), data.size());
// NS:
if (allocation.active_spatial_layers.empty())
return false;
writer.WriteBits(allocation.active_spatial_layers.size() - 1, 2);
// RSID:
writer.WriteBits(allocation.rtp_stream_index, 2);
// T:
bool num_tls_is_the_same = true;
size_t first_layers_number_of_temporal_layers =
allocation.active_spatial_layers.front()
.target_bitrate_per_temporal_layer.size();
for (const auto& spatial_layer : allocation.active_spatial_layers) {
if (first_layers_number_of_temporal_layers !=
spatial_layer.target_bitrate_per_temporal_layer.size()) {
num_tls_is_the_same = false;
break;
}
}
writer.WriteBits(num_tls_is_the_same ? 1 : 0, 1);
// X:
writer.WriteBits(allocation.resolution_and_frame_rate_is_valid ? 1 : 0, 1);
// RESERVED:
writer.WriteBits(/*val=*/0, /*bit_count=*/2);
if (num_tls_is_the_same) {
writer.WriteBits(first_layers_number_of_temporal_layers - 1, 2);
} else {
for (const auto& spatial_layer : allocation.active_spatial_layers) {
writer.WriteBits(
spatial_layer.target_bitrate_per_temporal_layer.size() - 1, 2);
}
}
for (const auto& spatial_layer : allocation.active_spatial_layers) {
writer.WriteBits(spatial_layer.rtp_stream_index, 2);
writer.WriteBits(spatial_layer.spatial_id, 2);
for (const DataRate& bitrate :
spatial_layer.target_bitrate_per_temporal_layer) {
writer.WriteExponentialGolomb(bitrate.kbps());
}
}
if (allocation.resolution_and_frame_rate_is_valid) {
for (const auto& spatial_layer : allocation.active_spatial_layers) {
writer.WriteUInt16(spatial_layer.width);
writer.WriteUInt16(spatial_layer.height);
writer.WriteUInt8(spatial_layer.frame_rate_fps);
}
}
return true;
}
bool RtpVideoLayersAllocationExtension::Parse(
rtc::ArrayView<const uint8_t> data,
VideoLayersAllocation* allocation) {
if (data.size() == 0)
return false;
rtc::BitBuffer reader(data.data(), data.size());
if (!allocation)
return false;
uint32_t val;
// NS:
if (!reader.ReadBits(&val, 2))
return false;
int active_spatial_layers = val + 1;
// RSID:
if (!reader.ReadBits(&val, 2))
return false;
allocation->rtp_stream_index = val;
// T:
if (!reader.ReadBits(&val, 1))
return false;
bool num_tls_is_constant = (val == 1);
// X:
if (!reader.ReadBits(&val, 1))
return false;
allocation->resolution_and_frame_rate_is_valid = (val == 1);
// RESERVED:
if (!reader.ReadBits(&val, 2))
return false;
int number_of_temporal_layers[VideoLayersAllocation::kMaxSpatialIds];
if (num_tls_is_constant) {
if (!reader.ReadBits(&val, 2))
return false;
for (int sl_idx = 0; sl_idx < active_spatial_layers; ++sl_idx) {
number_of_temporal_layers[sl_idx] = val + 1;
}
} else {
for (int sl_idx = 0; sl_idx < active_spatial_layers; ++sl_idx) {
if (!reader.ReadBits(&val, 2))
return false;
number_of_temporal_layers[sl_idx] = val + 1;
if (number_of_temporal_layers[sl_idx] >
VideoLayersAllocation::kMaxTemporalIds)
return false;
}
}
for (int sl_idx = 0; sl_idx < active_spatial_layers; ++sl_idx) {
allocation->active_spatial_layers.emplace_back();
auto& spatial_layer = allocation->active_spatial_layers.back();
auto& temporal_layers = spatial_layer.target_bitrate_per_temporal_layer;
if (!reader.ReadBits(&val, 2))
return false;
spatial_layer.rtp_stream_index = val;
if (!reader.ReadBits(&val, 2))
return false;
spatial_layer.spatial_id = val;
for (int tl_idx = 0; tl_idx < number_of_temporal_layers[sl_idx]; ++tl_idx) {
if (!reader.ReadExponentialGolomb(&val))
return false;
temporal_layers.push_back(DataRate::KilobitsPerSec(val));
}
}
if (allocation->resolution_and_frame_rate_is_valid) {
for (auto& spatial_layer : allocation->active_spatial_layers) {
if (!reader.ReadUInt16(&spatial_layer.width))
return false;
if (!reader.ReadUInt16(&spatial_layer.height))
return false;
if (!reader.ReadUInt8(&spatial_layer.frame_rate_fps))
return false;
}
}
return true;
}
size_t RtpVideoLayersAllocationExtension::ValueSize(
const VideoLayersAllocation& allocation) {
if (allocation.active_spatial_layers.empty()) {
return 0;
}
size_t size_in_bits = 8;  // Fixed first byte.
bool num_tls_is_the_same = true;
size_t first_layers_number_of_temporal_layers =
allocation.active_spatial_layers.front()
.target_bitrate_per_temporal_layer.size();
for (const auto& spatial_layer : allocation.active_spatial_layers) {
if (first_layers_number_of_temporal_layers !=
spatial_layer.target_bitrate_per_temporal_layer.size()) {
num_tls_is_the_same = false;
}
size_in_bits += 4; // RSID, SID tuple.
for (const auto& bitrate :
spatial_layer.target_bitrate_per_temporal_layer) {
size_in_bits += SizeExponentialGolomb(bitrate.kbps());
}
}
if (num_tls_is_the_same) {
size_in_bits += 2;
} else {
for (const auto& spatial_layer : allocation.active_spatial_layers) {
size_in_bits +=
2 * spatial_layer.target_bitrate_per_temporal_layer.size();
}
}
if (allocation.resolution_and_frame_rate_is_valid) {
size_in_bits += allocation.active_spatial_layers.size() * 5 * 8;
}
return (size_in_bits + 7) / 8;
}
} // namespace webrtc

View File

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_
#define MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_
#include "api/video/video_layers_allocation.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
// TODO(bugs.webrtc.org/12000): Note that this extension is still being developed and
// the wire format will likely change.
class RtpVideoLayersAllocationExtension {
public:
using value_type = VideoLayersAllocation;
static constexpr RTPExtensionType kId = kRtpExtensionVideoLayersAllocation;
static constexpr const char kUri[] =
"http://www.webrtc.org/experiments/rtp-hdrext/video-layers-allocation00";
static bool Parse(rtc::ArrayView<const uint8_t> data,
VideoLayersAllocation* allocation);
static size_t ValueSize(const VideoLayersAllocation& allocation);
static bool Write(rtc::ArrayView<uint8_t> data,
const VideoLayersAllocation& allocation);
};
} // namespace webrtc
#endif // MODULES_RTP_RTCP_SOURCE_RTP_VIDEO_LAYERS_ALLOCATION_EXTENSION_H_
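For context only, and not part of this change: a rough sketch of how the
extension could be exercised end to end through the existing
RtpHeaderExtensionMap, RtpPacketToSend and RtpPacketReceived classes. The
extension id 1 is arbitrary and would normally come from SDP negotiation of the
URI above.

#include "api/video/video_layers_allocation.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_packet_to_send.h"
#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"

namespace {
void SendAndReceiveAllocationSketch(
    const webrtc::VideoLayersAllocation& allocation) {
  // Both sides must register the extension under the same (negotiated) id.
  webrtc::RtpHeaderExtensionMap extensions;
  extensions.Register<webrtc::RtpVideoLayersAllocationExtension>(/*id=*/1);

  // Sender side: attach the allocation to an outgoing packet.
  webrtc::RtpPacketToSend packet(&extensions);
  packet.SetExtension<webrtc::RtpVideoLayersAllocationExtension>(allocation);

  // Receiver (e.g. SFU) side: parse the packet and read the allocation back.
  webrtc::RtpPacketReceived received(&extensions);
  received.Parse(packet.data(), packet.size());
  webrtc::VideoLayersAllocation parsed;
  received.GetExtension<webrtc::RtpVideoLayersAllocationExtension>(&parsed);
}
}  // namespace

The unit tests added later in this change exercise the same Write/Parse pair
directly on a raw buffer, without going through an RTP packet.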

View File

@@ -0,0 +1,134 @@
/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
#include "api/video/video_layers_allocation.h"
#include "rtc_base/buffer.h"
#include "test/gmock.h"
namespace webrtc {
namespace {
TEST(RtpVideoLayersAllocationExtension,
WriteEmptyLayersAllocationReturnsFalse) {
VideoLayersAllocation written_allocation;
rtc::Buffer buffer(
RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
EXPECT_FALSE(
RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
}
TEST(RtpVideoLayersAllocationExtension,
CanWriteAndParse2SpatialWith2TemporalLayers) {
VideoLayersAllocation written_allocation;
written_allocation.rtp_stream_index = 1;
written_allocation.active_spatial_layers = {
{
/*rtp_stream_index*/ 0,
/*spatial_id*/ 0,
/*target_bitrate_per_temporal_layer*/
{DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)},
/*width*/ 0,
/*height*/ 0,
/*frame_rate_fps*/ 0,
},
{
/*rtp_stream_index*/ 1,
/*spatial_id*/ 0,
/*target_bitrate_per_temporal_layer*/
{DataRate::KilobitsPerSec(100), DataRate::KilobitsPerSec(200)},
/*width*/ 0,
/*height*/ 0,
/*frame_rate_fps*/ 0,
},
};
rtc::Buffer buffer(
RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
EXPECT_TRUE(
RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
VideoLayersAllocation parsed_allocation;
EXPECT_TRUE(
RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
EXPECT_EQ(written_allocation, parsed_allocation);
}
TEST(RtpVideoLayersAllocationExtension,
CanWriteAndParseAllocationWithDifferentNumberOfTemporalLayers) {
VideoLayersAllocation written_allocation;
written_allocation.rtp_stream_index = 1;
written_allocation.active_spatial_layers = {
{
/*rtp_stream_index*/ 0,
/*spatial_id*/ 0,
/*target_bitrate_per_temporal_layer*/
{DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)},
/*width*/ 0,
/*height*/ 0,
/*frame_rate_fps*/ 0,
},
{
/*rtp_stream_index*/ 1,
/*spatial_id*/ 0,
/*target_bitrate_per_temporal_layer*/ {DataRate::KilobitsPerSec(100)},
/*width*/ 0,
/*height*/ 0,
/*frame_rate_fps*/ 0,
},
};
rtc::Buffer buffer(
RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
EXPECT_TRUE(
RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
VideoLayersAllocation parsed_allocation;
EXPECT_TRUE(
RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
EXPECT_EQ(written_allocation, parsed_allocation);
}
TEST(RtpVideoLayersAllocationExtension,
CanWriteAndParseAllocationWithResolution) {
VideoLayersAllocation written_allocation;
written_allocation.rtp_stream_index = 1;
written_allocation.resolution_and_frame_rate_is_valid = true;
written_allocation.active_spatial_layers = {
{
/*rtp_stream_index*/ 0,
/*spatial_id*/ 0,
/*target_bitrate_per_temporal_layer*/
{DataRate::KilobitsPerSec(25), DataRate::KilobitsPerSec(50)},
/*width*/ 320,
/*height*/ 240,
/*frame_rate_fps*/ 8,
},
{
/*rtp_stream_index*/ 0,
/*spatial_id*/ 1,
/*target_bitrate_per_temporal_layer*/
{DataRate::KilobitsPerSec(100), DataRate::KilobitsPerSec(200)},
/*width*/ 640,
/*height*/ 320,
/*frame_rate_fps*/ 30,
},
};
rtc::Buffer buffer(
RtpVideoLayersAllocationExtension::ValueSize(written_allocation));
EXPECT_TRUE(
RtpVideoLayersAllocationExtension::Write(buffer, written_allocation));
VideoLayersAllocation parsed_allocation;
EXPECT_TRUE(
RtpVideoLayersAllocationExtension::Parse(buffer, &parsed_allocation));
EXPECT_EQ(written_allocation, parsed_allocation);
}
} // namespace
} // namespace webrtc

View File

@@ -15,6 +15,7 @@
#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
#include "modules/rtp_rtcp/source/rtp_header_extensions.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "modules/rtp_rtcp/source/rtp_video_layers_allocation_extension.h"
namespace webrtc {
// We decide which header extensions to register by reading four bytes
@@ -142,6 +143,11 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
packet.GetExtension<InbandComfortNoiseExtension>(&noise_level);
break;
}
case kRtpExtensionVideoLayersAllocation: {
VideoLayersAllocation allocation;
packet.GetExtension<RtpVideoLayersAllocationExtension>(&allocation);
break;
}
case kRtpExtensionGenericFrameDescriptor02:
// This extension requires state to read and is so complicated that it
// deserves its own fuzzer.