Do not propagate generic descriptor on receiving frame
It was used only by the frame decryptor, and the decryptor needs only the raw byte representation, which it can recreate in a way that is compatible with the new version of the descriptor.

Bug: webrtc:10342
Change-Id: Ie098235ebb87c6f5e2af42d0022d2365cd6bfa29
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/166163
Reviewed-by: Philip Eliasson <philipel@webrtc.org>
Reviewed-by: Erik Språng <sprang@webrtc.org>
Commit-Queue: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30501}
Committed by: Commit Bot
Parent: 8cfecac6e8
Commit: abf73de8ea
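To illustrate the new flow, here is a minimal, hypothetical usage sketch (not part of the diff below): the receive side keeps only the parsed dependency info in RTPVideoHeader::generic and, when the frame decryptor needs additional authenticated data, recreates the serialized descriptor with the new RtpDescriptorAuthentication helper. The function name AdditionalDataForFrame and the field values are made up for illustration.

// Sketch only: rebuild the byte representation the decryptor authenticates
// against, from the parsed descriptor info, instead of carrying a stored
// RtpGenericFrameDescriptor on the received frame.
#include <cstdint>
#include <vector>

#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
#include "modules/rtp_rtcp/source/rtp_video_header.h"

std::vector<uint8_t> AdditionalDataForFrame() {
  webrtc::RTPVideoHeader::GenericDescriptorInfo descriptor;
  descriptor.frame_id = 100;              // illustrative values only
  descriptor.spatial_index = 0;
  descriptor.temporal_index = 1;
  descriptor.dependencies.push_back(99);  // frame 100 depends on frame 99
  // Serializes with the version-00 extension writer, so the result stays
  // byte-compatible with what the sender authenticated; returns {} if the
  // indices or dependency count exceed what the descriptor can represent.
  return webrtc::RtpDescriptorAuthentication(descriptor);
}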
@@ -165,6 +165,8 @@ rtc_library("rtp_rtcp") {
     "source/rtcp_receiver.h",
     "source/rtcp_sender.cc",
     "source/rtcp_sender.h",
+    "source/rtp_descriptor_authentication.cc",
+    "source/rtp_descriptor_authentication.h",
     "source/rtp_format.cc",
     "source/rtp_format.h",
     "source/rtp_format_h264.cc",
modules/rtp_rtcp/source/rtp_descriptor_authentication.cc (new file, 49 lines)
@@ -0,0 +1,49 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS. All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+
+#include <cstdint>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+std::vector<uint8_t> RtpDescriptorAuthentication(
+    const RTPVideoHeader::GenericDescriptorInfo& descriptor) {
+  // Default way of creating additional data for an encrypted frame.
+  if (descriptor.spatial_index < 0 || descriptor.temporal_index < 0 ||
+      descriptor.spatial_index >=
+          RtpGenericFrameDescriptor::kMaxSpatialLayers ||
+      descriptor.temporal_index >=
+          RtpGenericFrameDescriptor::kMaxTemporalLayers ||
+      descriptor.dependencies.size() >
+          RtpGenericFrameDescriptor::kMaxNumFrameDependencies) {
+    return {};
+  }
+  RtpGenericFrameDescriptor frame_descriptor;
+  frame_descriptor.SetFirstPacketInSubFrame(true);
+  frame_descriptor.SetLastPacketInSubFrame(false);
+  frame_descriptor.SetTemporalLayer(descriptor.temporal_index);
+  frame_descriptor.SetSpatialLayersBitmask(1 << descriptor.spatial_index);
+  frame_descriptor.SetFrameId(descriptor.frame_id & 0xFFFF);
+  for (int64_t dependency : descriptor.dependencies) {
+    frame_descriptor.AddFrameDependencyDiff(descriptor.frame_id - dependency);
+  }
+  std::vector<uint8_t> result(
+      RtpGenericFrameDescriptorExtension00::ValueSize(frame_descriptor));
+  RtpGenericFrameDescriptorExtension00::Write(result, frame_descriptor);
+  return result;
+}
+
+}  // namespace webrtc
modules/rtp_rtcp/source/rtp_descriptor_authentication.h (new file, 27 lines)
@@ -0,0 +1,27 @@
+/*
+ *  Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS. All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_SOURCE_RTP_DESCRIPTOR_AUTHENTICATION_H_
+#define MODULES_RTP_RTCP_SOURCE_RTP_DESCRIPTOR_AUTHENTICATION_H_
+
+#include <cstdint>
+#include <vector>
+
+#include "modules/rtp_rtcp/source/rtp_video_header.h"
+
+namespace webrtc {
+
+// Converts frame dependencies into array of bytes for authentication.
+std::vector<uint8_t> RtpDescriptorAuthentication(
+    const RTPVideoHeader::GenericDescriptorInfo& descriptor);
+
+}  // namespace webrtc
+
+#endif  // MODULES_RTP_RTCP_SOURCE_RTP_DESCRIPTOR_AUTHENTICATION_H_
@@ -97,22 +97,4 @@ bool RtpGenericFrameDescriptor::AddFrameDependencyDiff(uint16_t fdiff) {
   return true;
 }
 
-void RtpGenericFrameDescriptor::SetByteRepresentation(
-    rtc::ArrayView<const uint8_t> byte_representation) {
-  RTC_CHECK(!byte_representation.empty());
-  byte_representation_.assign(byte_representation.begin(),
-                              byte_representation.end());
-  // Clear end_of_subframe bit.
-  // Because ByteRepresentation is used for frame authentication, bit describing
-  // position of the packet in the frame shouldn't be part of it.
-  // This match RtpVideoSender where descriptor is passed for authentication
-  // before end_of_subframe bit is decided and set, i.e. it is always 0.
-  byte_representation_[0] &= ~0x40;
-}
-
-rtc::ArrayView<const uint8_t>
-RtpGenericFrameDescriptor::GetByteRepresentation() {
-  return byte_representation_;
-}
-
 }  // namespace webrtc
@@ -70,9 +70,6 @@ class RtpGenericFrameDescriptor {
   // Returns false on failure, i.e. number of dependencies is too large.
   bool AddFrameDependencyDiff(uint16_t fdiff);
 
-  void SetByteRepresentation(rtc::ArrayView<const uint8_t> representation);
-  rtc::ArrayView<const uint8_t> GetByteRepresentation();
-
  private:
   bool beginning_of_subframe_ = false;
   bool end_of_subframe_ = false;
@@ -86,8 +83,6 @@ class RtpGenericFrameDescriptor {
   uint16_t frame_deps_id_diffs_[kMaxNumFrameDependencies];
   int width_ = 0;
   int height_ = 0;
-
-  std::vector<uint8_t> byte_representation_;
 };
 
 }  // namespace webrtc
@@ -37,7 +37,6 @@ RtpFrameObject::RtpFrameObject(
     VideoContentType content_type,
     const RTPVideoHeader& video_header,
     const absl::optional<webrtc::ColorSpace>& color_space,
-    const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
     RtpPacketInfos packet_infos,
     rtc::scoped_refptr<EncodedImageBuffer> image_buffer)
     : first_seq_num_(first_seq_num),
@@ -45,7 +44,6 @@ RtpFrameObject::RtpFrameObject(
       last_packet_received_time_(last_packet_received_time),
       times_nacked_(times_nacked) {
   rtp_video_header_ = video_header;
-  rtp_generic_frame_descriptor_ = generic_descriptor;
 
   // EncodedFrame members
   codec_type_ = codec;
@@ -131,11 +129,6 @@ const RTPVideoHeader& RtpFrameObject::GetRtpVideoHeader() const {
   return rtp_video_header_;
 }
 
-const absl::optional<RtpGenericFrameDescriptor>&
-RtpFrameObject::GetGenericFrameDescriptor() const {
-  return rtp_generic_frame_descriptor_;
-}
-
 const FrameMarking& RtpFrameObject::GetFrameMarking() const {
   return rtp_video_header_.frame_marking;
 }
@@ -13,32 +13,29 @@
 
 #include "absl/types/optional.h"
 #include "api/video/encoded_frame.h"
-#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
 
 namespace webrtc {
 namespace video_coding {
 
 class RtpFrameObject : public EncodedFrame {
  public:
-  RtpFrameObject(
-      uint16_t first_seq_num,
-      uint16_t last_seq_num,
-      bool markerBit,
-      int times_nacked,
-      int64_t first_packet_received_time,
-      int64_t last_packet_received_time,
-      uint32_t rtp_timestamp,
-      int64_t ntp_time_ms,
-      const VideoSendTiming& timing,
-      uint8_t payload_type,
-      VideoCodecType codec,
-      VideoRotation rotation,
-      VideoContentType content_type,
-      const RTPVideoHeader& video_header,
-      const absl::optional<webrtc::ColorSpace>& color_space,
-      const absl::optional<RtpGenericFrameDescriptor>& generic_descriptor,
-      RtpPacketInfos packet_infos,
-      rtc::scoped_refptr<EncodedImageBuffer> image_buffer);
+  RtpFrameObject(uint16_t first_seq_num,
+                 uint16_t last_seq_num,
+                 bool markerBit,
+                 int times_nacked,
+                 int64_t first_packet_received_time,
+                 int64_t last_packet_received_time,
+                 uint32_t rtp_timestamp,
+                 int64_t ntp_time_ms,
+                 const VideoSendTiming& timing,
+                 uint8_t payload_type,
+                 VideoCodecType codec,
+                 VideoRotation rotation,
+                 VideoContentType content_type,
+                 const RTPVideoHeader& video_header,
+                 const absl::optional<webrtc::ColorSpace>& color_space,
+                 RtpPacketInfos packet_infos,
+                 rtc::scoped_refptr<EncodedImageBuffer> image_buffer);
 
   ~RtpFrameObject() override;
   uint16_t first_seq_num() const;
@@ -50,13 +47,10 @@ class RtpFrameObject : public EncodedFrame {
   int64_t RenderTime() const override;
   bool delayed_by_retransmission() const override;
   const RTPVideoHeader& GetRtpVideoHeader() const;
-  const absl::optional<RtpGenericFrameDescriptor>& GetGenericFrameDescriptor()
-      const;
   const FrameMarking& GetFrameMarking() const;
 
  private:
   RTPVideoHeader rtp_video_header_;
-  absl::optional<RtpGenericFrameDescriptor> rtp_generic_frame_descriptor_;
   VideoCodecType codec_type_;
   uint16_t first_seq_num_;
   uint16_t last_seq_num_;
@@ -473,7 +473,6 @@ std::unique_ptr<RtpFrameObject> PacketBuffer::AssembleFrame(
       last_packet.video_header.content_type,    //
       first_packet.video_header,                //
       last_packet.video_header.color_space,     //
-      first_packet.generic_descriptor,          //
       RtpPacketInfos(std::move(packet_infos)),  //
       std::move(bitstream));
 }
@@ -19,7 +19,6 @@
 #include "absl/base/attributes.h"
 #include "api/rtp_packet_info.h"
 #include "api/video/encoded_image.h"
-#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
 #include "modules/rtp_rtcp/source/rtp_video_header.h"
 #include "modules/video_coding/frame_object.h"
@@ -67,7 +66,6 @@ class PacketBuffer {
 
     rtc::CopyOnWriteBuffer video_payload;
     RTPVideoHeader video_header;
-    absl::optional<RtpGenericFrameDescriptor> generic_descriptor;
 
     RtpPacketInfo packet_info;
   };
@@ -57,7 +57,6 @@ std::unique_ptr<RtpFrameObject> CreateFrame(
       VideoContentType::UNSPECIFIED,
       video_header,
       /*color_space=*/absl::nullopt,
-      /*generic_descriptor=*/absl::nullopt,
       RtpPacketInfos(),
       EncodedImageBuffer::Create(/*size=*/0));
   // clang-format on
@@ -63,25 +63,31 @@ class NullCallback : public video_coding::OnCompleteFrameCallback {
       std::unique_ptr<video_coding::EncodedFrame> frame) override {}
 };
 
-RtpGenericFrameDescriptor GenerateRtpGenericFrameDescriptor(
-    DataReader* reader) {
-  RtpGenericFrameDescriptor res;
-  res.SetFirstPacketInSubFrame(true);
-  res.SetFrameId(reader->GetNum<uint16_t>());
-
-  int spatial_layer =
-      reader->GetNum<uint8_t>() % RtpGenericFrameDescriptor::kMaxSpatialLayers;
-  res.SetSpatialLayersBitmask(1 << spatial_layer);
-  res.SetTemporalLayer(reader->GetNum<uint8_t>() %
-                       RtpGenericFrameDescriptor::kMaxTemporalLayers);
-
-  int num_diffs = (reader->GetNum<uint8_t>() %
-                   RtpGenericFrameDescriptor::kMaxNumFrameDependencies);
-  for (int i = 0; i < num_diffs; ++i) {
-    res.AddFrameDependencyDiff(reader->GetNum<uint16_t>() % (1 << 14));
-  }
-
-  return res;
+absl::optional<RTPVideoHeader::GenericDescriptorInfo>
+GenerateGenericFrameDependencies(DataReader* reader) {
+  absl::optional<RTPVideoHeader::GenericDescriptorInfo> result;
+  uint8_t flags = reader->GetNum<uint8_t>();
+  if (flags & 0b1000'0000) {
+    // i.e. with 50% chance there are no generic dependencies.
+    // in such case codec-specfic code path of the RtpFrameReferenceFinder will
+    // be validated.
+    return result;
+  }
+  result.emplace();
+  result->frame_id = reader->GetNum<int64_t>();
+  result->spatial_index = (flags & 0b0111'0000) >> 4;
+  result->temporal_index = (flags & 0b0000'1110) >> 1;
+  result->discardable = (flags & 0b0000'0001);
+
+  // Larger than supported by the RtpFrameReferenceFinder.
+  int num_diffs = (reader->GetNum<uint8_t>() % 16);
+  for (int i = 0; i < num_diffs; ++i) {
+    result->dependencies.push_back(result->frame_id -
+                                   (reader->GetNum<uint16_t>() % (1 << 14)));
+  }
+
+  return result;
 }
 }  // namespace
 
@@ -90,7 +96,7 @@ void FuzzOneInput(const uint8_t* data, size_t size) {
   NullCallback cb;
   video_coding::RtpFrameReferenceFinder reference_finder(&cb);
 
-  auto codec = static_cast<VideoCodecType>(reader.GetNum<uint8_t>() % 4);
+  auto codec = static_cast<VideoCodecType>(reader.GetNum<uint8_t>() % 5);
 
   while (reader.MoreToRead()) {
     uint16_t first_seq_num = reader.GetNum<uint16_t>();
@@ -128,6 +134,7 @@
     }
 
     reader.CopyTo(&video_header.frame_marking);
+    video_header.generic = GenerateGenericFrameDependencies(&reader);
 
     // clang-format off
     auto frame = std::make_unique<video_coding::RtpFrameObject>(
@@ -146,7 +153,6 @@
         VideoContentType::UNSPECIFIED,
         video_header,
        /*color_space=*/absl::nullopt,
-        GenerateRtpGenericFrameDescriptor(&reader),
        RtpPacketInfos(),
        EncodedImageBuffer::Create(/*size=*/0));
     // clang-format on
@@ -11,7 +11,10 @@
 #include "video/buffered_frame_decryptor.h"
 
 #include <utility>
+#include <vector>
 
+#include "modules/rtp_rtcp/source/rtp_descriptor_authentication.h"
+#include "modules/video_coding/frame_object.h"
 #include "rtc_base/logging.h"
 #include "system_wrappers/include/field_trial.h"
 
@@ -60,8 +63,8 @@ BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame(
     return FrameDecision::kStash;
   }
   // When using encryption we expect the frame to have the generic descriptor.
-  absl::optional<RtpGenericFrameDescriptor> descriptor =
-      frame->GetGenericFrameDescriptor();
+  const absl::optional<RTPVideoHeader::GenericDescriptorInfo>& descriptor =
+      frame->GetRtpVideoHeader().generic;
   if (!descriptor) {
     RTC_LOG(LS_ERROR) << "No generic frame descriptor found dropping frame.";
     return FrameDecision::kDrop;
@@ -76,9 +79,9 @@ BufferedFrameDecryptor::FrameDecision BufferedFrameDecryptor::DecryptFrame(
       max_plaintext_byte_size);
 
   // Only enable authenticating the header if the field trial is enabled.
-  rtc::ArrayView<const uint8_t> additional_data;
+  std::vector<uint8_t> additional_data;
   if (generic_descriptor_auth_experiment_) {
-    additional_data = descriptor->GetByteRepresentation();
+    additional_data = RtpDescriptorAuthentication(*descriptor);
   }
 
   // Attempt to decrypt the video frame.
@@ -57,6 +57,8 @@ class BufferedFrameDecryptorTest : public ::testing::Test,
   std::unique_ptr<video_coding::RtpFrameObject> CreateRtpFrameObject(
       bool key_frame) {
     seq_num_++;
+    RTPVideoHeader rtp_video_header;
+    rtp_video_header.generic.emplace();
 
     // clang-format off
     return std::make_unique<video_coding::RtpFrameObject>(
@@ -73,9 +75,8 @@ class BufferedFrameDecryptorTest : public ::testing::Test,
         kVideoCodecGeneric,
         kVideoRotation_0,
         VideoContentType::UNSPECIFIED,
-        RTPVideoHeader(),
+        rtp_video_header,
         /*color_space=*/absl::nullopt,
-        RtpGenericFrameDescriptor(),
         RtpPacketInfos(),
         EncodedImageBuffer::Create(/*size=*/0));
     // clang-format on
@@ -28,6 +28,7 @@
 #include "modules/rtp_rtcp/include/ulpfec_receiver.h"
 #include "modules/rtp_rtcp/source/create_video_rtp_depacketizer.h"
 #include "modules/rtp_rtcp/source/rtp_format.h"
+#include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor.h"
 #include "modules/rtp_rtcp/source/rtp_generic_frame_descriptor_extension.h"
 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
@@ -367,51 +368,43 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
   rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
   rtp_packet.GetExtension<FrameMarkingExtension>(&video_header.frame_marking);
 
-  RtpGenericFrameDescriptor& generic_descriptor =
-      packet->generic_descriptor.emplace();
-  if (rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
-          &generic_descriptor)) {
-    if (rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension00>()) {
-      RTC_LOG(LS_WARNING) << "RTP packet had two different GFD versions.";
-      return;
-    }
-    generic_descriptor.SetByteRepresentation(
-        rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension01>());
-  } else if ((rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
-                 &generic_descriptor))) {
-    generic_descriptor.SetByteRepresentation(
-        rtp_packet.GetRawExtension<RtpGenericFrameDescriptorExtension00>());
-  } else {
-    packet->generic_descriptor = absl::nullopt;
-  }
-  if (packet->generic_descriptor != absl::nullopt) {
-    video_header.is_first_packet_in_frame =
-        packet->generic_descriptor->FirstPacketInSubFrame();
-    video_header.is_last_packet_in_frame =
-        packet->generic_descriptor->LastPacketInSubFrame();
-
-    if (packet->generic_descriptor->FirstPacketInSubFrame()) {
+  if (rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension00>() &&
+      rtp_packet.HasExtension<RtpGenericFrameDescriptorExtension01>()) {
+    RTC_LOG(LS_WARNING) << "RTP packet had two different GFD versions.";
+    return;
+  }
+  RtpGenericFrameDescriptor generic_descriptor;
+  bool has_generic_descriptor =
+      rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension01>(
+          &generic_descriptor) ||
+      rtp_packet.GetExtension<RtpGenericFrameDescriptorExtension00>(
+          &generic_descriptor);
+  if (has_generic_descriptor) {
+    video_header.is_first_packet_in_frame =
+        generic_descriptor.FirstPacketInSubFrame();
+    video_header.is_last_packet_in_frame =
+        generic_descriptor.LastPacketInSubFrame();
+
+    if (generic_descriptor.FirstPacketInSubFrame()) {
       video_header.frame_type =
-          packet->generic_descriptor->FrameDependenciesDiffs().empty()
+          generic_descriptor.FrameDependenciesDiffs().empty()
               ? VideoFrameType::kVideoFrameKey
              : VideoFrameType::kVideoFrameDelta;
 
       auto& descriptor = video_header.generic.emplace();
       int64_t frame_id =
-          frame_id_unwrapper_.Unwrap(packet->generic_descriptor->FrameId());
+          frame_id_unwrapper_.Unwrap(generic_descriptor.FrameId());
       descriptor.frame_id = frame_id;
-      descriptor.spatial_index = packet->generic_descriptor->SpatialLayer();
-      descriptor.temporal_index = packet->generic_descriptor->TemporalLayer();
-      descriptor.discardable =
-          packet->generic_descriptor->Discardable().value_or(false);
-      for (uint16_t fdiff :
-           packet->generic_descriptor->FrameDependenciesDiffs()) {
+      descriptor.spatial_index = generic_descriptor.SpatialLayer();
+      descriptor.temporal_index = generic_descriptor.TemporalLayer();
+      descriptor.discardable = generic_descriptor.Discardable().value_or(false);
+      for (uint16_t fdiff : generic_descriptor.FrameDependenciesDiffs()) {
         descriptor.dependencies.push_back(frame_id - fdiff);
       }
     }
-    video_header.width = packet->generic_descriptor->Width();
-    video_header.height = packet->generic_descriptor->Height();
+    video_header.width = generic_descriptor.Width();
+    video_header.height = generic_descriptor.Height();
   }
 
   // Color space should only be transmitted in the last packet of a frame,
@@ -435,7 +428,7 @@ void RtpVideoStreamReceiver::OnReceivedPayloadData(
     // TODO(bugs.webrtc.org/10336): Implement support for reordering.
     RTC_LOG(LS_INFO)
        << "LossNotificationController does not support reordering.";
-  } else if (!packet->generic_descriptor) {
+  } else if (!has_generic_descriptor) {
     RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
                            "frame descriptor, but it is missing.";
   } else {