Move allocation and rtp conversion logic out of payload router.

Makes it easier to write tests, and allows for moving rtp module
ownership into the payload router in the future. The RtpPayloadParams
class is split into declaration and definition and moved into separate
files.

Bug: webrtc:9517
Change-Id: I8700628edff19abcacfe8d3a20e4ba7476f712ad
Reviewed-on: https://webrtc-review.googlesource.com/88564
Commit-Queue: Stefan Holmer <stefan@webrtc.org>
Reviewed-by: Sebastian Jansson <srte@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23983}
commit 1da4d79ba3
parent 3643aef89c
committed by Commit Bot
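For orientation, here is a minimal sketch of how the extracted class is driven after this change. It mirrors the new call/rtp_payload_params_unittest.cc further down; the wrapper function and the constant values are illustrative only, not part of the CL.

```cpp
#include "call/rtp_payload_params.h"

#include "modules/video_coding/include/video_codec_interface.h"

namespace webrtc {

// Illustrative helper (not in the CL): build the RTP header for one encoded
// frame without going through PayloadRouter.
RTPVideoHeader BuildHeaderForFrame(const EncodedImage& image,
                                   const CodecSpecificInfo& codec_info) {
  RtpPayloadState state;   // Seed picture_id / tl0_pic_idx explicitly, or pass
  state.picture_id = 123;  // nullptr below to start from random values (see
  state.tl0_pic_idx = 20;  // the constructor in the new rtp_payload_params.cc).
  RtpPayloadParams params(/*ssrc=*/12345, &state);

  // Copies codec specifics (VP8/VP9/H264/generic), rotation, content type,
  // playout delay and video timing into the header, then advances
  // picture_id / tl0_pic_idx in the stored RtpPayloadState.
  return params.GetRtpVideoHeader(image, &codec_info);
}

}  // namespace webrtc
```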
BUILD.gn (1 addition)
@@ -422,6 +422,7 @@ if (rtc_include_tests) {
      "api:rtc_api_unittests",
      "api/audio/test:audio_api_unittests",
      "api/audio_codecs/test:audio_codecs_api_unittests",
      "api/video/test:rtc_api_video_unittests",
      "api/video_codecs/test:video_codecs_api_unittests",
      "p2p:libstunprober_unittests",
      "p2p:rtc_p2p_unittests",
api/video/test/BUILD.gn (new file, 27 lines)
@@ -0,0 +1,27 @@
# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

import("../../../webrtc.gni")

rtc_source_set("rtc_api_video_unittests") {
  testonly = true

  sources = [
    "video_bitrate_allocation_unittest.cc",
  ]

  if (!build_with_chromium && is_clang) {
    # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
    suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
  }

  deps = [
    "..:video_bitrate_allocation",
    "../../../test:test_support",
  ]
}
api/video/test/video_bitrate_allocation_unittest.cc (new file, 63 lines)
@@ -0,0 +1,63 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <memory>
#include <string>

#include "api/video/video_bitrate_allocation.h"
#include "test/gtest.h"

namespace webrtc {
TEST(VideoBitrateAllocation, SimulcastTargetBitrate) {
  VideoBitrateAllocation bitrate;
  bitrate.SetBitrate(0, 0, 10000);
  bitrate.SetBitrate(0, 1, 20000);
  bitrate.SetBitrate(1, 0, 40000);
  bitrate.SetBitrate(1, 1, 80000);

  VideoBitrateAllocation layer0_bitrate;
  layer0_bitrate.SetBitrate(0, 0, 10000);
  layer0_bitrate.SetBitrate(0, 1, 20000);

  VideoBitrateAllocation layer1_bitrate;
  layer1_bitrate.SetBitrate(0, 0, 40000);
  layer1_bitrate.SetBitrate(0, 1, 80000);

  std::vector<absl::optional<VideoBitrateAllocation>> layer_allocations =
      bitrate.GetSimulcastAllocations();

  EXPECT_EQ(layer0_bitrate, layer_allocations[0]);
  EXPECT_EQ(layer1_bitrate, layer_allocations[1]);
}

TEST(VideoBitrateAllocation, SimulcastTargetBitrateWithInactiveStream) {
  // Create bitrate allocation with bitrate only for the first and third stream.
  VideoBitrateAllocation bitrate;
  bitrate.SetBitrate(0, 0, 10000);
  bitrate.SetBitrate(0, 1, 20000);
  bitrate.SetBitrate(2, 0, 40000);
  bitrate.SetBitrate(2, 1, 80000);

  VideoBitrateAllocation layer0_bitrate;
  layer0_bitrate.SetBitrate(0, 0, 10000);
  layer0_bitrate.SetBitrate(0, 1, 20000);

  VideoBitrateAllocation layer2_bitrate;
  layer2_bitrate.SetBitrate(0, 0, 40000);
  layer2_bitrate.SetBitrate(0, 1, 80000);

  std::vector<absl::optional<VideoBitrateAllocation>> layer_allocations =
      bitrate.GetSimulcastAllocations();

  EXPECT_EQ(layer0_bitrate, layer_allocations[0]);
  EXPECT_FALSE(layer_allocations[1]);
  EXPECT_EQ(layer2_bitrate, layer_allocations[2]);
}
} // namespace webrtc
api/video/video_bitrate_allocation.cc
@@ -107,6 +107,23 @@ std::vector<uint32_t> VideoBitrateAllocation::GetTemporalLayerAllocation(
  return temporal_rates;
}

std::vector<absl::optional<VideoBitrateAllocation>>
VideoBitrateAllocation::GetSimulcastAllocations() const {
  std::vector<absl::optional<VideoBitrateAllocation>> bitrates;
  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
    absl::optional<VideoBitrateAllocation> layer_bitrate;
    if (IsSpatialLayerUsed(si)) {
      layer_bitrate = VideoBitrateAllocation();
      for (int tl = 0; tl < kMaxTemporalStreams; ++tl) {
        if (HasBitrate(si, tl))
          layer_bitrate->SetBitrate(0, tl, GetBitrate(si, tl));
      }
    }
    bitrates.push_back(layer_bitrate);
  }
  return bitrates;
}

bool VideoBitrateAllocation::operator==(
    const VideoBitrateAllocation& other) const {
  for (size_t si = 0; si < kMaxSpatialLayers; ++si) {
api/video/video_bitrate_allocation.h
@@ -62,6 +62,12 @@ class VideoBitrateAllocation {
  // layer with a defined bitrate.
  std::vector<uint32_t> GetTemporalLayerAllocation(size_t spatial_index) const;

  // Returns one VideoBitrateAllocation for each spatial layer. This is used to
  // configure simulcast streams. Note that the length of the returned vector
  // is always kMaxSpatialLayers; the optional is unset for unused layers.
  std::vector<absl::optional<VideoBitrateAllocation>> GetSimulcastAllocations()
      const;

  uint32_t get_sum_bps() const { return sum_; }  // Sum of all bitrates.
  uint32_t get_sum_kbps() const {
    // Round down to not exceed the allocated bitrate.
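From the caller's side, the new accessor replaces the manual per-stream splitting that used to live in PayloadRouter::OnBitrateAllocationUpdated. A minimal sketch, assuming a free function for illustration (the per-stream forwarding call is left as a comment because it depends on the surrounding class):

```cpp
#include "api/video/video_bitrate_allocation.h"

// Illustrative only: forward a simulcast allocation as one
// VideoBitrateAllocation per active stream.
void ForwardSimulcastAllocations(const webrtc::VideoBitrateAllocation& bitrate) {
  // The vector always has kMaxSpatialLayers entries; unused/inactive spatial
  // layers are left unset and should not trigger a TargetBitrate message.
  std::vector<absl::optional<webrtc::VideoBitrateAllocation>> layer_bitrates =
      bitrate.GetSimulcastAllocations();
  for (size_t i = 0; i < layer_bitrates.size(); ++i) {
    if (layer_bitrates[i]) {
      // Each set entry has its temporal-layer rates moved to spatial index 0.
      // rtp_modules[i]->SetVideoBitrateAllocation(*layer_bitrates[i]);
    }
  }
}
```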
call/BUILD.gn
@@ -100,6 +100,8 @@ rtc_source_set("rtp_sender") {
  sources = [
    "payload_router.cc",
    "payload_router.h",
    "rtp_payload_params.cc",
    "rtp_payload_params.h",
    "rtp_transport_controller_send.cc",
    "rtp_transport_controller_send.h",
  ]
@@ -281,6 +283,7 @@ if (rtc_include_tests) {
      "rtcp_demuxer_unittest.cc",
      "rtp_bitrate_configurator_unittest.cc",
      "rtp_demuxer_unittest.cc",
      "rtp_payload_params_unittest.cc",
      "rtp_rtcp_demuxer_helper_unittest.cc",
      "rtx_receive_stream_unittest.cc",
    ]
call/payload_router.cc
@@ -14,157 +14,27 @@
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"
#include "rtc_base/random.h"
#include "rtc_base/timeutils.h"

namespace webrtc {

namespace {
// Map information from info into rtp.
void CopyCodecSpecific(const CodecSpecificInfo* info, RTPVideoHeader* rtp) {
  RTC_DCHECK(info);
  rtp->codec = info->codecType;
absl::optional<size_t> GetSimulcastIdx(const CodecSpecificInfo* info) {
  if (!info)
    return absl::nullopt;
  switch (info->codecType) {
    case kVideoCodecVP8: {
      rtp->vp8().InitRTPVideoHeaderVP8();
      rtp->vp8().nonReference = info->codecSpecific.VP8.nonReference;
      rtp->vp8().temporalIdx = info->codecSpecific.VP8.temporalIdx;
      rtp->vp8().layerSync = info->codecSpecific.VP8.layerSync;
      rtp->vp8().keyIdx = info->codecSpecific.VP8.keyIdx;
      rtp->simulcastIdx = info->codecSpecific.VP8.simulcastIdx;
      return;
    }
    case kVideoCodecVP9: {
      rtp->vp9().InitRTPVideoHeaderVP9();
      rtp->vp9().inter_pic_predicted =
          info->codecSpecific.VP9.inter_pic_predicted;
      rtp->vp9().flexible_mode = info->codecSpecific.VP9.flexible_mode;
      rtp->vp9().ss_data_available = info->codecSpecific.VP9.ss_data_available;
      rtp->vp9().non_ref_for_inter_layer_pred =
          info->codecSpecific.VP9.non_ref_for_inter_layer_pred;
      rtp->vp9().temporal_idx = info->codecSpecific.VP9.temporal_idx;
      rtp->vp9().spatial_idx = info->codecSpecific.VP9.spatial_idx;
      rtp->vp9().temporal_up_switch =
          info->codecSpecific.VP9.temporal_up_switch;
      rtp->vp9().inter_layer_predicted =
          info->codecSpecific.VP9.inter_layer_predicted;
      rtp->vp9().gof_idx = info->codecSpecific.VP9.gof_idx;
      rtp->vp9().num_spatial_layers =
          info->codecSpecific.VP9.num_spatial_layers;

      if (info->codecSpecific.VP9.ss_data_available) {
        rtp->vp9().spatial_layer_resolution_present =
            info->codecSpecific.VP9.spatial_layer_resolution_present;
        if (info->codecSpecific.VP9.spatial_layer_resolution_present) {
          for (size_t i = 0; i < info->codecSpecific.VP9.num_spatial_layers;
               ++i) {
            rtp->vp9().width[i] = info->codecSpecific.VP9.width[i];
            rtp->vp9().height[i] = info->codecSpecific.VP9.height[i];
          }
        }
        rtp->vp9().gof.CopyGofInfoVP9(info->codecSpecific.VP9.gof);
      }

      rtp->vp9().num_ref_pics = info->codecSpecific.VP9.num_ref_pics;
      for (int i = 0; i < info->codecSpecific.VP9.num_ref_pics; ++i) {
        rtp->vp9().pid_diff[i] = info->codecSpecific.VP9.p_diff[i];
      }
      rtp->vp9().end_of_picture = info->codecSpecific.VP9.end_of_picture;
      return;
    }
    case kVideoCodecH264: {
      auto& h264_header = rtp->video_type_header.emplace<RTPVideoHeaderH264>();
      h264_header.packetization_mode =
          info->codecSpecific.H264.packetization_mode;
      rtp->simulcastIdx = info->codecSpecific.H264.simulcast_idx;
      return;
    }
    case kVideoCodecVP8:
      return absl::optional<size_t>(info->codecSpecific.VP8.simulcastIdx);
    case kVideoCodecH264:
      return absl::optional<size_t>(info->codecSpecific.H264.simulcast_idx);
    case kVideoCodecMultiplex:
    case kVideoCodecGeneric:
      rtp->codec = kVideoCodecGeneric;
      rtp->simulcastIdx = info->codecSpecific.generic.simulcast_idx;
      return;
      return absl::optional<size_t>(info->codecSpecific.generic.simulcast_idx);
    default:
      return;
      return absl::nullopt;
  }
}

void SetVideoTiming(VideoSendTiming* timing, const EncodedImage& image) {
  if (image.timing_.flags == VideoSendTiming::TimingFrameFlags::kInvalid ||
      image.timing_.flags == VideoSendTiming::TimingFrameFlags::kNotTriggered) {
    timing->flags = VideoSendTiming::TimingFrameFlags::kInvalid;
    return;
  }

  timing->encode_start_delta_ms = VideoSendTiming::GetDeltaCappedMs(
      image.capture_time_ms_, image.timing_.encode_start_ms);
  timing->encode_finish_delta_ms = VideoSendTiming::GetDeltaCappedMs(
      image.capture_time_ms_, image.timing_.encode_finish_ms);
  timing->packetization_finish_delta_ms = 0;
  timing->pacer_exit_delta_ms = 0;
  timing->network_timestamp_delta_ms = 0;
  timing->network2_timestamp_delta_ms = 0;
  timing->flags = image.timing_.flags;
}

} // namespace

// State for setting picture id and tl0 pic idx, for VP8 and VP9
// TODO(nisse): Make these properties not codec specific.
class PayloadRouter::RtpPayloadParams final {
 public:
  RtpPayloadParams(const uint32_t ssrc, const RtpPayloadState* state)
      : ssrc_(ssrc) {
    Random random(rtc::TimeMicros());
    state_.picture_id =
        state ? state->picture_id : (random.Rand<int16_t>() & 0x7FFF);
    state_.tl0_pic_idx = state ? state->tl0_pic_idx : (random.Rand<uint8_t>());
  }
  ~RtpPayloadParams() {}

  void Set(RTPVideoHeader* rtp_video_header, bool first_frame_in_picture) {
    // Always set picture id. Set tl0_pic_idx iff temporal index is set.
    if (first_frame_in_picture) {
      state_.picture_id =
          (static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
    }
    if (rtp_video_header->codec == kVideoCodecVP8) {
      rtp_video_header->vp8().pictureId = state_.picture_id;

      if (rtp_video_header->vp8().temporalIdx != kNoTemporalIdx) {
        if (rtp_video_header->vp8().temporalIdx == 0) {
          ++state_.tl0_pic_idx;
        }
        rtp_video_header->vp8().tl0PicIdx = state_.tl0_pic_idx;
      }
    }
    if (rtp_video_header->codec == kVideoCodecVP9) {
      rtp_video_header->vp9().picture_id = state_.picture_id;

      // Note that in the case that we have no temporal layers but we do have
      // spatial layers, packets will carry layering info with a temporal_idx of
      // zero, and we then have to set and increment tl0_pic_idx.
      if (rtp_video_header->vp9().temporal_idx != kNoTemporalIdx ||
          rtp_video_header->vp9().spatial_idx != kNoSpatialIdx) {
        if (first_frame_in_picture &&
            (rtp_video_header->vp9().temporal_idx == 0 ||
             rtp_video_header->vp9().temporal_idx == kNoTemporalIdx)) {
          ++state_.tl0_pic_idx;
        }
        rtp_video_header->vp9().tl0_pic_idx = state_.tl0_pic_idx;
      }
    }
  }

  uint32_t ssrc() const { return ssrc_; }

  RtpPayloadState state() const { return state_; }

 private:
  const uint32_t ssrc_;
  RtpPayloadState state_;
};

PayloadRouter::PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
                             const std::vector<uint32_t>& ssrcs,
                             int payload_type,
@@ -231,25 +101,10 @@ EncodedImageCallback::Result PayloadRouter::OnEncodedImage(
  if (!active_)
    return Result(Result::ERROR_SEND_FAILED);

  RTPVideoHeader rtp_video_header;
  if (codec_specific_info)
    CopyCodecSpecific(codec_specific_info, &rtp_video_header);

  rtp_video_header.rotation = encoded_image.rotation_;
  rtp_video_header.content_type = encoded_image.content_type_;
  rtp_video_header.playout_delay = encoded_image.playout_delay_;

  SetVideoTiming(&rtp_video_header.video_timing, encoded_image);

  int stream_index = rtp_video_header.simulcastIdx;
  size_t stream_index = GetSimulcastIdx(codec_specific_info).value_or(0);
  RTC_DCHECK_LT(stream_index, rtp_modules_.size());

  // Sets picture id and tl0 pic idx.
  const bool first_frame_in_picture =
      (codec_specific_info && codec_specific_info->codecType == kVideoCodecVP9)
          ? codec_specific_info->codecSpecific.VP9.first_frame_in_picture
          : true;
  params_[stream_index].Set(&rtp_video_header, first_frame_in_picture);
  RTPVideoHeader rtp_video_header = params_[stream_index].GetRtpVideoHeader(
      encoded_image, codec_specific_info);

  uint32_t frame_id;
  if (!rtp_modules_[stream_index]->Sending()) {
@@ -274,22 +129,16 @@ void PayloadRouter::OnBitrateAllocationUpdated(
    // If spatial scalability is enabled, it is covered by a single stream.
    rtp_modules_[0]->SetVideoBitrateAllocation(bitrate);
  } else {
    std::vector<absl::optional<VideoBitrateAllocation>> layer_bitrates =
        bitrate.GetSimulcastAllocations();
    // Simulcast is in use, split the VideoBitrateAllocation into one struct
    // per rtp stream, moving over the temporal layer allocation.
    for (size_t si = 0; si < rtp_modules_.size(); ++si) {
      // Don't send empty TargetBitrate messages on streams not being relayed.
      if (!bitrate.IsSpatialLayerUsed(si)) {
        // The next spatial layer could be used if the current one is
        // inactive.
        continue;
    for (size_t i = 0; i < rtp_modules_.size(); ++i) {
      // The next spatial layer could be used if the current one is
      // inactive.
      if (layer_bitrates[i]) {
        rtp_modules_[i]->SetVideoBitrateAllocation(*layer_bitrates[i]);
      }

      VideoBitrateAllocation layer_bitrate;
      for (int tl = 0; tl < kMaxTemporalStreams; ++tl) {
        if (bitrate.HasBitrate(si, tl))
          layer_bitrate.SetBitrate(0, tl, bitrate.GetBitrate(si, tl));
      }
      rtp_modules_[si]->SetVideoBitrateAllocation(layer_bitrate);
    }
  }
}
call/payload_router.h
@@ -15,6 +15,7 @@
#include <vector>

#include "api/video_codecs/video_encoder.h"
#include "call/rtp_payload_params.h"
#include "common_types.h" // NOLINT(build/include)
#include "modules/rtp_rtcp/source/rtp_video_header.h"
#include "rtc_base/constructormagic.h"
@@ -26,12 +27,6 @@ namespace webrtc {
class RTPFragmentationHeader;
class RtpRtcp;

// Currently only VP8/VP9 specific.
struct RtpPayloadState {
  int16_t picture_id = -1;
  uint8_t tl0_pic_idx = 0;
};

// PayloadRouter routes outgoing data to the correct sending RTP module, based
// on the simulcast layer in RTPVideoHeader.
class PayloadRouter : public EncodedImageCallback {
@@ -63,8 +58,6 @@ class PayloadRouter : public EncodedImageCallback {
  void OnBitrateAllocationUpdated(const VideoBitrateAllocation& bitrate);

 private:
  class RtpPayloadParams;

  void UpdateModuleSendingState() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);

  rtc::CriticalSection crit_;
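Not visible in these hunks: PayloadRouter's per-stream state member. Based on the params_[stream_index] usage in the .cc hunk above, the declaration presumably looks roughly like the sketch below; the member name comes from the diff, while the exact position and lock annotation are assumptions.

```cpp
// Inside class PayloadRouter (illustrative): one RtpPayloadParams per
// simulcast stream / SSRC, guarded by the existing critical section.
std::vector<RtpPayloadParams> params_ RTC_GUARDED_BY(crit_);
```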
call/payload_router_unittest.cc
@@ -31,10 +31,6 @@ namespace {
const int8_t kPayloadType = 96;
const uint32_t kSsrc1 = 12345;
const uint32_t kSsrc2 = 23456;
const uint32_t kSsrc3 = 34567;
const int16_t kPictureId = 123;
const int16_t kTl0PicIdx = 20;
const uint8_t kTemporalIdx = 1;
const int16_t kInitialPictureId1 = 222;
const int16_t kInitialPictureId2 = 44;
const int16_t kInitialTl0PicIdx1 = 99;
@@ -220,243 +216,6 @@ TEST(PayloadRouterTest, SendSimulcastSetActiveModules) {
          .error);
}

TEST(PayloadRouterTest, SimulcastTargetBitrate) {
  NiceMock<MockRtpRtcp> rtp_1;
  NiceMock<MockRtpRtcp> rtp_2;
  std::vector<RtpRtcp*> modules = {&rtp_1, &rtp_2};

  PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, {});
  payload_router.SetActive(true);

  VideoBitrateAllocation bitrate;
  bitrate.SetBitrate(0, 0, 10000);
  bitrate.SetBitrate(0, 1, 20000);
  bitrate.SetBitrate(1, 0, 40000);
  bitrate.SetBitrate(1, 1, 80000);

  VideoBitrateAllocation layer0_bitrate;
  layer0_bitrate.SetBitrate(0, 0, 10000);
  layer0_bitrate.SetBitrate(0, 1, 20000);

  VideoBitrateAllocation layer1_bitrate;
  layer1_bitrate.SetBitrate(0, 0, 40000);
  layer1_bitrate.SetBitrate(0, 1, 80000);

  EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(layer0_bitrate)).Times(1);
  EXPECT_CALL(rtp_2, SetVideoBitrateAllocation(layer1_bitrate)).Times(1);

  payload_router.OnBitrateAllocationUpdated(bitrate);
}

// If the middle of three streams is inactive the first and last streams should
// be asked to send the TargetBitrate message.
TEST(PayloadRouterTest, SimulcastTargetBitrateWithInactiveStream) {
  // Set up three active rtp modules.
  NiceMock<MockRtpRtcp> rtp_1;
  NiceMock<MockRtpRtcp> rtp_2;
  NiceMock<MockRtpRtcp> rtp_3;
  std::vector<RtpRtcp*> modules = {&rtp_1, &rtp_2, &rtp_3};
  PayloadRouter payload_router(modules, {kSsrc1, kSsrc2, kSsrc3}, kPayloadType,
                               {});
  payload_router.SetActive(true);

  // Create bitrate allocation with bitrate only for the first and third stream.
  VideoBitrateAllocation bitrate;
  bitrate.SetBitrate(0, 0, 10000);
  bitrate.SetBitrate(0, 1, 20000);
  bitrate.SetBitrate(2, 0, 40000);
  bitrate.SetBitrate(2, 1, 80000);

  VideoBitrateAllocation layer0_bitrate;
  layer0_bitrate.SetBitrate(0, 0, 10000);
  layer0_bitrate.SetBitrate(0, 1, 20000);

  VideoBitrateAllocation layer2_bitrate;
  layer2_bitrate.SetBitrate(0, 0, 40000);
  layer2_bitrate.SetBitrate(0, 1, 80000);

  // Expect the first and third rtp module to be asked to send a TargetBitrate
  // message. (No target bitrate with 0bps sent from the second one.)
  EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(layer0_bitrate)).Times(1);
  EXPECT_CALL(rtp_2, SetVideoBitrateAllocation(_)).Times(0);
  EXPECT_CALL(rtp_3, SetVideoBitrateAllocation(layer2_bitrate)).Times(1);

  payload_router.OnBitrateAllocationUpdated(bitrate);
}

TEST(PayloadRouterTest, SvcTargetBitrate) {
  NiceMock<MockRtpRtcp> rtp_1;
  std::vector<RtpRtcp*> modules = {&rtp_1};
  PayloadRouter payload_router(modules, {kSsrc1}, kPayloadType, {});
  payload_router.SetActive(true);

  VideoBitrateAllocation bitrate;
  bitrate.SetBitrate(0, 0, 10000);
  bitrate.SetBitrate(0, 1, 20000);
  bitrate.SetBitrate(1, 0, 40000);
  bitrate.SetBitrate(1, 1, 80000);

  EXPECT_CALL(rtp_1, SetVideoBitrateAllocation(bitrate)).Times(1);

  payload_router.OnBitrateAllocationUpdated(bitrate);
}

TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp8) {
  NiceMock<MockRtpRtcp> rtp1;
  NiceMock<MockRtpRtcp> rtp2;
  std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
  RtpPayloadState state2;
  state2.picture_id = kPictureId;
  state2.tl0_pic_idx = kTl0PicIdx;
  std::map<uint32_t, RtpPayloadState> states = {{kSsrc2, state2}};

  PayloadRouter payload_router(modules, {kSsrc1, kSsrc2}, kPayloadType, states);
  payload_router.SetActive(true);

  EncodedImage encoded_image;
  encoded_image.rotation_ = kVideoRotation_90;
  encoded_image.content_type_ = VideoContentType::SCREENSHARE;

  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.simulcastIdx = 1;
  codec_info.codecSpecific.VP8.temporalIdx = kTemporalIdx;
  codec_info.codecSpecific.VP8.keyIdx = kNoKeyIdx;
  codec_info.codecSpecific.VP8.layerSync = true;
  codec_info.codecSpecific.VP8.nonReference = true;

  EXPECT_CALL(rtp2, Sending()).WillOnce(Return(true));
  EXPECT_CALL(rtp2, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoRotation_90, header->rotation);
        EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
        EXPECT_EQ(1, header->simulcastIdx);
        EXPECT_EQ(kVideoCodecVP8, header->codec);
        EXPECT_EQ(kPictureId + 1, header->vp8().pictureId);
        EXPECT_EQ(kTemporalIdx, header->vp8().temporalIdx);
        EXPECT_EQ(kTl0PicIdx, header->vp8().tl0PicIdx);
        EXPECT_EQ(kNoKeyIdx, header->vp8().keyIdx);
        EXPECT_TRUE(header->vp8().layerSync);
        EXPECT_TRUE(header->vp8().nonReference);
        return true;
      }));

  EXPECT_EQ(
      EncodedImageCallback::Result::OK,
      payload_router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
}

TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_Vp9) {
  RtpPayloadState state;
  state.picture_id = kPictureId;
  state.tl0_pic_idx = kTl0PicIdx;
  std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state}};

  NiceMock<MockRtpRtcp> rtp;
  std::vector<RtpRtcp*> modules = {&rtp};
  PayloadRouter router(modules, {kSsrc1}, kPayloadType, states);
  router.SetActive(true);

  EncodedImage encoded_image;
  encoded_image.rotation_ = kVideoRotation_90;
  encoded_image.content_type_ = VideoContentType::SCREENSHARE;

  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP9;
  codec_info.codecSpecific.VP9.num_spatial_layers = 3;
  codec_info.codecSpecific.VP9.first_frame_in_picture = true;
  codec_info.codecSpecific.VP9.spatial_idx = 0;
  codec_info.codecSpecific.VP9.temporal_idx = 2;
  codec_info.codecSpecific.VP9.end_of_picture = false;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(
          Invoke([&codec_info](Unused, Unused, Unused, Unused, Unused, Unused,
                               Unused, const RTPVideoHeader* header, Unused) {
            EXPECT_EQ(kVideoRotation_90, header->rotation);
            EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
            EXPECT_EQ(kVideoCodecVP9, header->codec);
            EXPECT_EQ(kPictureId + 1, header->vp9().picture_id);
            EXPECT_EQ(kTl0PicIdx, header->vp9().tl0_pic_idx);
            EXPECT_EQ(header->vp9().temporal_idx,
                      codec_info.codecSpecific.VP9.temporal_idx);
            EXPECT_EQ(header->vp9().spatial_idx,
                      codec_info.codecSpecific.VP9.spatial_idx);
            EXPECT_EQ(header->vp9().num_spatial_layers,
                      codec_info.codecSpecific.VP9.num_spatial_layers);
            EXPECT_EQ(header->vp9().end_of_picture,
                      codec_info.codecSpecific.VP9.end_of_picture);
            return true;
          }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // Next spatial layer.
  codec_info.codecSpecific.VP9.first_frame_in_picture = false;
  codec_info.codecSpecific.VP9.spatial_idx += 1;
  codec_info.codecSpecific.VP9.end_of_picture = true;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(
          Invoke([&codec_info](Unused, Unused, Unused, Unused, Unused, Unused,
                               Unused, const RTPVideoHeader* header, Unused) {
            EXPECT_EQ(kVideoRotation_90, header->rotation);
            EXPECT_EQ(VideoContentType::SCREENSHARE, header->content_type);
            EXPECT_EQ(kVideoCodecVP9, header->codec);
            EXPECT_EQ(kPictureId + 1, header->vp9().picture_id);
            EXPECT_EQ(kTl0PicIdx, header->vp9().tl0_pic_idx);
            EXPECT_EQ(header->vp9().temporal_idx,
                      codec_info.codecSpecific.VP9.temporal_idx);
            EXPECT_EQ(header->vp9().spatial_idx,
                      codec_info.codecSpecific.VP9.spatial_idx);
            EXPECT_EQ(header->vp9().num_spatial_layers,
                      codec_info.codecSpecific.VP9.num_spatial_layers);
            EXPECT_EQ(header->vp9().end_of_picture,
                      codec_info.codecSpecific.VP9.end_of_picture);
            return true;
          }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
}

TEST(PayloadRouterTest, InfoMappedToRtpVideoHeader_H264) {
  NiceMock<MockRtpRtcp> rtp1;
  std::vector<RtpRtcp*> modules = {&rtp1};
  PayloadRouter payload_router(modules, {kSsrc1}, kPayloadType, {});
  payload_router.SetActive(true);

  EncodedImage encoded_image;
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecH264;
  codec_info.codecSpecific.H264.packetization_mode =
      H264PacketizationMode::SingleNalUnit;

  EXPECT_CALL(rtp1, Sending()).WillOnce(Return(true));
  EXPECT_CALL(rtp1, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(0, header->simulcastIdx);
        EXPECT_EQ(kVideoCodecH264, header->codec);
        const auto& h264 =
            absl::get<RTPVideoHeaderH264>(header->video_type_header);
        EXPECT_EQ(H264PacketizationMode::SingleNalUnit,
                  h264.packetization_mode);
        return true;
      }));

  EXPECT_EQ(
      EncodedImageCallback::Result::OK,
      payload_router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);
}

TEST(PayloadRouterTest, CreateWithNoPreviousStates) {
  NiceMock<MockRtpRtcp> rtp1;
  NiceMock<MockRtpRtcp> rtp2;
@@ -495,227 +254,4 @@ TEST(PayloadRouterTest, CreateWithPreviousStates) {
  EXPECT_EQ(kInitialPictureId2, initial_states[kSsrc2].picture_id);
  EXPECT_EQ(kInitialTl0PicIdx2, initial_states[kSsrc2].tl0_pic_idx);
}

TEST(PayloadRouterTest, PictureIdIsSetForVp8) {
  RtpPayloadState state1;
  state1.picture_id = kInitialPictureId1;
  state1.tl0_pic_idx = kInitialTl0PicIdx1;
  RtpPayloadState state2;
  state2.picture_id = kInitialPictureId2;
  state2.tl0_pic_idx = kInitialTl0PicIdx2;
  std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state1},
                                                {kSsrc2, state2}};

  NiceMock<MockRtpRtcp> rtp1;
  NiceMock<MockRtpRtcp> rtp2;
  std::vector<RtpRtcp*> modules = {&rtp1, &rtp2};
  PayloadRouter router(modules, {kSsrc1, kSsrc2}, kPayloadType, states);
  router.SetActive(true);

  EncodedImage encoded_image;
  // Modules are sending for this test.
  // OnEncodedImage, simulcastIdx: 0.
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.simulcastIdx = 0;

  EXPECT_CALL(rtp1, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP8, header->codec);
        EXPECT_EQ(kInitialPictureId1 + 1, header->vp8().pictureId);
        return true;
      }));
  EXPECT_CALL(rtp1, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // OnEncodedImage, simulcastIdx: 1.
  codec_info.codecSpecific.VP8.simulcastIdx = 1;

  EXPECT_CALL(rtp2, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP8, header->codec);
        EXPECT_EQ(kInitialPictureId2 + 1, header->vp8().pictureId);
        return true;
      }));
  EXPECT_CALL(rtp2, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // State should hold latest used picture id and tl0_pic_idx.
  states = router.GetRtpPayloadStates();
  EXPECT_EQ(2u, states.size());
  EXPECT_EQ(kInitialPictureId1 + 1, states[kSsrc1].picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, states[kSsrc1].tl0_pic_idx);
  EXPECT_EQ(kInitialPictureId2 + 1, states[kSsrc2].picture_id);
  EXPECT_EQ(kInitialTl0PicIdx2 + 1, states[kSsrc2].tl0_pic_idx);
}

TEST(PayloadRouterTest, PictureIdWraps) {
  RtpPayloadState state1;
  state1.picture_id = kMaxTwoBytePictureId;
  state1.tl0_pic_idx = kInitialTl0PicIdx1;

  NiceMock<MockRtpRtcp> rtp;
  std::vector<RtpRtcp*> modules = {&rtp};
  PayloadRouter router(modules, {kSsrc1}, kPayloadType, {{kSsrc1, state1}});
  router.SetActive(true);

  EncodedImage encoded_image;
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP8, header->codec);
        EXPECT_EQ(0, header->vp8().pictureId);
        return true;
      }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // State should hold latest used picture id and tl0_pic_idx.
  std::map<uint32_t, RtpPayloadState> states = router.GetRtpPayloadStates();
  EXPECT_EQ(1u, states.size());
  EXPECT_EQ(0, states[kSsrc1].picture_id); // Wrapped.
  EXPECT_EQ(kInitialTl0PicIdx1, states[kSsrc1].tl0_pic_idx);
}

TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp8) {
  RtpPayloadState state;
  state.picture_id = kInitialPictureId1;
  state.tl0_pic_idx = kInitialTl0PicIdx1;
  std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state}};

  NiceMock<MockRtpRtcp> rtp;
  std::vector<RtpRtcp*> modules = {&rtp};
  PayloadRouter router(modules, {kSsrc1}, kPayloadType, states);
  router.SetActive(true);

  EncodedImage encoded_image;
  // Modules are sending for this test.
  // OnEncodedImage, temporalIdx: 1.
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.temporalIdx = 1;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP8, header->codec);
        EXPECT_EQ(kInitialPictureId1 + 1, header->vp8().pictureId);
        EXPECT_EQ(kInitialTl0PicIdx1, header->vp8().tl0PicIdx);
        return true;
      }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // OnEncodedImage, temporalIdx: 0.
  codec_info.codecSpecific.VP8.temporalIdx = 0;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP8, header->codec);
        EXPECT_EQ(kInitialPictureId1 + 2, header->vp8().pictureId);
        EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp8().tl0PicIdx);
        return true;
      }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // State should hold latest used picture id and tl0_pic_idx.
  states = router.GetRtpPayloadStates();
  EXPECT_EQ(1u, states.size());
  EXPECT_EQ(kInitialPictureId1 + 2, states[kSsrc1].picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, states[kSsrc1].tl0_pic_idx);
}

TEST(PayloadRouterTest, Tl0PicIdxUpdatedForVp9) {
  RtpPayloadState state;
  state.picture_id = kInitialPictureId1;
  state.tl0_pic_idx = kInitialTl0PicIdx1;
  std::map<uint32_t, RtpPayloadState> states = {{kSsrc1, state}};

  NiceMock<MockRtpRtcp> rtp;
  std::vector<RtpRtcp*> modules = {&rtp};
  PayloadRouter router(modules, {kSsrc1}, kPayloadType, states);
  router.SetActive(true);

  EncodedImage encoded_image;
  // Modules are sending for this test.
  // OnEncodedImage, temporalIdx: 1.
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP9;
  codec_info.codecSpecific.VP9.temporal_idx = 1;
  codec_info.codecSpecific.VP9.first_frame_in_picture = true;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP9, header->codec);
        EXPECT_EQ(kInitialPictureId1 + 1, header->vp9().picture_id);
        EXPECT_EQ(kInitialTl0PicIdx1, header->vp9().tl0_pic_idx);
        return true;
      }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // OnEncodedImage, temporalIdx: 0.
  codec_info.codecSpecific.VP9.temporal_idx = 0;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP9, header->codec);
        EXPECT_EQ(kInitialPictureId1 + 2, header->vp9().picture_id);
        EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp9().tl0_pic_idx);
        return true;
      }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // OnEncodedImage, first_frame_in_picture = false
  codec_info.codecSpecific.VP9.first_frame_in_picture = false;

  EXPECT_CALL(rtp, SendOutgoingData(_, _, _, _, _, _, nullptr, _, _))
      .WillOnce(Invoke([](Unused, Unused, Unused, Unused, Unused, Unused,
                          Unused, const RTPVideoHeader* header, Unused) {
        EXPECT_EQ(kVideoCodecVP9, header->codec);
        EXPECT_EQ(kInitialPictureId1 + 2, header->vp9().picture_id);
        EXPECT_EQ(kInitialTl0PicIdx1 + 1, header->vp9().tl0_pic_idx);
        return true;
      }));
  EXPECT_CALL(rtp, Sending()).WillOnce(Return(true));

  EXPECT_EQ(EncodedImageCallback::Result::OK,
            router.OnEncodedImage(encoded_image, &codec_info, nullptr).error);

  // State should hold latest used picture id and tl0_pic_idx.
  states = router.GetRtpPayloadStates();
  EXPECT_EQ(1u, states.size());
  EXPECT_EQ(kInitialPictureId1 + 2, states[kSsrc1].picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, states[kSsrc1].tl0_pic_idx);
}

} // namespace webrtc
call/rtp_payload_params.cc (new file, 180 lines)
@@ -0,0 +1,180 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "call/rtp_payload_params.h"

#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/checks.h"
#include "rtc_base/random.h"
#include "rtc_base/timeutils.h"

namespace webrtc {

namespace {
void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info,
                                   RTPVideoHeader* rtp) {
  rtp->codec = info.codecType;
  switch (info.codecType) {
    case kVideoCodecVP8: {
      rtp->vp8().InitRTPVideoHeaderVP8();
      rtp->vp8().nonReference = info.codecSpecific.VP8.nonReference;
      rtp->vp8().temporalIdx = info.codecSpecific.VP8.temporalIdx;
      rtp->vp8().layerSync = info.codecSpecific.VP8.layerSync;
      rtp->vp8().keyIdx = info.codecSpecific.VP8.keyIdx;
      rtp->simulcastIdx = info.codecSpecific.VP8.simulcastIdx;
      return;
    }
    case kVideoCodecVP9: {
      rtp->vp9().InitRTPVideoHeaderVP9();
      rtp->vp9().inter_pic_predicted =
          info.codecSpecific.VP9.inter_pic_predicted;
      rtp->vp9().flexible_mode = info.codecSpecific.VP9.flexible_mode;
      rtp->vp9().ss_data_available = info.codecSpecific.VP9.ss_data_available;
      rtp->vp9().non_ref_for_inter_layer_pred =
          info.codecSpecific.VP9.non_ref_for_inter_layer_pred;
      rtp->vp9().temporal_idx = info.codecSpecific.VP9.temporal_idx;
      rtp->vp9().spatial_idx = info.codecSpecific.VP9.spatial_idx;
      rtp->vp9().temporal_up_switch = info.codecSpecific.VP9.temporal_up_switch;
      rtp->vp9().inter_layer_predicted =
          info.codecSpecific.VP9.inter_layer_predicted;
      rtp->vp9().gof_idx = info.codecSpecific.VP9.gof_idx;
      rtp->vp9().num_spatial_layers = info.codecSpecific.VP9.num_spatial_layers;

      if (info.codecSpecific.VP9.ss_data_available) {
        rtp->vp9().spatial_layer_resolution_present =
            info.codecSpecific.VP9.spatial_layer_resolution_present;
        if (info.codecSpecific.VP9.spatial_layer_resolution_present) {
          for (size_t i = 0; i < info.codecSpecific.VP9.num_spatial_layers;
               ++i) {
            rtp->vp9().width[i] = info.codecSpecific.VP9.width[i];
            rtp->vp9().height[i] = info.codecSpecific.VP9.height[i];
          }
        }
        rtp->vp9().gof.CopyGofInfoVP9(info.codecSpecific.VP9.gof);
      }

      rtp->vp9().num_ref_pics = info.codecSpecific.VP9.num_ref_pics;
      for (int i = 0; i < info.codecSpecific.VP9.num_ref_pics; ++i) {
        rtp->vp9().pid_diff[i] = info.codecSpecific.VP9.p_diff[i];
      }
      rtp->vp9().end_of_picture = info.codecSpecific.VP9.end_of_picture;
      return;
    }
    case kVideoCodecH264: {
      auto& h264_header = rtp->video_type_header.emplace<RTPVideoHeaderH264>();
      h264_header.packetization_mode =
          info.codecSpecific.H264.packetization_mode;
      rtp->simulcastIdx = info.codecSpecific.H264.simulcast_idx;
      return;
    }
    case kVideoCodecMultiplex:
    case kVideoCodecGeneric:
      rtp->codec = kVideoCodecGeneric;
      rtp->simulcastIdx = info.codecSpecific.generic.simulcast_idx;
      return;
    default:
      return;
  }
}

void SetVideoTiming(const EncodedImage& image, VideoSendTiming* timing) {
  if (image.timing_.flags == VideoSendTiming::TimingFrameFlags::kInvalid ||
      image.timing_.flags == VideoSendTiming::TimingFrameFlags::kNotTriggered) {
    timing->flags = VideoSendTiming::TimingFrameFlags::kInvalid;
    return;
  }

  timing->encode_start_delta_ms = VideoSendTiming::GetDeltaCappedMs(
      image.capture_time_ms_, image.timing_.encode_start_ms);
  timing->encode_finish_delta_ms = VideoSendTiming::GetDeltaCappedMs(
      image.capture_time_ms_, image.timing_.encode_finish_ms);
  timing->packetization_finish_delta_ms = 0;
  timing->pacer_exit_delta_ms = 0;
  timing->network_timestamp_delta_ms = 0;
  timing->network2_timestamp_delta_ms = 0;
  timing->flags = image.timing_.flags;
}
} // namespace

RtpPayloadParams::RtpPayloadParams(const uint32_t ssrc,
                                   const RtpPayloadState* state)
    : ssrc_(ssrc) {
  Random random(rtc::TimeMicros());
  state_.picture_id =
      state ? state->picture_id : (random.Rand<int16_t>() & 0x7FFF);
  state_.tl0_pic_idx = state ? state->tl0_pic_idx : (random.Rand<uint8_t>());
}
RtpPayloadParams::~RtpPayloadParams() {}

RTPVideoHeader RtpPayloadParams::GetRtpVideoHeader(
    const EncodedImage& image,
    const CodecSpecificInfo* codec_specific_info) {
  RTPVideoHeader rtp_video_header;
  if (codec_specific_info) {
    PopulateRtpWithCodecSpecifics(*codec_specific_info, &rtp_video_header);
  }
  rtp_video_header.rotation = image.rotation_;
  rtp_video_header.content_type = image.content_type_;
  rtp_video_header.playout_delay = image.playout_delay_;

  SetVideoTiming(image, &rtp_video_header.video_timing);

  // Sets picture id and tl0 pic idx.
  const bool first_frame_in_picture =
      (codec_specific_info && codec_specific_info->codecType == kVideoCodecVP9)
          ? codec_specific_info->codecSpecific.VP9.first_frame_in_picture
          : true;
  Set(&rtp_video_header, first_frame_in_picture);
  return rtp_video_header;
}

uint32_t RtpPayloadParams::ssrc() const {
  return ssrc_;
}

RtpPayloadState RtpPayloadParams::state() const {
  return state_;
}

void RtpPayloadParams::Set(RTPVideoHeader* rtp_video_header,
                           bool first_frame_in_picture) {
  // Always set picture id. Set tl0_pic_idx iff temporal index is set.
  if (first_frame_in_picture) {
    state_.picture_id = (static_cast<uint16_t>(state_.picture_id) + 1) & 0x7FFF;
  }
  if (rtp_video_header->codec == kVideoCodecVP8) {
    rtp_video_header->vp8().pictureId = state_.picture_id;

    if (rtp_video_header->vp8().temporalIdx != kNoTemporalIdx) {
      if (rtp_video_header->vp8().temporalIdx == 0) {
        ++state_.tl0_pic_idx;
      }
      rtp_video_header->vp8().tl0PicIdx = state_.tl0_pic_idx;
    }
  }
  if (rtp_video_header->codec == kVideoCodecVP9) {
    rtp_video_header->vp9().picture_id = state_.picture_id;

    // Note that in the case that we have no temporal layers but we do have
    // spatial layers, packets will carry layering info with a temporal_idx of
    // zero, and we then have to set and increment tl0_pic_idx.
    if (rtp_video_header->vp9().temporal_idx != kNoTemporalIdx ||
        rtp_video_header->vp9().spatial_idx != kNoSpatialIdx) {
      if (first_frame_in_picture &&
          (rtp_video_header->vp9().temporal_idx == 0 ||
           rtp_video_header->vp9().temporal_idx == kNoTemporalIdx)) {
        ++state_.tl0_pic_idx;
      }
      rtp_video_header->vp9().tl0_pic_idx = state_.tl0_pic_idx;
    }
  }
}
} // namespace webrtc
call/rtp_payload_params.h (new file, 54 lines)
@@ -0,0 +1,54 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef CALL_RTP_PAYLOAD_PARAMS_H_
#define CALL_RTP_PAYLOAD_PARAMS_H_

#include <map>
#include <vector>

#include "api/video_codecs/video_encoder.h"
#include "common_types.h" // NOLINT(build/include)
#include "modules/rtp_rtcp/source/rtp_video_header.h"

namespace webrtc {

class RTPFragmentationHeader;
class RtpRtcp;

// Currently only VP8/VP9 specific.
struct RtpPayloadState {
  int16_t picture_id = -1;
  uint8_t tl0_pic_idx = 0;
};

// State for setting picture id and tl0 pic idx, for VP8 and VP9
// TODO(nisse): Make these properties not codec specific.
class RtpPayloadParams final {
 public:
  RtpPayloadParams(const uint32_t ssrc, const RtpPayloadState* state);
  ~RtpPayloadParams();

  RTPVideoHeader GetRtpVideoHeader(
      const EncodedImage& image,
      const CodecSpecificInfo* codec_specific_info);

  uint32_t ssrc() const;

  RtpPayloadState state() const;

 private:
  void Set(RTPVideoHeader* rtp_video_header, bool first_frame_in_picture);

  const uint32_t ssrc_;
  RtpPayloadState state_;
};
} // namespace webrtc
#endif // CALL_RTP_PAYLOAD_PARAMS_H_
call/rtp_payload_params_unittest.cc (new file, 254 lines)
@@ -0,0 +1,254 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <memory>

#include "call/payload_router.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "test/gtest.h"

namespace webrtc {
namespace {
const uint32_t kSsrc1 = 12345;
const uint32_t kSsrc2 = 23456;
const int16_t kPictureId = 123;
const int16_t kTl0PicIdx = 20;
const uint8_t kTemporalIdx = 1;
const int16_t kInitialPictureId1 = 222;
const int16_t kInitialTl0PicIdx1 = 99;
} // namespace

TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp8) {
  RtpPayloadState state2;
  state2.picture_id = kPictureId;
  state2.tl0_pic_idx = kTl0PicIdx;
  std::map<uint32_t, RtpPayloadState> states = {{kSsrc2, state2}};

  RtpPayloadParams params(kSsrc2, &state2);
  EncodedImage encoded_image;
  encoded_image.rotation_ = kVideoRotation_90;
  encoded_image.content_type_ = VideoContentType::SCREENSHARE;

  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.simulcastIdx = 1;
  codec_info.codecSpecific.VP8.temporalIdx = kTemporalIdx;
  codec_info.codecSpecific.VP8.keyIdx = kNoKeyIdx;
  codec_info.codecSpecific.VP8.layerSync = true;
  codec_info.codecSpecific.VP8.nonReference = true;

  RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(kVideoRotation_90, header.rotation);
  EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
  EXPECT_EQ(1, header.simulcastIdx);
  EXPECT_EQ(kVideoCodecVP8, header.codec);
  EXPECT_EQ(kPictureId + 1, header.vp8().pictureId);
  EXPECT_EQ(kTemporalIdx, header.vp8().temporalIdx);
  EXPECT_EQ(kTl0PicIdx, header.vp8().tl0PicIdx);
  EXPECT_EQ(kNoKeyIdx, header.vp8().keyIdx);
  EXPECT_TRUE(header.vp8().layerSync);
  EXPECT_TRUE(header.vp8().nonReference);
}

TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_Vp9) {
  RtpPayloadState state;
  state.picture_id = kPictureId;
  state.tl0_pic_idx = kTl0PicIdx;
  RtpPayloadParams params(kSsrc1, &state);

  EncodedImage encoded_image;
  encoded_image.rotation_ = kVideoRotation_90;
  encoded_image.content_type_ = VideoContentType::SCREENSHARE;

  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP9;
  codec_info.codecSpecific.VP9.num_spatial_layers = 3;
  codec_info.codecSpecific.VP9.first_frame_in_picture = true;
  codec_info.codecSpecific.VP9.spatial_idx = 0;
  codec_info.codecSpecific.VP9.temporal_idx = 2;
  codec_info.codecSpecific.VP9.end_of_picture = false;

  RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(kVideoRotation_90, header.rotation);
  EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
  EXPECT_EQ(kVideoCodecVP9, header.codec);
  EXPECT_EQ(kPictureId + 1, header.vp9().picture_id);
  EXPECT_EQ(kTl0PicIdx, header.vp9().tl0_pic_idx);
  EXPECT_EQ(header.vp9().temporal_idx,
            codec_info.codecSpecific.VP9.temporal_idx);
  EXPECT_EQ(header.vp9().spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
  EXPECT_EQ(header.vp9().num_spatial_layers,
            codec_info.codecSpecific.VP9.num_spatial_layers);
  EXPECT_EQ(header.vp9().end_of_picture,
            codec_info.codecSpecific.VP9.end_of_picture);

  // Next spatial layer.
  codec_info.codecSpecific.VP9.first_frame_in_picture = false;
  codec_info.codecSpecific.VP9.spatial_idx += 1;
  codec_info.codecSpecific.VP9.end_of_picture = true;

  header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(kVideoRotation_90, header.rotation);
  EXPECT_EQ(VideoContentType::SCREENSHARE, header.content_type);
  EXPECT_EQ(kVideoCodecVP9, header.codec);
  EXPECT_EQ(kPictureId + 1, header.vp9().picture_id);
  EXPECT_EQ(kTl0PicIdx, header.vp9().tl0_pic_idx);
  EXPECT_EQ(header.vp9().temporal_idx,
            codec_info.codecSpecific.VP9.temporal_idx);
  EXPECT_EQ(header.vp9().spatial_idx, codec_info.codecSpecific.VP9.spatial_idx);
  EXPECT_EQ(header.vp9().num_spatial_layers,
            codec_info.codecSpecific.VP9.num_spatial_layers);
  EXPECT_EQ(header.vp9().end_of_picture,
            codec_info.codecSpecific.VP9.end_of_picture);
}

TEST(RtpPayloadParamsTest, InfoMappedToRtpVideoHeader_H264) {
  RtpPayloadParams params(kSsrc1, {});

  EncodedImage encoded_image;
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecH264;
  codec_info.codecSpecific.H264.packetization_mode =
      H264PacketizationMode::SingleNalUnit;

  RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(0, header.simulcastIdx);
  EXPECT_EQ(kVideoCodecH264, header.codec);
  const auto& h264 = absl::get<RTPVideoHeaderH264>(header.video_type_header);
  EXPECT_EQ(H264PacketizationMode::SingleNalUnit, h264.packetization_mode);
}

TEST(RtpPayloadParamsTest, PictureIdIsSetForVp8) {
  RtpPayloadState state;
  state.picture_id = kInitialPictureId1;
  state.tl0_pic_idx = kInitialTl0PicIdx1;

  EncodedImage encoded_image;
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.simulcastIdx = 0;

  RtpPayloadParams params(kSsrc1, &state);
  RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
  EXPECT_EQ(kVideoCodecVP8, header.codec);
  EXPECT_EQ(kInitialPictureId1 + 1, header.vp8().pictureId);

  // State should hold latest used picture id and tl0_pic_idx.
  state = params.state();
  EXPECT_EQ(kInitialPictureId1 + 1, state.picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, state.tl0_pic_idx);
}

TEST(RtpPayloadParamsTest, PictureIdWraps) {
  RtpPayloadState state;
  state.picture_id = kMaxTwoBytePictureId;
  state.tl0_pic_idx = kInitialTl0PicIdx1;

  EncodedImage encoded_image;
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;

  RtpPayloadParams params(kSsrc1, &state);
  RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);
  EXPECT_EQ(kVideoCodecVP8, header.codec);
  EXPECT_EQ(0, header.vp8().pictureId);

  // State should hold latest used picture id and tl0_pic_idx.
  EXPECT_EQ(0, params.state().picture_id); // Wrapped.
  EXPECT_EQ(kInitialTl0PicIdx1, params.state().tl0_pic_idx);
}

TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp8) {
  RtpPayloadState state;
  state.picture_id = kInitialPictureId1;
  state.tl0_pic_idx = kInitialTl0PicIdx1;

  EncodedImage encoded_image;
  // Modules are sending for this test.
  // OnEncodedImage, temporalIdx: 1.
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP8;
  codec_info.codecSpecific.VP8.temporalIdx = 1;

  RtpPayloadParams params(kSsrc1, &state);
  RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(kVideoCodecVP8, header.codec);
  EXPECT_EQ(kInitialPictureId1 + 1, header.vp8().pictureId);
  EXPECT_EQ(kInitialTl0PicIdx1, header.vp8().tl0PicIdx);

  // OnEncodedImage, temporalIdx: 0.
  codec_info.codecSpecific.VP8.temporalIdx = 0;

  header = params.GetRtpVideoHeader(encoded_image, &codec_info);
  EXPECT_EQ(kVideoCodecVP8, header.codec);
  EXPECT_EQ(kInitialPictureId1 + 2, header.vp8().pictureId);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp8().tl0PicIdx);

  // State should hold latest used picture id and tl0_pic_idx.
  EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, params.state().tl0_pic_idx);
}

TEST(RtpPayloadParamsTest, Tl0PicIdxUpdatedForVp9) {
  RtpPayloadState state;
  state.picture_id = kInitialPictureId1;
  state.tl0_pic_idx = kInitialTl0PicIdx1;

  EncodedImage encoded_image;
  // Modules are sending for this test.
  // OnEncodedImage, temporalIdx: 1.
  CodecSpecificInfo codec_info;
  memset(&codec_info, 0, sizeof(CodecSpecificInfo));
  codec_info.codecType = kVideoCodecVP9;
  codec_info.codecSpecific.VP9.temporal_idx = 1;
  codec_info.codecSpecific.VP9.first_frame_in_picture = true;

  RtpPayloadParams params(kSsrc1, &state);
  RTPVideoHeader header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(kVideoCodecVP9, header.codec);
  EXPECT_EQ(kInitialPictureId1 + 1, header.vp9().picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1, header.vp9().tl0_pic_idx);

  // OnEncodedImage, temporalIdx: 0.
  codec_info.codecSpecific.VP9.temporal_idx = 0;

  header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(kVideoCodecVP9, header.codec);
  EXPECT_EQ(kInitialPictureId1 + 2, header.vp9().picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp9().tl0_pic_idx);

  // OnEncodedImage, first_frame_in_picture = false
  codec_info.codecSpecific.VP9.first_frame_in_picture = false;

  header = params.GetRtpVideoHeader(encoded_image, &codec_info);

  EXPECT_EQ(kVideoCodecVP9, header.codec);
  EXPECT_EQ(kInitialPictureId1 + 2, header.vp9().picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, header.vp9().tl0_pic_idx);

  // State should hold latest used picture id and tl0_pic_idx.
  EXPECT_EQ(kInitialPictureId1 + 2, params.state().picture_id);
  EXPECT_EQ(kInitialTl0PicIdx1 + 1, params.state().tl0_pic_idx);
}
} // namespace webrtc