Add support for RtpEncodingParameters num_temporal_layers.
Configuring different number of temporal layers per simulcast layer is not supported. Bug: webrtc:9785 Change-Id: I5709b2235233420e22e68fb0ae512305ae87e36c Reviewed-on: https://webrtc-review.googlesource.com/c/102120 Commit-Queue: Åsa Persson <asapersson@webrtc.org> Reviewed-by: Seth Hampson <shampson@webrtc.org> Reviewed-by: Sami Kalliomäki <sakal@webrtc.org> Reviewed-by: Rasmus Brandt <brandtr@webrtc.org> Reviewed-by: Erik Språng <sprang@webrtc.org> Cr-Commit-Position: refs/heads/master@{#24942}
This commit is contained in:
@@ -428,11 +428,18 @@ struct RtpEncodingParameters {
|
||||
absl::optional<int> min_bitrate_bps;
|
||||
|
||||
// Specifies the maximum framerate in fps for video.
|
||||
// TODO(asapersson): Different framerates are not supported per stream.
|
||||
// If set, the maximum |max_framerate| is currently used.
|
||||
// TODO(asapersson): Different framerates are not supported per simulcast
|
||||
// layer. If set, the maximum |max_framerate| is currently used.
|
||||
// Not supported for screencast.
|
||||
absl::optional<int> max_framerate;
|
||||
|
||||
// Specifies the number of temporal layers for video (if the feature is
|
||||
// supported by the codec implementation).
|
||||
// TODO(asapersson): Different number of temporal layers are not supported
|
||||
// per simulcast layer.
|
||||
// Not supported for screencast.
|
||||
absl::optional<int> num_temporal_layers;
|
||||
|
||||
// For video, scale the resolution down by this factor.
|
||||
// TODO(deadbeef): Not implemented.
|
||||
absl::optional<double> scale_resolution_down_by;
|
||||
@@ -466,6 +473,7 @@ struct RtpEncodingParameters {
|
||||
max_bitrate_bps == o.max_bitrate_bps &&
|
||||
min_bitrate_bps == o.min_bitrate_bps &&
|
||||
max_framerate == o.max_framerate &&
|
||||
num_temporal_layers == o.num_temporal_layers &&
|
||||
scale_resolution_down_by == o.scale_resolution_down_by &&
|
||||
scale_framerate_down_by == o.scale_framerate_down_by &&
|
||||
active == o.active && rid == o.rid &&
|
||||
|
||||
@@ -127,6 +127,7 @@ rtc_static_library("rtc_media_base") {
|
||||
"..:webrtc_common",
|
||||
"../api:libjingle_peerconnection_api",
|
||||
"../api/audio_codecs:audio_codecs_api",
|
||||
"../api/video:video_bitrate_allocation",
|
||||
"../api/video:video_frame",
|
||||
"../api/video:video_frame_i420",
|
||||
"../api/video_codecs:video_codecs_api",
|
||||
|
||||
@@ -10,6 +10,9 @@
|
||||
|
||||
#include "media/base/mediaengine.h"
|
||||
|
||||
#include "api/video/video_bitrate_allocation.h"
|
||||
#include "rtc_base/stringencode.h"
|
||||
|
||||
namespace cricket {
|
||||
|
||||
RtpCapabilities::RtpCapabilities() = default;
|
||||
@@ -79,6 +82,24 @@ webrtc::RTCError ValidateRtpParameters(
|
||||
"larger than max bitrate.");
|
||||
}
|
||||
}
|
||||
if (rtp_parameters.encodings[i].num_temporal_layers) {
|
||||
if (*rtp_parameters.encodings[i].num_temporal_layers < 1 ||
|
||||
*rtp_parameters.encodings[i].num_temporal_layers >
|
||||
webrtc::kMaxTemporalStreams) {
|
||||
LOG_AND_RETURN_ERROR(RTCErrorType::INVALID_RANGE,
|
||||
"Attempted to set RtpParameters "
|
||||
"num_temporal_layers to an invalid number.");
|
||||
}
|
||||
}
|
||||
if (i > 0 && (rtp_parameters.encodings[i].num_temporal_layers !=
|
||||
rtp_parameters.encodings[i - 1].num_temporal_layers)) {
|
||||
LOG_AND_RETURN_ERROR(
|
||||
RTCErrorType::INVALID_MODIFICATION,
|
||||
"Attempted to set RtpParameters num_temporal_layers "
|
||||
"at encoding layer i: " +
|
||||
rtc::ToString(i) +
|
||||
" to a different value than other encoding layers.");
|
||||
}
|
||||
}
|
||||
return webrtc::RTCError::OK();
|
||||
}
|
||||
|
||||
@@ -146,6 +146,11 @@ int GetMaxFramerate(const webrtc::VideoEncoderConfig& encoder_config,
|
||||
return max_fps;
|
||||
}
|
||||
|
||||
bool IsTemporalLayersSupported(const std::string& codec_name) {
|
||||
return CodecNamesEq(codec_name, kVp8CodecName) ||
|
||||
CodecNamesEq(codec_name, kVp9CodecName);
|
||||
}
|
||||
|
||||
static std::string CodecVectorToString(const std::vector<VideoCodec>& codecs) {
|
||||
rtc::StringBuilder out;
|
||||
out << "{";
|
||||
@@ -1735,7 +1740,9 @@ webrtc::RTCError WebRtcVideoChannel::WebRtcVideoSendStream::SetRtpParameters(
|
||||
(new_parameters.encodings[i].max_bitrate_bps !=
|
||||
rtp_parameters_.encodings[i].max_bitrate_bps) ||
|
||||
(new_parameters.encodings[i].max_framerate !=
|
||||
rtp_parameters_.encodings[i].max_framerate)) {
|
||||
rtp_parameters_.encodings[i].max_framerate) ||
|
||||
(new_parameters.encodings[i].num_temporal_layers !=
|
||||
rtp_parameters_.encodings[i].num_temporal_layers)) {
|
||||
new_param = true;
|
||||
break;
|
||||
}
|
||||
@@ -1894,6 +1901,10 @@ WebRtcVideoChannel::WebRtcVideoSendStream::CreateVideoEncoderConfig(
|
||||
encoder_config.simulcast_layers[i].max_framerate =
|
||||
*rtp_parameters_.encodings[i].max_framerate;
|
||||
}
|
||||
if (rtp_parameters_.encodings[i].num_temporal_layers) {
|
||||
encoder_config.simulcast_layers[i].num_temporal_layers =
|
||||
*rtp_parameters_.encodings[i].num_temporal_layers;
|
||||
}
|
||||
}
|
||||
|
||||
int max_qp = kDefaultQpMax;
|
||||
@@ -2606,6 +2617,12 @@ std::vector<webrtc::VideoStream> EncoderStreamFactory::CreateEncoderStreams(
|
||||
if (!is_screenshare_) {
|
||||
// Update simulcast framerates with max configured max framerate.
|
||||
layers[i].max_framerate = max_framerate;
|
||||
// Update with configured num temporal layers if supported by codec.
|
||||
if (encoder_config.simulcast_layers[i].num_temporal_layers &&
|
||||
IsTemporalLayersSupported(codec_name_)) {
|
||||
layers[i].num_temporal_layers =
|
||||
*encoder_config.simulcast_layers[i].num_temporal_layers;
|
||||
}
|
||||
}
|
||||
// Update simulcast bitrates with configured min and max bitrate.
|
||||
if (encoder_config.simulcast_layers[i].min_bitrate_bps > 0) {
|
||||
@@ -2688,6 +2705,14 @@ std::vector<webrtc::VideoStream> EncoderStreamFactory::CreateEncoderStreams(
|
||||
layer.num_temporal_layers = vp9_settings.numberOfTemporalLayers;
|
||||
}
|
||||
|
||||
if (!is_screenshare_ && IsTemporalLayersSupported(codec_name_)) {
|
||||
// Use configured number of temporal layers if set.
|
||||
if (encoder_config.simulcast_layers[0].num_temporal_layers) {
|
||||
layer.num_temporal_layers =
|
||||
*encoder_config.simulcast_layers[0].num_temporal_layers;
|
||||
}
|
||||
}
|
||||
|
||||
layers.push_back(layer);
|
||||
return layers;
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#include "api/test/mock_video_decoder_factory.h"
|
||||
#include "api/test/mock_video_encoder_factory.h"
|
||||
#include "api/units/time_delta.h"
|
||||
#include "api/video/video_bitrate_allocation.h"
|
||||
#include "api/video_codecs/builtin_video_decoder_factory.h"
|
||||
#include "api/video_codecs/builtin_video_encoder_factory.h"
|
||||
#include "api/video_codecs/sdp_video_format.h"
|
||||
@@ -5460,6 +5461,30 @@ TEST_F(WebRtcVideoChannelTest, SetMaxFramerateOneStream) {
|
||||
EXPECT_EQ(kNewMaxFramerate, stream->GetVideoStreams()[0].max_framerate);
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest, SetNumTemporalLayersForSingleStream) {
|
||||
FakeVideoSendStream* stream = AddSendStream();
|
||||
|
||||
webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
EXPECT_EQ(1UL, parameters.encodings.size());
|
||||
EXPECT_FALSE(parameters.encodings[0].num_temporal_layers.has_value());
|
||||
EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
|
||||
|
||||
// Note that this is testing the behavior of the FakeVideoSendStream, which
|
||||
// also calls to CreateEncoderStreams to get the VideoStreams, so essentially
|
||||
// we are just testing the behavior of
|
||||
// EncoderStreamFactory::CreateEncoderStreams.
|
||||
ASSERT_EQ(1UL, stream->GetVideoStreams().size());
|
||||
EXPECT_FALSE(stream->GetVideoStreams()[0].num_temporal_layers.has_value());
|
||||
|
||||
// Set temporal layers and check that VideoStream.num_temporal_layers is set.
|
||||
parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
parameters.encodings[0].num_temporal_layers = 2;
|
||||
EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
|
||||
|
||||
ASSERT_EQ(1UL, stream->GetVideoStreams().size());
|
||||
EXPECT_EQ(2UL, stream->GetVideoStreams()[0].num_temporal_layers);
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest,
|
||||
CannotSetRtpSendParametersWithIncorrectNumberOfEncodings) {
|
||||
AddSendStream();
|
||||
@@ -5637,6 +5662,158 @@ TEST_F(WebRtcVideoChannelTest, GetAndSetRtpSendParametersMaxFramerate) {
|
||||
EXPECT_EQ(25, parameters.encodings[2].max_framerate);
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest,
|
||||
SetRtpSendParametersNumTemporalLayersFailsForInvalidRange) {
|
||||
const size_t kNumSimulcastStreams = 3;
|
||||
SetUpSimulcast(true, false);
|
||||
|
||||
// Get and set the rtp encoding parameters.
|
||||
webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
|
||||
|
||||
// Num temporal layers should be in the range [1, kMaxTemporalStreams].
|
||||
parameters.encodings[0].num_temporal_layers = 0;
|
||||
EXPECT_EQ(webrtc::RTCErrorType::INVALID_RANGE,
|
||||
channel_->SetRtpSendParameters(last_ssrc_, parameters).type());
|
||||
parameters.encodings[0].num_temporal_layers = webrtc::kMaxTemporalStreams + 1;
|
||||
EXPECT_EQ(webrtc::RTCErrorType::INVALID_RANGE,
|
||||
channel_->SetRtpSendParameters(last_ssrc_, parameters).type());
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest,
|
||||
SetRtpSendParametersNumTemporalLayersFailsForInvalidModification) {
|
||||
const size_t kNumSimulcastStreams = 3;
|
||||
SetUpSimulcast(true, false);
|
||||
|
||||
// Get and set the rtp encoding parameters.
|
||||
webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
|
||||
|
||||
// No/all layers should be set.
|
||||
parameters.encodings[0].num_temporal_layers = 1;
|
||||
EXPECT_EQ(webrtc::RTCErrorType::INVALID_MODIFICATION,
|
||||
channel_->SetRtpSendParameters(last_ssrc_, parameters).type());
|
||||
|
||||
// Different values not supported.
|
||||
parameters.encodings[0].num_temporal_layers = 1;
|
||||
parameters.encodings[1].num_temporal_layers = 2;
|
||||
parameters.encodings[2].num_temporal_layers = 2;
|
||||
EXPECT_EQ(webrtc::RTCErrorType::INVALID_MODIFICATION,
|
||||
channel_->SetRtpSendParameters(last_ssrc_, parameters).type());
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest, GetAndSetRtpSendParametersNumTemporalLayers) {
|
||||
const size_t kNumSimulcastStreams = 3;
|
||||
SetUpSimulcast(true, false);
|
||||
|
||||
// Get and set the rtp encoding parameters.
|
||||
webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
|
||||
for (const auto& encoding : parameters.encodings)
|
||||
EXPECT_FALSE(encoding.num_temporal_layers);
|
||||
|
||||
// Change the value and set it on the VideoChannel.
|
||||
parameters.encodings[0].num_temporal_layers = 3;
|
||||
parameters.encodings[1].num_temporal_layers = 3;
|
||||
parameters.encodings[2].num_temporal_layers = 3;
|
||||
EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
|
||||
|
||||
// Verify that the number of temporal layers are set on the VideoChannel.
|
||||
parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
|
||||
EXPECT_EQ(3, parameters.encodings[0].num_temporal_layers);
|
||||
EXPECT_EQ(3, parameters.encodings[1].num_temporal_layers);
|
||||
EXPECT_EQ(3, parameters.encodings[2].num_temporal_layers);
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest, NumTemporalLayersPropagatedToEncoder) {
|
||||
const size_t kNumSimulcastStreams = 3;
|
||||
FakeVideoSendStream* stream = SetUpSimulcast(true, false);
|
||||
|
||||
// Send a full size frame so all simulcast layers are used when reconfiguring.
|
||||
FakeVideoCapturerWithTaskQueue capturer;
|
||||
VideoOptions options;
|
||||
EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, &options, &capturer));
|
||||
EXPECT_EQ(cricket::CS_RUNNING,
|
||||
capturer.Start(capturer.GetSupportedFormats()->front()));
|
||||
channel_->SetSend(true);
|
||||
EXPECT_TRUE(capturer.CaptureFrame());
|
||||
|
||||
// Get and set the rtp encoding parameters.
|
||||
// Change the value and set it on the VideoChannel.
|
||||
webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
|
||||
parameters.encodings[0].num_temporal_layers = 2;
|
||||
parameters.encodings[1].num_temporal_layers = 2;
|
||||
parameters.encodings[2].num_temporal_layers = 2;
|
||||
EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
|
||||
|
||||
// Verify that the new value is propagated down to the encoder.
|
||||
// Check that WebRtcVideoSendStream updates VideoEncoderConfig correctly.
|
||||
EXPECT_EQ(2, stream->num_encoder_reconfigurations());
|
||||
webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
|
||||
EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
|
||||
EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size());
|
||||
EXPECT_EQ(2UL, encoder_config.simulcast_layers[0].num_temporal_layers);
|
||||
EXPECT_EQ(2UL, encoder_config.simulcast_layers[1].num_temporal_layers);
|
||||
EXPECT_EQ(2UL, encoder_config.simulcast_layers[2].num_temporal_layers);
|
||||
|
||||
// FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
|
||||
// VideoStreams are created appropriately for the simulcast case.
|
||||
EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
|
||||
EXPECT_EQ(2UL, stream->GetVideoStreams()[0].num_temporal_layers);
|
||||
EXPECT_EQ(2UL, stream->GetVideoStreams()[1].num_temporal_layers);
|
||||
EXPECT_EQ(2UL, stream->GetVideoStreams()[2].num_temporal_layers);
|
||||
|
||||
// No parameter changed, encoder should not be reconfigured.
|
||||
EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
|
||||
EXPECT_EQ(2, stream->num_encoder_reconfigurations());
|
||||
|
||||
EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest,
|
||||
DefaultValuePropagatedToEncoderForUnsetNumTemporalLayers) {
|
||||
const size_t kDefaultNumTemporalLayers = 3;
|
||||
const size_t kNumSimulcastStreams = 3;
|
||||
FakeVideoSendStream* stream = SetUpSimulcast(true, false);
|
||||
|
||||
// Send a full size frame so all simulcast layers are used when reconfiguring.
|
||||
FakeVideoCapturerWithTaskQueue capturer;
|
||||
VideoOptions options;
|
||||
EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, &options, &capturer));
|
||||
EXPECT_EQ(cricket::CS_RUNNING,
|
||||
capturer.Start(capturer.GetSupportedFormats()->front()));
|
||||
channel_->SetSend(true);
|
||||
EXPECT_TRUE(capturer.CaptureFrame());
|
||||
|
||||
// Change rtp encoding parameters, num_temporal_layers not changed.
|
||||
webrtc::RtpParameters parameters = channel_->GetRtpSendParameters(last_ssrc_);
|
||||
EXPECT_EQ(kNumSimulcastStreams, parameters.encodings.size());
|
||||
parameters.encodings[0].min_bitrate_bps = 33000;
|
||||
EXPECT_TRUE(channel_->SetRtpSendParameters(last_ssrc_, parameters).ok());
|
||||
|
||||
// Verify that no value is propagated down to the encoder.
|
||||
webrtc::VideoEncoderConfig encoder_config = stream->GetEncoderConfig().Copy();
|
||||
EXPECT_EQ(kNumSimulcastStreams, encoder_config.number_of_streams);
|
||||
EXPECT_EQ(kNumSimulcastStreams, encoder_config.simulcast_layers.size());
|
||||
EXPECT_FALSE(encoder_config.simulcast_layers[0].num_temporal_layers);
|
||||
EXPECT_FALSE(encoder_config.simulcast_layers[1].num_temporal_layers);
|
||||
EXPECT_FALSE(encoder_config.simulcast_layers[2].num_temporal_layers);
|
||||
|
||||
// FakeVideoSendStream calls CreateEncoderStreams, test that the vector of
|
||||
// VideoStreams are created appropriately for the simulcast case.
|
||||
EXPECT_EQ(kNumSimulcastStreams, stream->GetVideoStreams().size());
|
||||
EXPECT_EQ(kDefaultNumTemporalLayers,
|
||||
stream->GetVideoStreams()[0].num_temporal_layers);
|
||||
EXPECT_EQ(kDefaultNumTemporalLayers,
|
||||
stream->GetVideoStreams()[1].num_temporal_layers);
|
||||
EXPECT_EQ(kDefaultNumTemporalLayers,
|
||||
stream->GetVideoStreams()[2].num_temporal_layers);
|
||||
|
||||
EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
|
||||
}
|
||||
|
||||
TEST_F(WebRtcVideoChannelTest, MaxSimulcastFrameratePropagatedToEncoder) {
|
||||
const size_t kNumSimulcastStreams = 3;
|
||||
FakeVideoSendStream* stream = SetUpSimulcast(true, false);
|
||||
|
||||
@@ -38,17 +38,20 @@ public class RtpParameters {
|
||||
@Nullable public Integer minBitrateBps;
|
||||
// The max framerate in fps for video.
|
||||
@Nullable public Integer maxFramerate;
|
||||
// The number of temporal layers for video.
|
||||
@Nullable public Integer numTemporalLayers;
|
||||
// SSRC to be used by this encoding.
|
||||
// Can't be changed between getParameters/setParameters.
|
||||
public Long ssrc;
|
||||
|
||||
@CalledByNative("Encoding")
|
||||
Encoding(boolean active, Integer maxBitrateBps, Integer minBitrateBps, Integer maxFramerate,
|
||||
Long ssrc) {
|
||||
Integer numTemporalLayers, Long ssrc) {
|
||||
this.active = active;
|
||||
this.maxBitrateBps = maxBitrateBps;
|
||||
this.minBitrateBps = minBitrateBps;
|
||||
this.maxFramerate = maxFramerate;
|
||||
this.numTemporalLayers = numTemporalLayers;
|
||||
this.ssrc = ssrc;
|
||||
}
|
||||
|
||||
@@ -75,6 +78,12 @@ public class RtpParameters {
|
||||
return maxFramerate;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
@CalledByNative("Encoding")
|
||||
Integer getNumTemporalLayers() {
|
||||
return numTemporalLayers;
|
||||
}
|
||||
|
||||
@CalledByNative("Encoding")
|
||||
Long getSsrc() {
|
||||
return ssrc;
|
||||
|
||||
@@ -855,10 +855,12 @@ public class PeerConnectionTest {
|
||||
assertNull(rtpParameters.encodings.get(0).maxBitrateBps);
|
||||
assertNull(rtpParameters.encodings.get(0).minBitrateBps);
|
||||
assertNull(rtpParameters.encodings.get(0).maxFramerate);
|
||||
assertNull(rtpParameters.encodings.get(0).numTemporalLayers);
|
||||
|
||||
rtpParameters.encodings.get(0).maxBitrateBps = 300000;
|
||||
rtpParameters.encodings.get(0).minBitrateBps = 100000;
|
||||
rtpParameters.encodings.get(0).maxFramerate = 20;
|
||||
rtpParameters.encodings.get(0).numTemporalLayers = 2;
|
||||
assertTrue(videoSender.setParameters(rtpParameters));
|
||||
|
||||
// Create a DTMF sender.
|
||||
@@ -872,6 +874,7 @@ public class PeerConnectionTest {
|
||||
assertEquals(300000, (int) rtpParameters.encodings.get(0).maxBitrateBps);
|
||||
assertEquals(100000, (int) rtpParameters.encodings.get(0).minBitrateBps);
|
||||
assertEquals(20, (int) rtpParameters.encodings.get(0).maxFramerate);
|
||||
assertEquals(2, (int) rtpParameters.encodings.get(0).numTemporalLayers);
|
||||
|
||||
// Test send & receive UTF-8 text.
|
||||
answeringExpectations.expectMessage(
|
||||
|
||||
@@ -27,6 +27,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaRtpEncodingParameter(
|
||||
env, encoding.active, NativeToJavaInteger(env, encoding.max_bitrate_bps),
|
||||
NativeToJavaInteger(env, encoding.min_bitrate_bps),
|
||||
NativeToJavaInteger(env, encoding.max_framerate),
|
||||
NativeToJavaInteger(env, encoding.num_temporal_layers),
|
||||
encoding.ssrc ? NativeToJavaLong(env, *encoding.ssrc) : nullptr);
|
||||
}
|
||||
|
||||
@@ -72,6 +73,10 @@ RtpEncodingParameters JavaToNativeRtpEncodingParameters(
|
||||
ScopedJavaLocalRef<jobject> j_max_framerate =
|
||||
Java_Encoding_getMaxFramerate(jni, j_encoding_parameters);
|
||||
encoding.max_framerate = JavaToNativeOptionalInt(jni, j_max_framerate);
|
||||
ScopedJavaLocalRef<jobject> j_num_temporal_layers =
|
||||
Java_Encoding_getNumTemporalLayers(jni, j_encoding_parameters);
|
||||
encoding.num_temporal_layers =
|
||||
JavaToNativeOptionalInt(jni, j_num_temporal_layers);
|
||||
ScopedJavaLocalRef<jobject> j_ssrc =
|
||||
Java_Encoding_getSsrc(jni, j_encoding_parameters);
|
||||
if (!IsNull(jni, j_ssrc))
|
||||
|
||||
Reference in New Issue
Block a user