Remove kSkipFrame usage.
Since padding is no longer sent on Encoded() callbacks, dummy callbacks are not required to generate padding. The skip-frame behavior can therefore be removed, getting rid of dummy callbacks for frames where nothing was actually encoded. And since no callback has to be generated for frames that aren't going to be sent, encoding of those streams is skipped entirely as well, reducing CPU load.

BUG=
R=mflodman@webrtc.org, stefan@webrtc.org

Review URL: https://codereview.webrtc.org/1369923005 .

Cr-Commit-Position: refs/heads/master@{#10181}
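For illustration, here is a minimal, self-contained sketch of the control-flow change in the simulcast encode loop. The types and function names below (StreamInfo, EncodedFrame, EncodeVisibleStreams) are hypothetical stand-ins, not the actual WebRTC classes; they only mirror the skip pattern visible in the first hunk of the diff.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

// Hypothetical stand-in for the adapter's per-stream bookkeeping.
struct StreamInfo {
  bool send_stream;  // false when this simulcast layer is not meant to be sent
};

// Hypothetical minimal encoded-frame result.
struct EncodedFrame {
  uint32_t timestamp = 0;
  size_t length = 0;
};

// Before this change, streams that were not going to be sent still produced a
// zero-length kSkipFrame dummy callback so padding could be generated from it.
// After this change, such streams are skipped outright: no encode work is done
// and no callback is issued for them.
void EncodeVisibleStreams(
    const std::vector<StreamInfo>& streams,
    const std::function<EncodedFrame(size_t)>& encode_stream,
    const std::function<void(const EncodedFrame&)>& deliver) {
  for (size_t stream_idx = 0; stream_idx < streams.size(); ++stream_idx) {
    // Don't encode frames in resolutions that we don't intend to send.
    if (!streams[stream_idx].send_stream)
      continue;
    deliver(encode_stream(stream_idx));
  }
}

With this pattern, every callback delivered downstream carries a real encoded frame, which is why the kSkipFrame handling in the callbacks, tests, and frame-type conversion below can be deleted.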
@@ -262,6 +262,10 @@ int SimulcastEncoderAdapter::Encode(
   int src_width = input_image.width();
   int src_height = input_image.height();
   for (size_t stream_idx = 0; stream_idx < streaminfos_.size(); ++stream_idx) {
+    // Don't encode frames in resolutions that we don't intend to send.
+    if (!streaminfos_[stream_idx].send_stream)
+      continue;
+
     std::vector<VideoFrameType> stream_frame_types;
     if (send_key_frame) {
       stream_frame_types.push_back(kKeyFrame);
@@ -390,23 +394,8 @@ int32_t SimulcastEncoderAdapter::Encoded(
   CodecSpecificInfoVP8* vp8Info = &(stream_codec_specific.codecSpecific.VP8);
   vp8Info->simulcastIdx = stream_idx;
 
-  if (streaminfos_[stream_idx].send_stream) {
-    return encoded_complete_callback_->Encoded(encodedImage,
-                                               &stream_codec_specific,
-                                               fragmentation);
-  } else {
-    EncodedImage dummy_image;
-    // Required in case padding is applied to dropped frames.
-    dummy_image._timeStamp = encodedImage._timeStamp;
-    dummy_image.capture_time_ms_ = encodedImage.capture_time_ms_;
-    dummy_image._encodedWidth = encodedImage._encodedWidth;
-    dummy_image._encodedHeight = encodedImage._encodedHeight;
-    dummy_image._length = 0;
-    dummy_image._frameType = kSkipFrame;
-    vp8Info->keyIdx = kNoKeyIdx;
-    return encoded_complete_callback_->Encoded(dummy_image,
-                                               &stream_codec_specific, NULL);
-  }
+  return encoded_complete_callback_->Encoded(
+      encodedImage, &stream_codec_specific, fragmentation);
 }
 
 uint32_t SimulcastEncoderAdapter::GetStreamBitrate(
@@ -351,6 +351,9 @@ TEST_F(TestSimulcastEncoderAdapterFake, SetChannelParameters) {
 TEST_F(TestSimulcastEncoderAdapterFake, EncodedCallbackForDifferentEncoders) {
   SetupCodec();
 
+  // Set bitrates so that we send all layers.
+  adapter_->SetRates(1200, 30);
+
   // At this point, the simulcast encoder adapter should have 3 streams: HD,
   // quarter HD, and quarter quarter HD. We're going to mostly ignore the exact
   // resolutions, to test that the adapter forwards on the correct resolution
@@ -367,13 +367,6 @@ class TestVp8Simulcast : public ::testing::Test {
           .Times(1)
          .WillRepeatedly(Return(0));
    }
-    if (expected_video_streams < kNumberOfSimulcastStreams) {
-      EXPECT_CALL(encoder_callback_, Encoded(
-          AllOf(Field(&EncodedImage::_frameType, kSkipFrame),
-                Field(&EncodedImage::_length, 0)), _, _))
-          .Times(kNumberOfSimulcastStreams - expected_video_streams)
-          .WillRepeatedly(Return(0));
-    }
   }
 
   void VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -1018,17 +1018,6 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
       } else if (codec_.mode == kScreensharing) {
         result = WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT;
       }
-    } else {
-      // Required in case padding is applied to dropped frames.
-      encoded_images_[encoder_idx]._length = 0;
-      encoded_images_[encoder_idx]._frameType = kSkipFrame;
-      codec_specific.codecType = kVideoCodecVP8;
-      CodecSpecificInfoVP8* vp8Info = &(codec_specific.codecSpecific.VP8);
-      vp8Info->pictureId = picture_id_[stream_idx];
-      vp8Info->simulcastIdx = stream_idx;
-      vp8Info->keyIdx = kNoKeyIdx;
-      encoded_complete_callback_->Encoded(encoded_images_[encoder_idx],
-                                          &codec_specific, NULL);
     }
   }
   if (encoders_.size() == 1 && send_stream_[0]) {
@@ -233,11 +233,9 @@ webrtc::FrameType VCMEncodedFrame::ConvertFrameType(VideoFrameType frameType)
       return kVideoFrameKey;
     case kDeltaFrame:
       return kVideoFrameDelta;
-    case kSkipFrame:
-      return kFrameEmpty;
     default:
      return kVideoFrameDelta;
  }
  // Bogus default return value.
  return kVideoFrameDelta;
 }
 
 VideoFrameType VCMEncodedFrame::ConvertFrameType(webrtc::FrameType frame_type) {
@@ -100,10 +100,8 @@ int32_t FakeEncoder::Encode(const VideoFrame& input_image,
     encoded._encodedWidth = config_.simulcastStream[i].width;
     encoded._encodedHeight = config_.simulcastStream[i].height;
     // Always encode something on the first frame.
-    if (min_stream_bits > bits_available && i > 0) {
-      encoded._length = 0;
-      encoded._frameType = kSkipFrame;
-    }
+    if (min_stream_bits > bits_available && i > 0)
+      continue;
     assert(callback_ != NULL);
     if (callback_->Encoded(encoded, &specifics, NULL) != 0)
       return -1;
@@ -168,7 +168,6 @@ void SendStatisticsProxy::OnSendEncodedImage(
   stats->height = encoded_image._encodedHeight;
   update_times_[ssrc].resolution_update_ms = clock_->TimeInMilliseconds();
 
-  if (encoded_image._frameType != kSkipFrame)
-    key_frame_counter_.Add(encoded_image._frameType == kKeyFrame);
+  key_frame_counter_.Add(encoded_image._frameType == kKeyFrame);
 
   // TODO(asapersson): This is incorrect if simulcast layers are encoded on
@@ -169,9 +169,6 @@ class VideoFrame {
 enum VideoFrameType {
   kKeyFrame = 0,
   kDeltaFrame = 1,
-  kGoldenFrame = 2,
-  kAltRefFrame = 3,
-  kSkipFrame = 4
 };
 
 // TODO(pbos): Rename EncodedFrame and reformat this class' members.