Add rotation to EncodedImage and make sure it is passed through encoders.
This fixes a potential race where the rotation information of a sent frame does not match the encoded frame. BUG=webrtc:5783 TEST= Run ApprtcDemo on iOS and Android with and without capture to texture and both VP8 and H264. R=magjed@webrtc.org, pbos@webrtc.org, tkchin@webrtc.org TBR=tkchin_webrtc // For iOS changes. Review URL: https://codereview.webrtc.org/1886113003 . Cr-Commit-Position: refs/heads/master@{#12426}
This commit is contained in:
@ -393,6 +393,7 @@ int32_t H264EncoderImpl::Encode(
|
||||
encoded_image_._timeStamp = frame.timestamp();
|
||||
encoded_image_.ntp_time_ms_ = frame.ntp_time_ms();
|
||||
encoded_image_.capture_time_ms_ = frame.render_time_ms();
|
||||
encoded_image_.rotation_ = frame.rotation();
|
||||
encoded_image_._frameType = EVideoFrameType_to_FrameType(info.eFrameType);
|
||||
|
||||
// Split encoded image up into fragments. This also updates |encoded_image_|.
|
||||
|
||||
@ -118,8 +118,14 @@ struct FrameEncodeParams {
|
||||
int32_t w,
|
||||
int32_t h,
|
||||
int64_t rtms,
|
||||
uint32_t ts)
|
||||
: encoder(e), width(w), height(h), render_time_ms(rtms), timestamp(ts) {
|
||||
uint32_t ts,
|
||||
webrtc::VideoRotation r)
|
||||
: encoder(e),
|
||||
width(w),
|
||||
height(h),
|
||||
render_time_ms(rtms),
|
||||
timestamp(ts),
|
||||
rotation(r) {
|
||||
if (csi) {
|
||||
codec_specific_info = *csi;
|
||||
} else {
|
||||
@ -133,6 +139,7 @@ struct FrameEncodeParams {
|
||||
int32_t height;
|
||||
int64_t render_time_ms;
|
||||
uint32_t timestamp;
|
||||
webrtc::VideoRotation rotation;
|
||||
};
|
||||
|
||||
// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
|
||||
@ -185,7 +192,8 @@ void VTCompressionOutputCallback(void* encoder,
|
||||
encode_params->encoder->OnEncodedFrame(
|
||||
status, info_flags, sample_buffer, encode_params->codec_specific_info,
|
||||
encode_params->width, encode_params->height,
|
||||
encode_params->render_time_ms, encode_params->timestamp);
|
||||
encode_params->render_time_ms, encode_params->timestamp,
|
||||
encode_params->rotation);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
@ -306,7 +314,7 @@ int H264VideoToolboxEncoder::Encode(
|
||||
std::unique_ptr<internal::FrameEncodeParams> encode_params;
|
||||
encode_params.reset(new internal::FrameEncodeParams(
|
||||
this, codec_specific_info, width_, height_, input_image.render_time_ms(),
|
||||
input_image.timestamp()));
|
||||
input_image.timestamp(), input_image.rotation()));
|
||||
|
||||
// Update the bitrate if needed.
|
||||
SetBitrateBps(bitrate_adjuster_.GetAdjustedBitrateBps());
|
||||
@ -471,7 +479,8 @@ void H264VideoToolboxEncoder::OnEncodedFrame(
|
||||
int32_t width,
|
||||
int32_t height,
|
||||
int64_t render_time_ms,
|
||||
uint32_t timestamp) {
|
||||
uint32_t timestamp,
|
||||
VideoRotation rotation) {
|
||||
if (status != noErr) {
|
||||
LOG(LS_ERROR) << "H264 encode failed.";
|
||||
return;
|
||||
@ -511,6 +520,7 @@ void H264VideoToolboxEncoder::OnEncodedFrame(
|
||||
is_keyframe ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta;
|
||||
frame.capture_time_ms_ = render_time_ms;
|
||||
frame._timeStamp = timestamp;
|
||||
frame.rotation_ = rotation;
|
||||
|
||||
int result = callback_->Encoded(frame, &codec_specific_info, header.get());
|
||||
if (result != 0) {
|
||||
|
||||
@ -12,6 +12,7 @@
|
||||
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
|
||||
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_VIDEO_TOOLBOX_ENCODER_H_
|
||||
|
||||
#include "webrtc/common_video/rotation.h"
|
||||
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
|
||||
#include "webrtc/modules/video_coding/include/bitrate_adjuster.h"
|
||||
|
||||
@ -58,7 +59,8 @@ class H264VideoToolboxEncoder : public H264Encoder {
|
||||
int32_t width,
|
||||
int32_t height,
|
||||
int64_t render_time_ms,
|
||||
uint32_t timestamp);
|
||||
uint32_t timestamp,
|
||||
VideoRotation rotation);
|
||||
|
||||
private:
|
||||
int ResetCompressionSession();
|
||||
|
||||
@ -1024,6 +1024,7 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
|
||||
encoded_images_[encoder_idx]._timeStamp = input_image.timestamp();
|
||||
encoded_images_[encoder_idx].capture_time_ms_ =
|
||||
input_image.render_time_ms();
|
||||
encoded_images_[encoder_idx].rotation_ = input_image.rotation();
|
||||
|
||||
int qp = -1;
|
||||
vpx_codec_control(&encoders_[encoder_idx], VP8E_GET_LAST_QUANTIZER_64, &qp);
|
||||
|
||||
@ -692,6 +692,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
|
||||
TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_._length);
|
||||
encoded_image_._timeStamp = input_image_->timestamp();
|
||||
encoded_image_.capture_time_ms_ = input_image_->render_time_ms();
|
||||
encoded_image_.rotation_ = input_image_->rotation();
|
||||
encoded_image_._encodedHeight = raw_->d_h;
|
||||
encoded_image_._encodedWidth = raw_->d_w;
|
||||
int qp = -1;
|
||||
|
||||
@ -101,7 +101,6 @@ VCMGenericEncoder::VCMGenericEncoder(
|
||||
vcm_encoded_frame_callback_(encoded_frame_callback),
|
||||
internal_source_(internal_source),
|
||||
encoder_params_({0, 0, 0, 0}),
|
||||
rotation_(kVideoRotation_0),
|
||||
is_screenshare_(false) {}
|
||||
|
||||
VCMGenericEncoder::~VCMGenericEncoder() {}
|
||||
@ -141,15 +140,6 @@ int32_t VCMGenericEncoder::Encode(const VideoFrame& frame,
|
||||
for (FrameType frame_type : frame_types)
|
||||
RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);
|
||||
|
||||
rotation_ = frame.rotation();
|
||||
|
||||
// Keep track of the current frame rotation and apply to the output of the
|
||||
// encoder. There might not be exact as the encoder could have one frame delay
|
||||
// but it should be close enough.
|
||||
// TODO(pbos): Map from timestamp, this is racy (even if rotation_ is locked
|
||||
// properly, which it isn't). More than one frame may be in the pipeline.
|
||||
vcm_encoded_frame_callback_->SetRotation(rotation_);
|
||||
|
||||
int32_t result = encoder_->Encode(frame, codec_specific, &frame_types);
|
||||
|
||||
if (vcm_encoded_frame_callback_) {
|
||||
@ -228,7 +218,6 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
|
||||
media_opt_(nullptr),
|
||||
payload_type_(0),
|
||||
internal_source_(false),
|
||||
rotation_(kVideoRotation_0),
|
||||
post_encode_callback_(post_encode_callback) {}
|
||||
|
||||
VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {}
|
||||
@ -254,7 +243,7 @@ int32_t VCMEncodedFrameCallback::Encoded(
|
||||
memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
|
||||
if (codec_specific)
|
||||
CopyCodecSpecific(codec_specific, &rtp_video_header);
|
||||
rtp_video_header.rotation = rotation_;
|
||||
rtp_video_header.rotation = encoded_image.rotation_;
|
||||
|
||||
int32_t ret_val = send_callback_->SendData(
|
||||
payload_type_, encoded_image, fragmentation_header, &rtp_video_header);
|
||||
|
||||
@ -48,7 +48,6 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
|
||||
void SetInternalSource(bool internal_source) {
|
||||
internal_source_ = internal_source;
|
||||
}
|
||||
void SetRotation(VideoRotation rotation) { rotation_ = rotation; }
|
||||
void SignalLastEncoderImplementationUsed(
|
||||
const char* encoder_implementation_name);
|
||||
|
||||
@ -57,7 +56,6 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
|
||||
media_optimization::MediaOptimization* media_opt_;
|
||||
uint8_t payload_type_;
|
||||
bool internal_source_;
|
||||
VideoRotation rotation_;
|
||||
|
||||
EncodedImageCallback* post_encode_callback_;
|
||||
};
|
||||
@ -96,7 +94,6 @@ class VCMGenericEncoder {
|
||||
const bool internal_source_;
|
||||
rtc::CriticalSection params_lock_;
|
||||
EncoderParameters encoder_params_ GUARDED_BY(params_lock_);
|
||||
VideoRotation rotation_;
|
||||
bool is_screenshare_;
|
||||
};
|
||||
|
||||
|
||||
@ -144,9 +144,15 @@ const VideoFrame& QualityScaler::GetScaledFrame(const VideoFrame& frame) {
|
||||
if (scaler_.Scale(frame, &scaled_frame_) != 0)
|
||||
return frame;
|
||||
|
||||
// TODO(perkj): Refactor the scaler to not own |scaled_frame|. VideoFrame are
|
||||
// just thin wrappers so instead the scaler should return a
|
||||
// rtc::scoped_refptr<VideoFrameBuffer> and a new VideoFrame be created with
|
||||
// the meta data from |frame|. That way we would not have to set all these
|
||||
// meta data.
|
||||
scaled_frame_.set_ntp_time_ms(frame.ntp_time_ms());
|
||||
scaled_frame_.set_timestamp(frame.timestamp());
|
||||
scaled_frame_.set_render_time_ms(frame.render_time_ms());
|
||||
scaled_frame_.set_rotation(frame.rotation());
|
||||
|
||||
return scaled_frame_;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user