Reland of Delete webrtc::VideoFrame methods buffer and stride. (patchset #1 id:1 of https://codereview.webrtc.org/1983583002/ )
Reason for revert:
Should work after cl https://codereview.webrtc.org/1985693002/ is landed,
which initializes the frames used by FakeWebRtcVideoCaptureModule.
So intend to reland after that, with no changes.

Original issue's description:
> Revert of Delete webrtc::VideoFrame methods buffer and stride. (patchset #2 id:290001 of https://codereview.webrtc.org/1963413004/ )
>
> Reason for revert:
> Speculative revert to see if failures on the DrMemory bot are related to this cl. See e.g. here:
> https://build.chromium.org/p/client.webrtc/builders/Win%20DrMemory%20Full/builds/4243
>
> UNINITIALIZED READ: reading 0x04980040-0x04980060 32 byte(s) within 0x04980040-0x04980060
> # 0 CopyRow_AVX
> # 1 CopyPlane
> # 2 I420Copy
> # 3 webrtc::ExtractBuffer
> # 4 cricket::WebRtcVideoCapturer::SignalFrameCapturedOnStartThread
> # 5 cricket::WebRtcVideoCapturer::OnIncomingCapturedFrame
> # 6 FakeWebRtcVideoCaptureModule::SendFrame
> # 7 WebRtcVideoCapturerTest_TestCaptureVcm_Test::TestBody
> # 8 testing::internal::HandleSehExceptionsInMethodIfSupported<>
>
> Original issue's description:
> > Reland of Delete webrtc::VideoFrame methods buffer and stride. (patchset #1 id:1 of https://codereview.webrtc.org/1935443002/ )
> >
> > Reason for revert:
> > I plan to reland this change in a week or two, after downstream users are updated.
> >
> > Original issue's description:
> > > Revert of Delete webrtc::VideoFrame methods buffer and stride. (patchset #14 id:250001 of https://codereview.webrtc.org/1900673002/ )
> > >
> > > Reason for revert:
> > > Breaks chrome FYI bots.
> > >
> > > Original issue's description:
> > > > Delete webrtc::VideoFrame methods buffer and stride.
> > > >
> > > > To make the HasOneRef/IsMutable hack work, also had to change the
> > > > video_frame_buffer method to return a const ref to a scoped_ref_ptr,
> > > > to not imply an AddRef.
> > > >
> > > > BUG=webrtc:5682
> > >
> > > TBR=perkj@webrtc.org,magjed@webrtc.org,pbos@webrtc.org,pthatcher@webrtc.org,stefan@webrtc.org
> > > # Skipping CQ checks because original CL landed less than 1 days ago.
> > > NOPRESUBMIT=true
> > > NOTREECHECKS=true
> > > NOTRY=true
> > > BUG=webrtc:5682
> > >
> > > Committed: https://crrev.com/5b3c443d301f2c2f18dac5b02652c08b91ea3828
> > > Cr-Commit-Position: refs/heads/master@{#12558}
> >
> > TBR=perkj@webrtc.org,magjed@webrtc.org,pbos@webrtc.org,pthatcher@webrtc.org,stefan@webrtc.org
> > # Not skipping CQ checks because original CL landed more than 1 days ago.
> > BUG=webrtc:5682
> >
> > Committed: https://crrev.com/d0dc66e0ea30c8614001e425a4ae0aa7dd56c2a7
> > Cr-Commit-Position: refs/heads/master@{#12721}
>
> TBR=perkj@webrtc.org,magjed@webrtc.org,pbos@webrtc.org,pthatcher@webrtc.org,stefan@webrtc.org,nisse@webrtc.org
> # Skipping CQ checks because original CL landed less than 1 days ago.
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=webrtc:5682
>
> Committed: https://crrev.com/d49c30cd2fe442f2b5b4ecec8d5cbaa430464725
> Cr-Commit-Position: refs/heads/master@{#12745}

TBR=perkj@webrtc.org,magjed@webrtc.org,pbos@webrtc.org,pthatcher@webrtc.org,stefan@webrtc.org,tommi@webrtc.org
# Not skipping CQ checks because original CL landed more than 1 days ago.
BUG=webrtc:5682

Review-Url: https://codereview.webrtc.org/1979193003
Cr-Commit-Position: refs/heads/master@{#12773}
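The change being relanded replaces the per-plane VideoFrame::buffer(PlaneType)
and VideoFrame::stride(PlaneType) accessors with the DataY/U/V() and
StrideY/U/V() accessors on the frame's underlying VideoFrameBuffer, as the
diff below shows. A minimal sketch of the migration for read-only access
follows; the AverageLuma helper and the include path are illustrative
assumptions, not part of this CL:

#include "webrtc/video_frame.h"  // Assumed include path for this vintage of the tree.

// Hypothetical helper, for illustration only: averages the Y (luma) plane.
int AverageLuma(const webrtc::VideoFrame& frame) {
  // Before this CL:
  //   const uint8_t* data = frame.buffer(webrtc::kYPlane);
  //   int stride = frame.stride(webrtc::kYPlane);
  // After this CL, plane data and strides come from the VideoFrameBuffer.
  // Per the quoted description, video_frame_buffer() returns a const ref to
  // a scoped_refptr (no implied AddRef), which keeps the HasOneRef()/
  // IsMutable() check meaningful when asking for MutableData* access.
  const uint8_t* data = frame.video_frame_buffer()->DataY();
  const int stride = frame.video_frame_buffer()->StrideY();
  int64_t sum = 0;
  for (int y = 0; y < frame.height(); ++y) {
    for (int x = 0; x < frame.width(); ++x)
      sum += data[y * stride + x];
  }
  return static_cast<int>(sum / (int64_t{frame.width()} * frame.height()));
}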
@@ -129,10 +129,12 @@ int H264DecoderImpl::AVGetBuffer2(
   video_frame->set_video_frame_buffer(
       decoder->pool_.CreateBuffer(width, height));
   // DCHECK that we have a continuous buffer as is required.
-  RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
-      video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
-  RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
-      video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
+  RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataU(),
+                video_frame->video_frame_buffer()->DataY() +
+                    video_frame->allocated_size(kYPlane));
+  RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataV(),
+                video_frame->video_frame_buffer()->DataU() +
+                    video_frame->allocated_size(kUPlane));
   int total_size = video_frame->allocated_size(kYPlane) +
                    video_frame->allocated_size(kUPlane) +
                    video_frame->allocated_size(kVPlane);
@@ -141,12 +143,18 @@ int H264DecoderImpl::AVGetBuffer2(
   av_frame->reordered_opaque = context->reordered_opaque;

   // Set |av_frame| members as required by FFmpeg.
-  av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
-  av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
-  av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
-  av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
-  av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
-  av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
+  av_frame->data[kYPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataY();
+  av_frame->linesize[kYPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideY();
+  av_frame->data[kUPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataU();
+  av_frame->linesize[kUPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideU();
+  av_frame->data[kVPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataV();
+  av_frame->linesize[kVPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideV();
   RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);

   av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
@@ -339,9 +347,12 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   VideoFrame* video_frame = static_cast<VideoFrame*>(
       av_buffer_get_opaque(av_frame_->buf[0]));
   RTC_DCHECK(video_frame);
-  RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
-  RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
-  RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
+  RTC_CHECK_EQ(av_frame_->data[kYPlane],
+               video_frame->video_frame_buffer()->DataY());
+  RTC_CHECK_EQ(av_frame_->data[kUPlane],
+               video_frame->video_frame_buffer()->DataU());
+  RTC_CHECK_EQ(av_frame_->data[kVPlane],
+               video_frame->video_frame_buffer()->DataV());
   video_frame->set_timestamp(input_image._timeStamp);

   // The decoded image may be larger than what is supposed to be visible, see
@@ -352,9 +363,9 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
     video_frame->set_video_frame_buffer(
         new rtc::RefCountedObject<WrappedI420Buffer>(
             av_frame_->width, av_frame_->height,
-            buf->data(kYPlane), buf->stride(kYPlane),
-            buf->data(kUPlane), buf->stride(kUPlane),
-            buf->data(kVPlane), buf->stride(kVPlane),
+            buf->DataY(), buf->StrideY(),
+            buf->DataU(), buf->StrideU(),
+            buf->DataV(), buf->StrideV(),
             rtc::KeepRefUntilDone(buf)));
   }

@@ -367,12 +367,12 @@ int32_t H264EncoderImpl::Encode(
   picture.iPicHeight = frame.height();
   picture.iColorFormat = EVideoFormatType::videoFormatI420;
   picture.uiTimeStamp = frame.ntp_time_ms();
-  picture.iStride[0] = frame.stride(kYPlane);
-  picture.iStride[1] = frame.stride(kUPlane);
-  picture.iStride[2] = frame.stride(kVPlane);
-  picture.pData[0] = const_cast<uint8_t*>(frame.buffer(kYPlane));
-  picture.pData[1] = const_cast<uint8_t*>(frame.buffer(kUPlane));
-  picture.pData[2] = const_cast<uint8_t*>(frame.buffer(kVPlane));
+  picture.iStride[0] = frame.video_frame_buffer()->StrideY();
+  picture.iStride[1] = frame.video_frame_buffer()->StrideU();
+  picture.iStride[2] = frame.video_frame_buffer()->StrideV();
+  picture.pData[0] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataY());
+  picture.pData[1] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataU());
+  picture.pData[2] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataV());

   // EncodeFrame output.
   SFrameBSInfo info;
@@ -168,10 +168,14 @@ bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
   int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
   // Convert I420 to NV12.
   int ret = libyuv::I420ToNV12(
-      frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
-      frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
-      frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane), dst_y,
-      dst_stride_y, dst_uv, dst_stride_uv, frame.width(), frame.height());
+      frame.video_frame_buffer()->DataY(),
+      frame.video_frame_buffer()->StrideY(),
+      frame.video_frame_buffer()->DataU(),
+      frame.video_frame_buffer()->StrideU(),
+      frame.video_frame_buffer()->DataV(),
+      frame.video_frame_buffer()->StrideV(),
+      dst_y, dst_stride_y, dst_uv, dst_stride_uv,
+      frame.width(), frame.height());
   CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
   if (ret) {
     LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
@@ -301,14 +301,21 @@ int SimulcastEncoderAdapter::Encode(
       // Aligning stride values based on width.
       dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
                                  (dst_width + 1) / 2, (dst_width + 1) / 2);
-      libyuv::I420Scale(
-          input_image.buffer(kYPlane), input_image.stride(kYPlane),
-          input_image.buffer(kUPlane), input_image.stride(kUPlane),
-          input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
-          src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
-          dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
-          dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
-          dst_height, libyuv::kFilterBilinear);
+      libyuv::I420Scale(input_image.video_frame_buffer()->DataY(),
+                        input_image.video_frame_buffer()->StrideY(),
+                        input_image.video_frame_buffer()->DataU(),
+                        input_image.video_frame_buffer()->StrideU(),
+                        input_image.video_frame_buffer()->DataV(),
+                        input_image.video_frame_buffer()->StrideV(),
+                        src_width, src_height,
+                        dst_frame.video_frame_buffer()->MutableDataY(),
+                        dst_frame.video_frame_buffer()->StrideY(),
+                        dst_frame.video_frame_buffer()->MutableDataU(),
+                        dst_frame.video_frame_buffer()->StrideU(),
+                        dst_frame.video_frame_buffer()->MutableDataV(),
+                        dst_frame.video_frame_buffer()->StrideV(),
+                        dst_width, dst_height,
+                        libyuv::kFilterBilinear);
       dst_frame.set_timestamp(input_image.timestamp());
       dst_frame.set_render_time_ms(input_image.render_time_ms());
       streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
@@ -119,13 +119,13 @@ class Vp8TestDecodedImageCallback : public DecodedImageCallback {
   Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
   int32_t Decoded(VideoFrame& decoded_image) override {
     for (int i = 0; i < decoded_image.width(); ++i) {
-      EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
+      EXPECT_NEAR(kColorY, decoded_image.video_frame_buffer()->DataY()[i], 1);
     }

     // TODO(mikhal): Verify the difference between U,V and the original.
     for (int i = 0; i < ((decoded_image.width() + 1) / 2); ++i) {
-      EXPECT_NEAR(kColorU, decoded_image.buffer(kUPlane)[i], 4);
-      EXPECT_NEAR(kColorV, decoded_image.buffer(kVPlane)[i], 4);
+      EXPECT_NEAR(kColorU, decoded_image.video_frame_buffer()->DataU()[i], 4);
+      EXPECT_NEAR(kColorV, decoded_image.video_frame_buffer()->DataV()[i], 4);
     }
     decoded_frames_++;
     return 0;
@@ -222,26 +222,40 @@ class TestVp8Simulcast : public ::testing::Test {
   TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
       : encoder_(encoder), decoder_(decoder) {}

-  // Creates an VideoFrame from |plane_colors|.
-  static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
-    for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
-      int width =
-          (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
-      int height =
-          (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
-      PlaneType plane_type = static_cast<PlaneType>(plane_num);
-      uint8_t* data = frame->buffer(plane_type);
+  static void SetPlane(uint8_t* data,
+                       uint8_t value,
+                       int width,
+                       int height,
+                       int stride) {
+    for (int i = 0; i < height; i++, data += stride) {
       // Setting allocated area to zero - setting only image size to
       // requested values - will make it easier to distinguish between image
       // size and frame size (accounting for stride).
-      memset(frame->buffer(plane_type), 0, frame->allocated_size(plane_type));
-      for (int i = 0; i < height; i++) {
-        memset(data, plane_colors[plane_num], width);
-        data += frame->stride(plane_type);
-      }
+      memset(data, value, width);
+      memset(data + width, 0, stride - width);
     }
   }

+  // Fills in an VideoFrameBuffer from |plane_colors|.
+  static void CreateImage(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
+                          int plane_colors[kNumOfPlanes]) {
+    int width = buffer->width();
+    int height = buffer->height();
+    int chroma_width = (width + 1) / 2;
+    int chroma_height = (height + 1) / 2;
+
+    SetPlane(buffer->MutableDataY(), plane_colors[0],
+             width, height, buffer->StrideY());
+
+    SetPlane(buffer->MutableDataU(), plane_colors[1],
+             chroma_width, chroma_height,
+             buffer->StrideU());
+
+    SetPlane(buffer->MutableDataV(), plane_colors[2],
+             chroma_width, chroma_height,
+             buffer->StrideV());
+  }
+
   static void DefaultSettings(VideoCodec* settings,
                               const int* temporal_layer_profile) {
     assert(settings);
@@ -305,11 +319,11 @@ class TestVp8Simulcast : public ::testing::Test {
     int half_width = (kDefaultWidth + 1) / 2;
     input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
                                   half_width, half_width);
-    memset(input_frame_.buffer(kYPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
            input_frame_.allocated_size(kYPlane));
-    memset(input_frame_.buffer(kUPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
            input_frame_.allocated_size(kUPlane));
-    memset(input_frame_.buffer(kVPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
            input_frame_.allocated_size(kVPlane));
   }

@@ -555,11 +569,11 @@ class TestVp8Simulcast : public ::testing::Test {
     int half_width = (settings_.width + 1) / 2;
     input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
                                   settings_.width, half_width, half_width);
-    memset(input_frame_.buffer(kYPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
           input_frame_.allocated_size(kYPlane));
-    memset(input_frame_.buffer(kUPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
           input_frame_.allocated_size(kUPlane));
-    memset(input_frame_.buffer(kVPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
           input_frame_.allocated_size(kVPlane));

     // The for loop above did not set the bitrate of the highest layer.
@@ -596,11 +610,11 @@ class TestVp8Simulcast : public ::testing::Test {
     half_width = (settings_.width + 1) / 2;
     input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
                                   settings_.width, half_width, half_width);
-    memset(input_frame_.buffer(kYPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
           input_frame_.allocated_size(kYPlane));
-    memset(input_frame_.buffer(kUPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
          input_frame_.allocated_size(kUPlane));
-    memset(input_frame_.buffer(kVPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
          input_frame_.allocated_size(kVPlane));
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -691,7 +705,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     int picture_id = -1;
@@ -707,7 +721,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

@@ -715,7 +729,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -724,7 +738,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -739,7 +753,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
@@ -898,7 +912,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

@@ -906,7 +920,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

@@ -746,15 +746,18 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
   // Image in vpx_image_t format.
   // Input image is const. VP8's raw image is not defined as const.
   raw_images_[0].planes[VPX_PLANE_Y] =
-      const_cast<uint8_t*>(input_image.buffer(kYPlane));
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY());
   raw_images_[0].planes[VPX_PLANE_U] =
-      const_cast<uint8_t*>(input_image.buffer(kUPlane));
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU());
   raw_images_[0].planes[VPX_PLANE_V] =
-      const_cast<uint8_t*>(input_image.buffer(kVPlane));
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV());

-  raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
-  raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
-  raw_images_[0].stride[VPX_PLANE_V] = input_image.stride(kVPlane);
+  raw_images_[0].stride[VPX_PLANE_Y] =
+      input_image.video_frame_buffer()->StrideY();
+  raw_images_[0].stride[VPX_PLANE_U] =
+      input_image.video_frame_buffer()->StrideU();
+  raw_images_[0].stride[VPX_PLANE_V] =
+      input_image.video_frame_buffer()->StrideV();

   for (size_t i = 1; i < encoders_.size(); ++i) {
     // Scale the image down a number of times by downsampling factor
@@ -1351,9 +1354,12 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
   libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
                    img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
                    img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
-                   decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
-                   decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
-                   decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
+                   decoded_image.video_frame_buffer()->MutableDataY(),
+                   decoded_image.video_frame_buffer()->StrideY(),
+                   decoded_image.video_frame_buffer()->MutableDataU(),
+                   decoded_image.video_frame_buffer()->StrideU(),
+                   decoded_image.video_frame_buffer()->MutableDataV(),
+                   decoded_image.video_frame_buffer()->StrideV(),
                    img->d_w, img->d_h);
   decoded_image.set_ntp_time_ms(ntp_time_ms);
   int ret = decode_complete_callback_->Decoded(decoded_image);
@@ -504,12 +504,15 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,

   // Image in vpx_image_t format.
   // Input image is const. VPX's raw image is not defined as const.
-  raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
-  raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane));
-  raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane));
-  raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
-  raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
-  raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
+  raw_->planes[VPX_PLANE_Y] =
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY());
+  raw_->planes[VPX_PLANE_U] =
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU());
+  raw_->planes[VPX_PLANE_V] =
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV());
+  raw_->stride[VPX_PLANE_Y] = input_image.video_frame_buffer()->StrideY();
+  raw_->stride[VPX_PLANE_U] = input_image.video_frame_buffer()->StrideU();
+  raw_->stride[VPX_PLANE_V] = input_image.video_frame_buffer()->StrideV();

   vpx_enc_frame_flags_t flags = 0;
   bool send_keyframe = (frame_type == kVideoFrameKey);