Reland of Delete webrtc::VideoFrame methods buffer and stride. (patchset #1 id:1 of https://codereview.webrtc.org/1935443002/ )
Reason for revert: I plan to reland this change in a week or two, after downstream users are updated.

Original issue's description:
> Revert of Delete webrtc::VideoFrame methods buffer and stride. (patchset #14 id:250001 of https://codereview.webrtc.org/1900673002/ )
>
> Reason for revert:
> Breaks chrome FYI bots.
>
> Original issue's description:
> > Delete webrtc::VideoFrame methods buffer and stride.
> >
> > To make the HasOneRef/IsMutable hack work, also had to change the
> > video_frame_buffer method to return a const ref to a scoped_refptr,
> > to not imply an AddRef.
> >
> > BUG=webrtc:5682
> > TBR=perkj@webrtc.org,magjed@webrtc.org,pbos@webrtc.org,pthatcher@webrtc.org,stefan@webrtc.org
>
> # Skipping CQ checks because original CL landed less than 1 day ago.
> NOPRESUBMIT=true
> NOTREECHECKS=true
> NOTRY=true
> BUG=webrtc:5682
>
> Committed: https://crrev.com/5b3c443d301f2c2f18dac5b02652c08b91ea3828
> Cr-Commit-Position: refs/heads/master@{#12558}

TBR=perkj@webrtc.org,magjed@webrtc.org,pbos@webrtc.org,pthatcher@webrtc.org,stefan@webrtc.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=webrtc:5682

Review-Url: https://codereview.webrtc.org/1963413004
Cr-Commit-Position: refs/heads/master@{#12721}
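In code, the call-site migration this reland performs looks roughly like the following (a minimal sketch, assuming an I420 webrtc::VideoFrame |frame| backed by a software buffer; the helper name is illustrative, not from this CL):

  #include "webrtc/video_frame.h"

  void SketchOfMigration(const webrtc::VideoFrame& frame) {
    // Old API, deleted by this CL: per-plane accessors on VideoFrame.
    //   const uint8_t* y = frame.buffer(webrtc::kYPlane);
    //   int stride_y = frame.stride(webrtc::kYPlane);

    // New API: read through the underlying VideoFrameBuffer. Note that
    // video_frame_buffer() now returns a const ref, so no AddRef happens.
    const uint8_t* y = frame.video_frame_buffer()->DataY();
    int stride_y = frame.video_frame_buffer()->StrideY();
    // Writable access is a separate, explicit call:
    //   uint8_t* y_mut = frame.video_frame_buffer()->MutableDataY();
    (void)y;
    (void)stride_y;
  }

The diff below applies this pattern across encoders, decoders, tests, and utilities.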
@@ -795,9 +795,12 @@ bool MediaCodecVideoEncoder::EncodeByteBufferOnCodecThread(JNIEnv* jni,
   CHECK_EXCEPTION(jni);
   RTC_CHECK(yuv_buffer) << "Indirect buffer??";
   RTC_CHECK(!libyuv::ConvertFromI420(
-      frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
-      frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
-      frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane),
+      frame.video_frame_buffer()->DataY(),
+      frame.video_frame_buffer()->StrideY(),
+      frame.video_frame_buffer()->DataU(),
+      frame.video_frame_buffer()->StrideU(),
+      frame.video_frame_buffer()->DataV(),
+      frame.video_frame_buffer()->StrideV(),
       yuv_buffer, width_, width_, height_, encoder_fourcc_))
       << "ConvertFromI420 failed";
@@ -51,9 +51,9 @@ TEST(TestVideoFrame, SizeAllocation) {
   VideoFrame frame;
   frame.CreateEmptyFrame(10, 10, 12, 14, 220);
   int height = frame.height();
-  int stride_y = frame.stride(kYPlane);
-  int stride_u = frame.stride(kUPlane);
-  int stride_v = frame.stride(kVPlane);
+  int stride_y = frame.video_frame_buffer()->StrideY();
+  int stride_u = frame.video_frame_buffer()->StrideU();
+  int stride_v = frame.video_frame_buffer()->StrideV();
   // Verify that allocated size was computed correctly.
   EXPECT_EQ(ExpectedSize(stride_y, height, kYPlane),
             frame.allocated_size(kYPlane));
@@ -101,9 +101,12 @@ TEST(TestVideoFrame, CopyFrame) {
   // Frame of larger dimensions.
   small_frame.CreateEmptyFrame(width, height,
                                stride_y, stride_u, stride_v);
-  memset(small_frame.buffer(kYPlane), 1, small_frame.allocated_size(kYPlane));
-  memset(small_frame.buffer(kUPlane), 2, small_frame.allocated_size(kUPlane));
-  memset(small_frame.buffer(kVPlane), 3, small_frame.allocated_size(kVPlane));
+  memset(small_frame.video_frame_buffer()->MutableDataY(), 1,
+         small_frame.allocated_size(kYPlane));
+  memset(small_frame.video_frame_buffer()->MutableDataU(), 2,
+         small_frame.allocated_size(kUPlane));
+  memset(small_frame.video_frame_buffer()->MutableDataV(), 3,
+         small_frame.allocated_size(kVPlane));
   big_frame.CopyFrame(small_frame);
   EXPECT_TRUE(test::FramesEqual(small_frame, big_frame));
 }
@@ -141,12 +144,12 @@ TEST(TestVideoFrame, ShallowCopy) {
   const VideoFrame* const_frame1_ptr = &frame1;
   const VideoFrame* const_frame2_ptr = &frame2;

-  EXPECT_TRUE(const_frame1_ptr->buffer(kYPlane) ==
-              const_frame2_ptr->buffer(kYPlane));
-  EXPECT_TRUE(const_frame1_ptr->buffer(kUPlane) ==
-              const_frame2_ptr->buffer(kUPlane));
-  EXPECT_TRUE(const_frame1_ptr->buffer(kVPlane) ==
-              const_frame2_ptr->buffer(kVPlane));
+  EXPECT_TRUE(const_frame1_ptr->video_frame_buffer()->DataY() ==
+              const_frame2_ptr->video_frame_buffer()->DataY());
+  EXPECT_TRUE(const_frame1_ptr->video_frame_buffer()->DataU() ==
+              const_frame2_ptr->video_frame_buffer()->DataU());
+  EXPECT_TRUE(const_frame1_ptr->video_frame_buffer()->DataV() ==
+              const_frame2_ptr->video_frame_buffer()->DataV());

   EXPECT_EQ(frame2.timestamp(), frame1.timestamp());
   EXPECT_EQ(frame2.ntp_time_ms(), frame1.ntp_time_ms());
@@ -184,12 +187,12 @@ TEST(TestVideoFrame, CopyBuffer) {
                      width, height, stride_y, stride_uv, stride_uv,
                      kVideoRotation_0);
   // Expect exactly the same pixel data.
-  EXPECT_TRUE(
-      test::EqualPlane(buffer_y, frame2.buffer(kYPlane), stride_y, 15, 15));
-  EXPECT_TRUE(
-      test::EqualPlane(buffer_u, frame2.buffer(kUPlane), stride_uv, 8, 8));
-  EXPECT_TRUE(
-      test::EqualPlane(buffer_v, frame2.buffer(kVPlane), stride_uv, 8, 8));
+  EXPECT_TRUE(test::EqualPlane(buffer_y, frame2.video_frame_buffer()->DataY(),
+                               stride_y, 15, 15));
+  EXPECT_TRUE(test::EqualPlane(buffer_u, frame2.video_frame_buffer()->DataU(),
+                               stride_uv, 8, 8));
+  EXPECT_TRUE(test::EqualPlane(buffer_v, frame2.video_frame_buffer()->DataV(),
+                               stride_uv, 8, 8));

   // Compare size.
   EXPECT_LE(kSizeY, frame2.allocated_size(kYPlane));
@@ -200,27 +203,27 @@ TEST(TestVideoFrame, CopyBuffer) {
 TEST(TestVideoFrame, ReuseAllocation) {
   VideoFrame frame;
   frame.CreateEmptyFrame(640, 320, 640, 320, 320);
-  const uint8_t* y = frame.buffer(kYPlane);
-  const uint8_t* u = frame.buffer(kUPlane);
-  const uint8_t* v = frame.buffer(kVPlane);
+  const uint8_t* y = frame.video_frame_buffer()->DataY();
+  const uint8_t* u = frame.video_frame_buffer()->DataU();
+  const uint8_t* v = frame.video_frame_buffer()->DataV();
   frame.CreateEmptyFrame(640, 320, 640, 320, 320);
-  EXPECT_EQ(y, frame.buffer(kYPlane));
-  EXPECT_EQ(u, frame.buffer(kUPlane));
-  EXPECT_EQ(v, frame.buffer(kVPlane));
+  EXPECT_EQ(y, frame.video_frame_buffer()->DataY());
+  EXPECT_EQ(u, frame.video_frame_buffer()->DataU());
+  EXPECT_EQ(v, frame.video_frame_buffer()->DataV());
 }

 TEST(TestVideoFrame, FailToReuseAllocation) {
   VideoFrame frame1;
   frame1.CreateEmptyFrame(640, 320, 640, 320, 320);
-  const uint8_t* y = frame1.buffer(kYPlane);
-  const uint8_t* u = frame1.buffer(kUPlane);
-  const uint8_t* v = frame1.buffer(kVPlane);
+  const uint8_t* y = frame1.video_frame_buffer()->DataY();
+  const uint8_t* u = frame1.video_frame_buffer()->DataU();
+  const uint8_t* v = frame1.video_frame_buffer()->DataV();
   // Make a shallow copy of |frame1|.
   VideoFrame frame2(frame1.video_frame_buffer(), 0, 0, kVideoRotation_0);
   frame1.CreateEmptyFrame(640, 320, 640, 320, 320);
-  EXPECT_NE(y, frame1.buffer(kYPlane));
-  EXPECT_NE(u, frame1.buffer(kUPlane));
-  EXPECT_NE(v, frame1.buffer(kVPlane));
+  EXPECT_NE(y, frame1.video_frame_buffer()->DataY());
+  EXPECT_NE(u, frame1.video_frame_buffer()->DataU());
+  EXPECT_NE(v, frame1.video_frame_buffer()->DataV());
 }

 TEST(TestVideoFrame, TextureInitialValues) {
@@ -20,61 +20,6 @@

 namespace webrtc {

-int PrintBuffer(const uint8_t* buffer, int width, int height, int stride) {
-  if (buffer == NULL)
-    return -1;
-  int k;
-  const uint8_t* tmp_buffer = buffer;
-  for (int i = 0; i < height; i++) {
-    k = 0;
-    for (int j = 0; j < width; j++) {
-      printf("%d ", tmp_buffer[k++]);
-    }
-    tmp_buffer += stride;
-    printf(" \n");
-  }
-  printf(" \n");
-  return 0;
-}
-
-int PrintFrame(const VideoFrame* frame, const char* str) {
-  if (frame == NULL)
-    return -1;
-  printf("%s %dx%d \n", str, frame->width(), frame->height());
-
-  int ret = 0;
-  for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
-    PlaneType plane_type = static_cast<PlaneType>(plane_num);
-    int width = (plane_num ? (frame->width() + 1) / 2 : frame->width());
-    int height = (plane_num ? (frame->height() + 1) / 2 : frame->height());
-    ret += PrintBuffer(frame->buffer(plane_type), width, height,
-                       frame->stride(plane_type));
-  }
-  return ret;
-}
-
-
-// Create an image from on a YUV frame. Every plane value starts with a start
-// value, and will be set to increasing values.
-void CreateImage(VideoFrame* frame, int plane_offset[kNumOfPlanes]) {
-  if (frame == NULL)
-    return;
-  for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
-    int width = (plane_num != kYPlane ? (frame->width() + 1) / 2 :
-        frame->width());
-    int height = (plane_num != kYPlane ? (frame->height() + 1) / 2 :
-        frame->height());
-    PlaneType plane_type = static_cast<PlaneType>(plane_num);
-    uint8_t *data = frame->buffer(plane_type);
-    for (int i = 0; i < height; i++) {
-      for (int j = 0; j < width; j++) {
-        data[j] = static_cast<uint8_t>(i + plane_offset[plane_num] + j);
-      }
-      data += frame->stride(plane_type);
-    }
-  }
-}
-
 class TestLibYuv : public ::testing::Test {
  protected:
   TestLibYuv();
@@ -47,6 +47,7 @@ int Scaler::Set(int src_width, int src_height,
   return 0;
 }

+// TODO(nisse): Should work with VideoFrameBuffer instead.
 int Scaler::Scale(const VideoFrame& src_frame, VideoFrame* dst_frame) {
   assert(dst_frame);
   if (src_frame.IsZeroSize())
@@ -69,29 +70,33 @@ int Scaler::Scale(const VideoFrame& src_frame, VideoFrame* dst_frame) {
   const int src_offset_x = ((src_width_ - cropped_src_width) / 2) & ~1;
   const int src_offset_y = ((src_height_ - cropped_src_height) / 2) & ~1;

-  const uint8_t* y_ptr = src_frame.buffer(kYPlane) +
-      src_offset_y * src_frame.stride(kYPlane) +
+  const uint8_t* y_ptr =
+      src_frame.video_frame_buffer()->DataY() +
+      src_offset_y * src_frame.video_frame_buffer()->StrideY() +
       src_offset_x;
-  const uint8_t* u_ptr = src_frame.buffer(kUPlane) +
-      src_offset_y / 2 * src_frame.stride(kUPlane) +
+  const uint8_t* u_ptr =
+      src_frame.video_frame_buffer()->DataU() +
+      src_offset_y / 2 * src_frame.video_frame_buffer()->StrideU() +
       src_offset_x / 2;
-  const uint8_t* v_ptr = src_frame.buffer(kVPlane) +
-      src_offset_y / 2 * src_frame.stride(kVPlane) +
+  const uint8_t* v_ptr =
+      src_frame.video_frame_buffer()->DataV() +
+      src_offset_y / 2 * src_frame.video_frame_buffer()->StrideV() +
       src_offset_x / 2;

-  return libyuv::I420Scale(y_ptr,
-                           src_frame.stride(kYPlane),
+  return libyuv::I420Scale(
+      y_ptr,
+      src_frame.video_frame_buffer()->StrideY(),
       u_ptr,
-      src_frame.stride(kUPlane),
+      src_frame.video_frame_buffer()->StrideU(),
       v_ptr,
-      src_frame.stride(kVPlane),
+      src_frame.video_frame_buffer()->StrideV(),
       cropped_src_width, cropped_src_height,
-      dst_frame->buffer(kYPlane),
-      dst_frame->stride(kYPlane),
-      dst_frame->buffer(kUPlane),
-      dst_frame->stride(kUPlane),
-      dst_frame->buffer(kVPlane),
-      dst_frame->stride(kVPlane),
+      dst_frame->video_frame_buffer()->MutableDataY(),
+      dst_frame->video_frame_buffer()->StrideY(),
+      dst_frame->video_frame_buffer()->MutableDataU(),
+      dst_frame->video_frame_buffer()->StrideU(),
+      dst_frame->video_frame_buffer()->MutableDataV(),
+      dst_frame->video_frame_buffer()->StrideV(),
       dst_width_, dst_height_,
       libyuv::FilterMode(method_));
 }
@@ -102,23 +102,42 @@ size_t CalcBufferSize(VideoType type, int width, int height) {
   return buffer_size;
 }

+static int PrintPlane(const uint8_t* buf,
+                      int width,
+                      int height,
+                      int stride,
+                      FILE* file) {
+  for (int i = 0; i < height; i++, buf += stride) {
+    if (fwrite(buf, 1, width, file) != static_cast<unsigned int>(width))
+      return -1;
+  }
+  return 0;
+}
+
+// TODO(nisse): Belongs with the test code?
 int PrintVideoFrame(const VideoFrame& frame, FILE* file) {
   if (file == NULL)
     return -1;
   if (frame.IsZeroSize())
     return -1;
-  for (int planeNum = 0; planeNum < kNumOfPlanes; ++planeNum) {
-    int width = (planeNum ? (frame.width() + 1) / 2 : frame.width());
-    int height = (planeNum ? (frame.height() + 1) / 2 : frame.height());
-    PlaneType plane_type = static_cast<PlaneType>(planeNum);
-    const uint8_t* plane_buffer = frame.buffer(plane_type);
-    for (int y = 0; y < height; y++) {
-      if (fwrite(plane_buffer, 1, width, file) !=
-          static_cast<unsigned int>(width)) {
-        return -1;
-      }
-      plane_buffer += frame.stride(plane_type);
-    }
+  int width = frame.video_frame_buffer()->width();
+  int height = frame.video_frame_buffer()->height();
+  int chroma_width = (width + 1) / 2;
+  int chroma_height = (height + 1) / 2;
+
+  if (PrintPlane(frame.video_frame_buffer()->DataY(), width, height,
+                 frame.video_frame_buffer()->StrideY(), file) < 0) {
+    return -1;
+  }
+  if (PrintPlane(frame.video_frame_buffer()->DataU(),
+                 chroma_width, chroma_height,
+                 frame.video_frame_buffer()->StrideU(), file) < 0) {
+    return -1;
+  }
+  if (PrintPlane(frame.video_frame_buffer()->DataV(),
+                 chroma_width, chroma_height,
+                 frame.video_frame_buffer()->StrideV(), file) < 0) {
+    return -1;
   }
   return 0;
 }
@@ -133,22 +152,23 @@ int ExtractBuffer(const VideoFrame& input_frame, size_t size, uint8_t* buffer) {
     return -1;
   }

-  int pos = 0;
-  uint8_t* buffer_ptr = buffer;
-  for (int plane = 0; plane < kNumOfPlanes; ++plane) {
-    int width = (plane ? (input_frame.width() + 1) / 2 :
-                 input_frame.width());
-    int height = (plane ? (input_frame.height() + 1) / 2 :
-                  input_frame.height());
-    const uint8_t* plane_ptr = input_frame.buffer(
-        static_cast<PlaneType>(plane));
-    for (int y = 0; y < height; y++) {
-      memcpy(&buffer_ptr[pos], plane_ptr, width);
-      pos += width;
-      plane_ptr += input_frame.stride(static_cast<PlaneType>(plane));
-    }
-  }
+  int width = input_frame.video_frame_buffer()->width();
+  int height = input_frame.video_frame_buffer()->height();
+  int chroma_width = (width + 1) / 2;
+  int chroma_height = (height + 1) / 2;
+
+  libyuv::I420Copy(input_frame.video_frame_buffer()->DataY(),
+                   input_frame.video_frame_buffer()->StrideY(),
+                   input_frame.video_frame_buffer()->DataU(),
+                   input_frame.video_frame_buffer()->StrideU(),
+                   input_frame.video_frame_buffer()->DataV(),
+                   input_frame.video_frame_buffer()->StrideV(),
+                   buffer, width,
+                   buffer + width*height, chroma_width,
+                   buffer + width*height + chroma_width*chroma_height,
+                   chroma_width,
+                   width, height);
+
   return static_cast<int>(length);
 }

@@ -228,6 +248,7 @@ int ConvertVideoType(VideoType video_type) {
   return libyuv::FOURCC_ANY;
 }

+// TODO(nisse): Delete this wrapper, let callers use libyuv directly.
 int ConvertToI420(VideoType src_video_type,
                   const uint8_t* src_frame,
                   int crop_x,
@@ -245,13 +266,14 @@ int ConvertToI420(VideoType src_video_type,
     dst_width = dst_frame->height();
     dst_height = dst_frame->width();
   }
-  return libyuv::ConvertToI420(src_frame, sample_size,
-                               dst_frame->buffer(kYPlane),
-                               dst_frame->stride(kYPlane),
-                               dst_frame->buffer(kUPlane),
-                               dst_frame->stride(kUPlane),
-                               dst_frame->buffer(kVPlane),
-                               dst_frame->stride(kVPlane),
+  return libyuv::ConvertToI420(
+      src_frame, sample_size,
+      dst_frame->video_frame_buffer()->MutableDataY(),
+      dst_frame->video_frame_buffer()->StrideY(),
+      dst_frame->video_frame_buffer()->MutableDataU(),
+      dst_frame->video_frame_buffer()->StrideU(),
+      dst_frame->video_frame_buffer()->MutableDataV(),
+      dst_frame->video_frame_buffer()->StrideV(),
       crop_x, crop_y,
       src_width, src_height,
       dst_width, dst_height,
@@ -263,12 +285,13 @@ int ConvertFromI420(const VideoFrame& src_frame,
                     VideoType dst_video_type,
                     int dst_sample_size,
                     uint8_t* dst_frame) {
-  return libyuv::ConvertFromI420(src_frame.buffer(kYPlane),
-                                 src_frame.stride(kYPlane),
-                                 src_frame.buffer(kUPlane),
-                                 src_frame.stride(kUPlane),
-                                 src_frame.buffer(kVPlane),
-                                 src_frame.stride(kVPlane),
+  return libyuv::ConvertFromI420(
+      src_frame.video_frame_buffer()->DataY(),
+      src_frame.video_frame_buffer()->StrideY(),
+      src_frame.video_frame_buffer()->DataU(),
+      src_frame.video_frame_buffer()->StrideU(),
+      src_frame.video_frame_buffer()->DataV(),
+      src_frame.video_frame_buffer()->StrideV(),
       dst_frame, dst_sample_size,
       src_frame.width(), src_frame.height(),
       ConvertVideoType(dst_video_type));
@@ -280,12 +303,13 @@ int ConvertFromYV12(const VideoFrame& src_frame,
                     int dst_sample_size,
                     uint8_t* dst_frame) {
   // YV12 = Y, V, U
-  return libyuv::ConvertFromI420(src_frame.buffer(kYPlane),
-                                 src_frame.stride(kYPlane),
-                                 src_frame.buffer(kVPlane),
-                                 src_frame.stride(kVPlane),
-                                 src_frame.buffer(kUPlane),
-                                 src_frame.stride(kUPlane),
+  return libyuv::ConvertFromI420(
+      src_frame.video_frame_buffer()->DataY(),
+      src_frame.video_frame_buffer()->StrideY(),
+      src_frame.video_frame_buffer()->DataV(),
+      src_frame.video_frame_buffer()->StrideV(),
+      src_frame.video_frame_buffer()->DataU(),
+      src_frame.video_frame_buffer()->StrideU(),
       dst_frame, dst_sample_size,
       src_frame.width(), src_frame.height(),
       ConvertVideoType(dst_video_type));
@@ -301,18 +325,18 @@ double I420PSNR(const VideoFrame* ref_frame, const VideoFrame* test_frame) {
   else if (ref_frame->width() < 0 || ref_frame->height() < 0)
     return -1;

-  double psnr = libyuv::I420Psnr(ref_frame->buffer(kYPlane),
-                                 ref_frame->stride(kYPlane),
-                                 ref_frame->buffer(kUPlane),
-                                 ref_frame->stride(kUPlane),
-                                 ref_frame->buffer(kVPlane),
-                                 ref_frame->stride(kVPlane),
-                                 test_frame->buffer(kYPlane),
-                                 test_frame->stride(kYPlane),
-                                 test_frame->buffer(kUPlane),
-                                 test_frame->stride(kUPlane),
-                                 test_frame->buffer(kVPlane),
-                                 test_frame->stride(kVPlane),
+  double psnr = libyuv::I420Psnr(ref_frame->video_frame_buffer()->DataY(),
+                                 ref_frame->video_frame_buffer()->StrideY(),
+                                 ref_frame->video_frame_buffer()->DataU(),
+                                 ref_frame->video_frame_buffer()->StrideU(),
+                                 ref_frame->video_frame_buffer()->DataV(),
+                                 ref_frame->video_frame_buffer()->StrideV(),
+                                 test_frame->video_frame_buffer()->DataY(),
+                                 test_frame->video_frame_buffer()->StrideY(),
+                                 test_frame->video_frame_buffer()->DataU(),
+                                 test_frame->video_frame_buffer()->StrideU(),
+                                 test_frame->video_frame_buffer()->DataV(),
+                                 test_frame->video_frame_buffer()->StrideV(),
                                  test_frame->width(), test_frame->height());
   // LibYuv sets the max psnr value to 128, we restrict it here.
   // In case of 0 mse in one frame, 128 can skew the results significantly.
@@ -329,18 +353,18 @@ double I420SSIM(const VideoFrame* ref_frame, const VideoFrame* test_frame) {
   else if (ref_frame->width() < 0 || ref_frame->height() < 0)
     return -1;

-  return libyuv::I420Ssim(ref_frame->buffer(kYPlane),
-                          ref_frame->stride(kYPlane),
-                          ref_frame->buffer(kUPlane),
-                          ref_frame->stride(kUPlane),
-                          ref_frame->buffer(kVPlane),
-                          ref_frame->stride(kVPlane),
-                          test_frame->buffer(kYPlane),
-                          test_frame->stride(kYPlane),
-                          test_frame->buffer(kUPlane),
-                          test_frame->stride(kUPlane),
-                          test_frame->buffer(kVPlane),
-                          test_frame->stride(kVPlane),
+  return libyuv::I420Ssim(ref_frame->video_frame_buffer()->DataY(),
+                          ref_frame->video_frame_buffer()->StrideY(),
+                          ref_frame->video_frame_buffer()->DataU(),
+                          ref_frame->video_frame_buffer()->StrideU(),
+                          ref_frame->video_frame_buffer()->DataV(),
+                          ref_frame->video_frame_buffer()->StrideV(),
+                          test_frame->video_frame_buffer()->DataY(),
+                          test_frame->video_frame_buffer()->StrideY(),
+                          test_frame->video_frame_buffer()->DataU(),
+                          test_frame->video_frame_buffer()->StrideU(),
+                          test_frame->video_frame_buffer()->DataV(),
+                          test_frame->video_frame_buffer()->StrideV(),
                           test_frame->width(), test_frame->height());
 }
 }  // namespace webrtc
@@ -69,8 +69,10 @@ void VideoFrame::CreateEmptyFrame(int width,
   if (video_frame_buffer_ && video_frame_buffer_->IsMutable() &&
       !video_frame_buffer_->native_handle() &&
       width == video_frame_buffer_->width() &&
-      height == video_frame_buffer_->height() && stride_y == stride(kYPlane) &&
-      stride_u == stride(kUPlane) && stride_v == stride(kVPlane)) {
+      height == video_frame_buffer_->height() &&
+      stride_y == video_frame_buffer_->StrideY() &&
+      stride_u == video_frame_buffer_->StrideU() &&
+      stride_v == video_frame_buffer_->StrideV()) {
     return;
   }

@@ -93,9 +95,9 @@ void VideoFrame::CreateFrame(const uint8_t* buffer_y,
   const int expected_size_u = half_height * stride_u;
   const int expected_size_v = half_height * stride_v;
   CreateEmptyFrame(width, height, stride_y, stride_u, stride_v);
-  memcpy(buffer(kYPlane), buffer_y, expected_size_y);
-  memcpy(buffer(kUPlane), buffer_u, expected_size_u);
-  memcpy(buffer(kVPlane), buffer_v, expected_size_v);
+  memcpy(video_frame_buffer_->MutableDataY(), buffer_y, expected_size_y);
+  memcpy(video_frame_buffer_->MutableDataU(), buffer_u, expected_size_u);
+  memcpy(video_frame_buffer_->MutableDataV(), buffer_v, expected_size_v);
   rotation_ = rotation;
 }

@@ -130,22 +132,26 @@ void VideoFrame::ShallowCopy(const VideoFrame& videoFrame) {
   rotation_ = videoFrame.rotation_;
 }

-uint8_t* VideoFrame::buffer(PlaneType type) {
-  return video_frame_buffer_ ? video_frame_buffer_->MutableData(type)
-                             : nullptr;
-}
-
-const uint8_t* VideoFrame::buffer(PlaneType type) const {
-  return video_frame_buffer_ ? video_frame_buffer_->data(type) : nullptr;
-}
-
+// TODO(nisse): Delete. Besides test code, only one use, in
+// webrtcvideoengine2.cc:CreateBlackFrame.
 int VideoFrame::allocated_size(PlaneType type) const {
   const int plane_height = (type == kYPlane) ? height() : (height() + 1) / 2;
-  return plane_height * stride(type);
-}
-
-int VideoFrame::stride(PlaneType type) const {
-  return video_frame_buffer_ ? video_frame_buffer_->stride(type) : 0;
+  int stride;
+  switch (type) {
+    case kYPlane:
+      stride = video_frame_buffer_->StrideY();
+      break;
+    case kUPlane:
+      stride = video_frame_buffer_->StrideU();
+      break;
+    case kVPlane:
+      stride = video_frame_buffer_->StrideV();
+      break;
+    default:
+      RTC_NOTREACHED();
+      return 0;
+  }
+  return plane_height * stride;
 }

 int VideoFrame::width() const {
@@ -160,7 +166,8 @@ bool VideoFrame::IsZeroSize() const {
   return !video_frame_buffer_;
 }

-rtc::scoped_refptr<VideoFrameBuffer> VideoFrame::video_frame_buffer() const {
+const rtc::scoped_refptr<VideoFrameBuffer>& VideoFrame::video_frame_buffer()
+    const {
   return video_frame_buffer_;
 }
@@ -1560,19 +1560,22 @@ WebRtcVideoChannel2::WebRtcVideoSendStream::~WebRtcVideoSendStream() {
   DestroyVideoEncoder(&allocated_encoder_);
 }

-static void CreateBlackFrame(webrtc::VideoFrame* video_frame,
-                             int width,
-                             int height,
-                             webrtc::VideoRotation rotation) {
-  video_frame->CreateEmptyFrame(width, height, width, (width + 1) / 2,
-                                (width + 1) / 2);
-  memset(video_frame->buffer(webrtc::kYPlane), 16,
-         video_frame->allocated_size(webrtc::kYPlane));
-  memset(video_frame->buffer(webrtc::kUPlane), 128,
-         video_frame->allocated_size(webrtc::kUPlane));
-  memset(video_frame->buffer(webrtc::kVPlane), 128,
-         video_frame->allocated_size(webrtc::kVPlane));
-  video_frame->set_rotation(rotation);
+static webrtc::VideoFrame CreateBlackFrame(int width,
+                                           int height,
+                                           int64_t render_time_ms_,
+                                           webrtc::VideoRotation rotation) {
+  webrtc::VideoFrame frame;
+  frame.CreateEmptyFrame(width, height, width, (width + 1) / 2,
+                         (width + 1) / 2);
+  memset(frame.video_frame_buffer()->MutableDataY(), 16,
+         frame.allocated_size(webrtc::kYPlane));
+  memset(frame.video_frame_buffer()->MutableDataU(), 128,
+         frame.allocated_size(webrtc::kUPlane));
+  memset(frame.video_frame_buffer()->MutableDataV(), 128,
+         frame.allocated_size(webrtc::kVPlane));
+  frame.set_rotation(rotation);
+  frame.set_render_time_ms(render_time_ms_);
+  return frame;
 }

 void WebRtcVideoChannel2::WebRtcVideoSendStream::OnFrame(
@@ -1630,19 +1633,17 @@ void WebRtcVideoChannel2::WebRtcVideoSendStream::SetSource(
   if (source == NULL) {
     if (stream_ != NULL) {
       LOG(LS_VERBOSE) << "Disabling capturer, sending black frame.";
-      webrtc::VideoFrame black_frame;
-
-      CreateBlackFrame(&black_frame, last_dimensions_.width,
-                       last_dimensions_.height, last_rotation_);
-
       // Force this black frame not to be dropped due to timestamp order
       // check. As IncomingCapturedFrame will drop the frame if this frame's
       // timestamp is less than or equal to last frame's timestamp, it is
       // necessary to give this black frame a larger timestamp than the
       // previous one.
       last_frame_timestamp_ms_ += 1;
-      black_frame.set_render_time_ms(last_frame_timestamp_ms_);
-      stream_->Input()->IncomingCapturedFrame(black_frame);
+      stream_->Input()->IncomingCapturedFrame(
+          CreateBlackFrame(last_dimensions_.width, last_dimensions_.height,
+                           last_frame_timestamp_ms_, last_rotation_));
     }
   }
 }
@@ -69,11 +69,11 @@ static void CreateBlackFrame(webrtc::VideoFrame* video_frame,
                              int height) {
   video_frame->CreateEmptyFrame(
       width, height, width, (width + 1) / 2, (width + 1) / 2);
-  memset(video_frame->buffer(webrtc::kYPlane), 16,
+  memset(video_frame->video_frame_buffer()->MutableDataY(), 16,
          video_frame->allocated_size(webrtc::kYPlane));
-  memset(video_frame->buffer(webrtc::kUPlane), 128,
+  memset(video_frame->video_frame_buffer()->MutableDataU(), 128,
          video_frame->allocated_size(webrtc::kUPlane));
-  memset(video_frame->buffer(webrtc::kVPlane), 128,
+  memset(video_frame->video_frame_buffer()->MutableDataV(), 128,
          video_frame->allocated_size(webrtc::kVPlane));
 }
@@ -23,6 +23,7 @@
 #include "webrtc/modules/video_capture/video_capture_factory.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/include/sleep.h"
+#include "webrtc/test/frame_utils.h"
 #include "webrtc/video_frame.h"

 using webrtc::CriticalSectionWrapper;
@@ -59,32 +60,6 @@ static const int kTestHeight = 288;
 static const int kTestWidth = 352;
 static const int kTestFramerate = 30;

-// Compares the content of two video frames.
-static bool CompareFrames(const webrtc::VideoFrame& frame1,
-                          const webrtc::VideoFrame& frame2) {
-  bool result =
-      (frame1.stride(webrtc::kYPlane) == frame2.stride(webrtc::kYPlane)) &&
-      (frame1.stride(webrtc::kUPlane) == frame2.stride(webrtc::kUPlane)) &&
-      (frame1.stride(webrtc::kVPlane) == frame2.stride(webrtc::kVPlane)) &&
-      (frame1.width() == frame2.width()) &&
-      (frame1.height() == frame2.height());
-
-  if (!result)
-    return false;
-  for (int plane = 0; plane < webrtc::kNumOfPlanes; plane ++) {
-    webrtc::PlaneType plane_type = static_cast<webrtc::PlaneType>(plane);
-    int allocated_size1 = frame1.allocated_size(plane_type);
-    int allocated_size2 = frame2.allocated_size(plane_type);
-    if (allocated_size1 != allocated_size2)
-      return false;
-    const uint8_t* plane_buffer1 = frame1.buffer(plane_type);
-    const uint8_t* plane_buffer2 = frame2.buffer(plane_type);
-    if (memcmp(plane_buffer1, plane_buffer2, allocated_size1))
-      return false;
-  }
-  return true;
-}
-
 class TestVideoCaptureCallback : public VideoCaptureDataCallback {
  public:
   TestVideoCaptureCallback()
@@ -131,7 +106,7 @@ class TestVideoCaptureCallback : public VideoCaptureDataCallback {

     incoming_frames_++;
     last_render_time_ms_ = videoFrame.render_time_ms();
-    last_frame_.CopyFrame(videoFrame);
+    last_frame_ = videoFrame.video_frame_buffer();
   }

   virtual void OnCaptureDelayChanged(const int32_t id,
@@ -167,7 +142,8 @@ class TestVideoCaptureCallback : public VideoCaptureDataCallback {

   bool CompareLastFrame(const webrtc::VideoFrame& frame) {
     CriticalSectionScoped cs(capture_cs_.get());
-    return CompareFrames(last_frame_, frame);
+    return webrtc::test::FrameBufsEqual(last_frame_,
+                                        frame.video_frame_buffer());
   }

   void SetExpectedCaptureRotation(webrtc::VideoRotation rotation) {
@@ -182,7 +158,7 @@ class TestVideoCaptureCallback : public VideoCaptureDataCallback {
   int64_t last_render_time_ms_;
   int incoming_frames_;
   int timing_warnings_;
-  webrtc::VideoFrame last_frame_;
+  rtc::scoped_refptr<webrtc::VideoFrameBuffer> last_frame_;
   webrtc::VideoRotation rotate_frame_;
 };

@@ -447,10 +423,11 @@ class VideoCaptureExternalTest : public testing::Test {
     test_frame_.CreateEmptyFrame(kTestWidth, kTestHeight, kTestWidth,
                                  ((kTestWidth + 1) / 2), (kTestWidth + 1) / 2);
     SleepMs(1);  // Wait 1ms so that two tests can't have the same timestamp.
-    memset(test_frame_.buffer(webrtc::kYPlane), 127, kTestWidth * kTestHeight);
-    memset(test_frame_.buffer(webrtc::kUPlane), 127,
+    memset(test_frame_.video_frame_buffer()->MutableDataY(), 127,
+           kTestWidth * kTestHeight);
+    memset(test_frame_.video_frame_buffer()->MutableDataU(), 127,
            ((kTestWidth + 1) / 2) * ((kTestHeight + 1) / 2));
-    memset(test_frame_.buffer(webrtc::kVPlane), 127,
+    memset(test_frame_.video_frame_buffer()->MutableDataV(), 127,
            ((kTestWidth + 1) / 2) * ((kTestHeight + 1) / 2));

     capture_module_->RegisterCaptureDataCallback(capture_callback_);
@@ -172,6 +172,7 @@
         'video_capture_module_internal_impl',
         'webrtc_utility',
         '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
+        '<(webrtc_root)/test/test.gyp:video_test_common',
         '<(DEPTH)/testing/gtest.gyp:gtest',
       ],
       'sources': [
@@ -129,10 +129,12 @@ int H264DecoderImpl::AVGetBuffer2(
   video_frame->set_video_frame_buffer(
       decoder->pool_.CreateBuffer(width, height));
   // DCHECK that we have a continuous buffer as is required.
-  RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
-      video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
-  RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
-      video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
+  RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataU(),
+                video_frame->video_frame_buffer()->DataY() +
+                video_frame->allocated_size(kYPlane));
+  RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataV(),
+                video_frame->video_frame_buffer()->DataU() +
+                video_frame->allocated_size(kUPlane));
   int total_size = video_frame->allocated_size(kYPlane) +
                    video_frame->allocated_size(kUPlane) +
                    video_frame->allocated_size(kVPlane);
@@ -141,12 +143,18 @@ int H264DecoderImpl::AVGetBuffer2(
   av_frame->reordered_opaque = context->reordered_opaque;

   // Set |av_frame| members as required by FFmpeg.
-  av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
-  av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
-  av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
-  av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
-  av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
-  av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
+  av_frame->data[kYPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataY();
+  av_frame->linesize[kYPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideY();
+  av_frame->data[kUPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataU();
+  av_frame->linesize[kUPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideU();
+  av_frame->data[kVPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataV();
+  av_frame->linesize[kVPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideV();
   RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);

   av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
@@ -339,9 +347,12 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
   VideoFrame* video_frame = static_cast<VideoFrame*>(
       av_buffer_get_opaque(av_frame_->buf[0]));
   RTC_DCHECK(video_frame);
-  RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
-  RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
-  RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
+  RTC_CHECK_EQ(av_frame_->data[kYPlane],
+               video_frame->video_frame_buffer()->DataY());
+  RTC_CHECK_EQ(av_frame_->data[kUPlane],
+               video_frame->video_frame_buffer()->DataU());
+  RTC_CHECK_EQ(av_frame_->data[kVPlane],
+               video_frame->video_frame_buffer()->DataV());
   video_frame->set_timestamp(input_image._timeStamp);

   // The decoded image may be larger than what is supposed to be visible, see
@@ -352,9 +363,9 @@ int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
     video_frame->set_video_frame_buffer(
         new rtc::RefCountedObject<WrappedI420Buffer>(
             av_frame_->width, av_frame_->height,
-            buf->data(kYPlane), buf->stride(kYPlane),
-            buf->data(kUPlane), buf->stride(kUPlane),
-            buf->data(kVPlane), buf->stride(kVPlane),
+            buf->DataY(), buf->StrideY(),
+            buf->DataU(), buf->StrideU(),
+            buf->DataV(), buf->StrideV(),
             rtc::KeepRefUntilDone(buf)));
   }
@@ -367,12 +367,12 @@ int32_t H264EncoderImpl::Encode(
   picture.iPicHeight = frame.height();
   picture.iColorFormat = EVideoFormatType::videoFormatI420;
   picture.uiTimeStamp = frame.ntp_time_ms();
-  picture.iStride[0] = frame.stride(kYPlane);
-  picture.iStride[1] = frame.stride(kUPlane);
-  picture.iStride[2] = frame.stride(kVPlane);
-  picture.pData[0] = const_cast<uint8_t*>(frame.buffer(kYPlane));
-  picture.pData[1] = const_cast<uint8_t*>(frame.buffer(kUPlane));
-  picture.pData[2] = const_cast<uint8_t*>(frame.buffer(kVPlane));
+  picture.iStride[0] = frame.video_frame_buffer()->StrideY();
+  picture.iStride[1] = frame.video_frame_buffer()->StrideU();
+  picture.iStride[2] = frame.video_frame_buffer()->StrideV();
+  picture.pData[0] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataY());
+  picture.pData[1] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataU());
+  picture.pData[2] = const_cast<uint8_t*>(frame.video_frame_buffer()->DataV());

   // EncodeFrame output.
   SFrameBSInfo info;
@@ -168,10 +168,14 @@ bool CopyVideoFrameToPixelBuffer(const webrtc::VideoFrame& frame,
   int dst_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixel_buffer, 1);
   // Convert I420 to NV12.
   int ret = libyuv::I420ToNV12(
-      frame.buffer(webrtc::kYPlane), frame.stride(webrtc::kYPlane),
-      frame.buffer(webrtc::kUPlane), frame.stride(webrtc::kUPlane),
-      frame.buffer(webrtc::kVPlane), frame.stride(webrtc::kVPlane), dst_y,
-      dst_stride_y, dst_uv, dst_stride_uv, frame.width(), frame.height());
+      frame.video_frame_buffer()->DataY(),
+      frame.video_frame_buffer()->StrideY(),
+      frame.video_frame_buffer()->DataU(),
+      frame.video_frame_buffer()->StrideU(),
+      frame.video_frame_buffer()->DataV(),
+      frame.video_frame_buffer()->StrideV(),
+      dst_y, dst_stride_y, dst_uv, dst_stride_uv,
+      frame.width(), frame.height());
   CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
   if (ret) {
     LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
@@ -301,14 +301,21 @@ int SimulcastEncoderAdapter::Encode(
       // Aligning stride values based on width.
       dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
                                  (dst_width + 1) / 2, (dst_width + 1) / 2);
-      libyuv::I420Scale(
-          input_image.buffer(kYPlane), input_image.stride(kYPlane),
-          input_image.buffer(kUPlane), input_image.stride(kUPlane),
-          input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width,
-          src_height, dst_frame.buffer(kYPlane), dst_frame.stride(kYPlane),
-          dst_frame.buffer(kUPlane), dst_frame.stride(kUPlane),
-          dst_frame.buffer(kVPlane), dst_frame.stride(kVPlane), dst_width,
-          dst_height, libyuv::kFilterBilinear);
+      libyuv::I420Scale(input_image.video_frame_buffer()->DataY(),
+                        input_image.video_frame_buffer()->StrideY(),
+                        input_image.video_frame_buffer()->DataU(),
+                        input_image.video_frame_buffer()->StrideU(),
+                        input_image.video_frame_buffer()->DataV(),
+                        input_image.video_frame_buffer()->StrideV(),
+                        src_width, src_height,
+                        dst_frame.video_frame_buffer()->MutableDataY(),
+                        dst_frame.video_frame_buffer()->StrideY(),
+                        dst_frame.video_frame_buffer()->MutableDataU(),
+                        dst_frame.video_frame_buffer()->StrideU(),
+                        dst_frame.video_frame_buffer()->MutableDataV(),
+                        dst_frame.video_frame_buffer()->StrideV(),
+                        dst_width, dst_height,
+                        libyuv::kFilterBilinear);
       dst_frame.set_timestamp(input_image.timestamp());
       dst_frame.set_render_time_ms(input_image.render_time_ms());
       streaminfos_[stream_idx].encoder->Encode(dst_frame, codec_specific_info,
@@ -119,13 +119,13 @@ class Vp8TestDecodedImageCallback : public DecodedImageCallback {
   Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
   int32_t Decoded(VideoFrame& decoded_image) override {
     for (int i = 0; i < decoded_image.width(); ++i) {
-      EXPECT_NEAR(kColorY, decoded_image.buffer(kYPlane)[i], 1);
+      EXPECT_NEAR(kColorY, decoded_image.video_frame_buffer()->DataY()[i], 1);
     }

     // TODO(mikhal): Verify the difference between U,V and the original.
     for (int i = 0; i < ((decoded_image.width() + 1) / 2); ++i) {
-      EXPECT_NEAR(kColorU, decoded_image.buffer(kUPlane)[i], 4);
-      EXPECT_NEAR(kColorV, decoded_image.buffer(kVPlane)[i], 4);
+      EXPECT_NEAR(kColorU, decoded_image.video_frame_buffer()->DataU()[i], 4);
+      EXPECT_NEAR(kColorV, decoded_image.video_frame_buffer()->DataV()[i], 4);
     }
     decoded_frames_++;
     return 0;
@@ -222,24 +222,38 @@ class TestVp8Simulcast : public ::testing::Test {
   TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
       : encoder_(encoder), decoder_(decoder) {}

-  // Creates an VideoFrame from |plane_colors|.
-  static void CreateImage(VideoFrame* frame, int plane_colors[kNumOfPlanes]) {
-    for (int plane_num = 0; plane_num < kNumOfPlanes; ++plane_num) {
-      int width =
-          (plane_num != kYPlane ? (frame->width() + 1) / 2 : frame->width());
-      int height =
-          (plane_num != kYPlane ? (frame->height() + 1) / 2 : frame->height());
-      PlaneType plane_type = static_cast<PlaneType>(plane_num);
-      uint8_t* data = frame->buffer(plane_type);
+  static void SetPlane(uint8_t* data,
+                       uint8_t value,
+                       int width,
+                       int height,
+                       int stride) {
+    for (int i = 0; i < height; i++, data += stride) {
       // Setting allocated area to zero - setting only image size to
       // requested values - will make it easier to distinguish between image
       // size and frame size (accounting for stride).
-      memset(frame->buffer(plane_type), 0, frame->allocated_size(plane_type));
-      for (int i = 0; i < height; i++) {
-        memset(data, plane_colors[plane_num], width);
-        data += frame->stride(plane_type);
-      }
+      memset(data, value, width);
+      memset(data + width, 0, stride - width);
     }
   }

+  // Fills in an VideoFrameBuffer from |plane_colors|.
+  static void CreateImage(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
+                          int plane_colors[kNumOfPlanes]) {
+    int width = buffer->width();
+    int height = buffer->height();
+    int chroma_width = (width + 1) / 2;
+    int chroma_height = (height + 1) / 2;
+
+    SetPlane(buffer->MutableDataY(), plane_colors[0],
+             width, height, buffer->StrideY());
+
+    SetPlane(buffer->MutableDataU(), plane_colors[1],
+             chroma_width, chroma_height,
+             buffer->StrideU());
+
+    SetPlane(buffer->MutableDataV(), plane_colors[2],
+             chroma_width, chroma_height,
+             buffer->StrideV());
+  }
+
   static void DefaultSettings(VideoCodec* settings,
@@ -305,11 +319,11 @@ class TestVp8Simulcast : public ::testing::Test {
     int half_width = (kDefaultWidth + 1) / 2;
     input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
                                   half_width, half_width);
-    memset(input_frame_.buffer(kYPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
            input_frame_.allocated_size(kYPlane));
-    memset(input_frame_.buffer(kUPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
           input_frame_.allocated_size(kUPlane));
-    memset(input_frame_.buffer(kVPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
           input_frame_.allocated_size(kVPlane));
   }

@@ -555,11 +569,11 @@ class TestVp8Simulcast : public ::testing::Test {
     int half_width = (settings_.width + 1) / 2;
     input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
                                   settings_.width, half_width, half_width);
-    memset(input_frame_.buffer(kYPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
           input_frame_.allocated_size(kYPlane));
-    memset(input_frame_.buffer(kUPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
          input_frame_.allocated_size(kUPlane));
-    memset(input_frame_.buffer(kVPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
          input_frame_.allocated_size(kVPlane));

     // The for loop above did not set the bitrate of the highest layer.
@@ -596,11 +610,11 @@ class TestVp8Simulcast : public ::testing::Test {
     half_width = (settings_.width + 1) / 2;
     input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
                                   settings_.width, half_width, half_width);
-    memset(input_frame_.buffer(kYPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
          input_frame_.allocated_size(kYPlane));
-    memset(input_frame_.buffer(kUPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
          input_frame_.allocated_size(kUPlane));
-    memset(input_frame_.buffer(kVPlane), 0,
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
          input_frame_.allocated_size(kVPlane));
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -691,7 +705,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
    plane_offset[kVPlane] = kColorV;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     int picture_id = -1;
@@ -707,7 +721,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

@@ -715,7 +729,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -724,7 +738,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -739,7 +753,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
@@ -898,7 +912,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

@@ -906,7 +920,7 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(&input_frame_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
@@ -752,15 +752,18 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
   // Image in vpx_image_t format.
   // Input image is const. VP8's raw image is not defined as const.
   raw_images_[0].planes[VPX_PLANE_Y] =
-      const_cast<uint8_t*>(input_image.buffer(kYPlane));
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY());
   raw_images_[0].planes[VPX_PLANE_U] =
-      const_cast<uint8_t*>(input_image.buffer(kUPlane));
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU());
   raw_images_[0].planes[VPX_PLANE_V] =
-      const_cast<uint8_t*>(input_image.buffer(kVPlane));
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV());

-  raw_images_[0].stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
-  raw_images_[0].stride[VPX_PLANE_U] = input_image.stride(kUPlane);
-  raw_images_[0].stride[VPX_PLANE_V] = input_image.stride(kVPlane);
+  raw_images_[0].stride[VPX_PLANE_Y] =
+      input_image.video_frame_buffer()->StrideY();
+  raw_images_[0].stride[VPX_PLANE_U] =
+      input_image.video_frame_buffer()->StrideU();
+  raw_images_[0].stride[VPX_PLANE_V] =
+      input_image.video_frame_buffer()->StrideV();

   for (size_t i = 1; i < encoders_.size(); ++i) {
     // Scale the image down a number of times by downsampling factor
@@ -1357,9 +1360,12 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
   libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
                    img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
                    img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
-                   decoded_image.buffer(kYPlane), decoded_image.stride(kYPlane),
-                   decoded_image.buffer(kUPlane), decoded_image.stride(kUPlane),
-                   decoded_image.buffer(kVPlane), decoded_image.stride(kVPlane),
+                   decoded_image.video_frame_buffer()->MutableDataY(),
+                   decoded_image.video_frame_buffer()->StrideY(),
+                   decoded_image.video_frame_buffer()->MutableDataU(),
+                   decoded_image.video_frame_buffer()->StrideU(),
+                   decoded_image.video_frame_buffer()->MutableDataV(),
+                   decoded_image.video_frame_buffer()->StrideV(),
                    img->d_w, img->d_h);
   decoded_image.set_ntp_time_ms(ntp_time_ms);
   int ret = decode_complete_callback_->Decoded(decoded_image);
@@ -500,12 +500,15 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,

   // Image in vpx_image_t format.
   // Input image is const. VPX's raw image is not defined as const.
-  raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
-  raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane));
-  raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane));
-  raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane);
-  raw_->stride[VPX_PLANE_U] = input_image.stride(kUPlane);
-  raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
+  raw_->planes[VPX_PLANE_Y] =
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataY());
+  raw_->planes[VPX_PLANE_U] =
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataU());
+  raw_->planes[VPX_PLANE_V] =
+      const_cast<uint8_t*>(input_image.video_frame_buffer()->DataV());
+  raw_->stride[VPX_PLANE_Y] = input_image.video_frame_buffer()->StrideY();
+  raw_->stride[VPX_PLANE_U] = input_image.video_frame_buffer()->StrideU();
+  raw_->stride[VPX_PLANE_V] = input_image.video_frame_buffer()->StrideV();

   vpx_enc_frame_flags_t flags = 0;
   bool send_keyframe = (frame_type == kVideoFrameKey);
@@ -71,9 +71,12 @@ void VideoProcessingTest::SetUp() {
   video_frame_.CreateEmptyFrame(width_, height_, width_,
                                 half_width_, half_width_);
   // Clear video frame so DrMemory/Valgrind will allow reads of the buffer.
-  memset(video_frame_.buffer(kYPlane), 0, video_frame_.allocated_size(kYPlane));
-  memset(video_frame_.buffer(kUPlane), 0, video_frame_.allocated_size(kUPlane));
-  memset(video_frame_.buffer(kVPlane), 0, video_frame_.allocated_size(kVPlane));
+  memset(video_frame_.video_frame_buffer()->MutableDataY(), 0,
+         video_frame_.allocated_size(kYPlane));
+  memset(video_frame_.video_frame_buffer()->MutableDataU(), 0,
+         video_frame_.allocated_size(kUPlane));
+  memset(video_frame_.video_frame_buffer()->MutableDataV(), 0,
+         video_frame_.allocated_size(kVPlane));
   const std::string video_file =
       webrtc::test::ResourcePath("foreman_cif", "yuv");
   source_file_ = fopen(video_file.c_str(), "rb");
@@ -81,17 +81,19 @@ void VideoDenoiser::DenoiserReset(const VideoFrame& frame,
   height_ = frame.height();
   mb_cols_ = width_ >> 4;
   mb_rows_ = height_ >> 4;
-  stride_y_ = frame.stride(kYPlane);
-  stride_u_ = frame.stride(kUPlane);
-  stride_v_ = frame.stride(kVPlane);
+  stride_y_ = frame.video_frame_buffer()->StrideY();
+  stride_u_ = frame.video_frame_buffer()->StrideU();
+  stride_v_ = frame.video_frame_buffer()->StrideV();

   // Allocate an empty buffer for denoised_frame_prev.
   denoised_frame_prev->CreateEmptyFrame(width_, height_, stride_y_, stride_u_,
                                         stride_v_);
   // Allocate and initialize denoised_frame with key frame.
-  denoised_frame->CreateFrame(frame.buffer(kYPlane), frame.buffer(kUPlane),
-                              frame.buffer(kVPlane), width_, height_, stride_y_,
-                              stride_u_, stride_v_, kVideoRotation_0);
+  denoised_frame->CreateFrame(
+      frame.video_frame_buffer()->DataY(),
+      frame.video_frame_buffer()->DataU(),
+      frame.video_frame_buffer()->DataV(),
+      width_, height_, stride_y_, stride_u_, stride_v_, kVideoRotation_0);
   // Set time parameters to the output frame.
   denoised_frame->set_timestamp(frame.timestamp());
   denoised_frame->set_render_time_ms(frame.render_time_ms());
@@ -236,13 +238,14 @@ void VideoDenoiser::DenoiseFrame(const VideoFrame& frame,
   }

   // Set buffer pointers.
-  const uint8_t* y_src = frame.buffer(kYPlane);
-  const uint8_t* u_src = frame.buffer(kUPlane);
-  const uint8_t* v_src = frame.buffer(kVPlane);
-  uint8_t* y_dst = denoised_frame->buffer(kYPlane);
-  uint8_t* u_dst = denoised_frame->buffer(kUPlane);
-  uint8_t* v_dst = denoised_frame->buffer(kVPlane);
-  uint8_t* y_dst_prev = denoised_frame_prev->buffer(kYPlane);
+  const uint8_t* y_src = frame.video_frame_buffer()->DataY();
+  const uint8_t* u_src = frame.video_frame_buffer()->DataU();
+  const uint8_t* v_src = frame.video_frame_buffer()->DataV();
+  uint8_t* y_dst = denoised_frame->video_frame_buffer()->MutableDataY();
+  uint8_t* u_dst = denoised_frame->video_frame_buffer()->MutableDataU();
+  uint8_t* v_dst = denoised_frame->video_frame_buffer()->MutableDataV();
+  uint8_t* y_dst_prev =
+      denoised_frame_prev->video_frame_buffer()->MutableDataY();
   memset(x_density_.get(), 0, mb_cols_);
   memset(y_density_.get(), 0, mb_rows_);
   memset(moving_object_.get(), 1, mb_cols_ * mb_rows_);
@@ -41,9 +41,12 @@ class ChromaGenerator : public FrameGenerator {
     uint8_t u = fabs(sin(angle_)) * 0xFF;
     uint8_t v = fabs(cos(angle_)) * 0xFF;

-    memset(frame_.buffer(kYPlane), 0x80, frame_.allocated_size(kYPlane));
-    memset(frame_.buffer(kUPlane), u, frame_.allocated_size(kUPlane));
-    memset(frame_.buffer(kVPlane), v, frame_.allocated_size(kVPlane));
+    memset(frame_.video_frame_buffer()->MutableDataY(), 0x80,
+           frame_.allocated_size(kYPlane));
+    memset(frame_.video_frame_buffer()->MutableDataU(), u,
+           frame_.allocated_size(kUPlane));
+    memset(frame_.video_frame_buffer()->MutableDataV(), v,
+           frame_.allocated_size(kVPlane));
     return &frame_;
   }

@@ -202,24 +205,24 @@ class ScrollingImageFrameGenerator : public FrameGenerator {
     int pixels_scrolled_y =
        static_cast<int>(scroll_margin_y * scroll_factor + 0.5);

-    int offset_y = (current_source_frame_->stride(PlaneType::kYPlane) *
+    int offset_y = (current_source_frame_->video_frame_buffer()->StrideY() *
                     pixels_scrolled_y) +
                    pixels_scrolled_x;
-    int offset_u = (current_source_frame_->stride(PlaneType::kUPlane) *
+    int offset_u = (current_source_frame_->video_frame_buffer()->StrideU() *
                     (pixels_scrolled_y / 2)) +
                    (pixels_scrolled_x / 2);
-    int offset_v = (current_source_frame_->stride(PlaneType::kVPlane) *
+    int offset_v = (current_source_frame_->video_frame_buffer()->StrideV() *
                     (pixels_scrolled_y / 2)) +
                    (pixels_scrolled_x / 2);

     current_frame_.CreateFrame(
-        &current_source_frame_->buffer(PlaneType::kYPlane)[offset_y],
-        &current_source_frame_->buffer(PlaneType::kUPlane)[offset_u],
-        &current_source_frame_->buffer(PlaneType::kVPlane)[offset_v],
+        &current_source_frame_->video_frame_buffer()->DataY()[offset_y],
+        &current_source_frame_->video_frame_buffer()->DataU()[offset_u],
+        &current_source_frame_->video_frame_buffer()->DataV()[offset_v],
         kTargetWidth, kTargetHeight,
-        current_source_frame_->stride(PlaneType::kYPlane),
-        current_source_frame_->stride(PlaneType::kUPlane),
-        current_source_frame_->stride(PlaneType::kVPlane),
+        current_source_frame_->video_frame_buffer()->StrideY(),
+        current_source_frame_->video_frame_buffer()->StrideU(),
+        current_source_frame_->video_frame_buffer()->StrideV(),
         kVideoRotation_0);
   }
@@ -59,17 +59,17 @@ class FrameGeneratorTest : public ::testing::Test {
   void CheckFrameAndMutate(VideoFrame* frame, uint8_t y, uint8_t u, uint8_t v) {
     // Check that frame is valid, has the correct color and timestamp are clean.
     ASSERT_NE(nullptr, frame);
-    uint8_t* buffer;
+    const uint8_t* buffer;
     ASSERT_EQ(y_size, frame->allocated_size(PlaneType::kYPlane));
-    buffer = frame->buffer(PlaneType::kYPlane);
+    buffer = frame->video_frame_buffer()->DataY();
     for (int i = 0; i < y_size; ++i)
       ASSERT_EQ(y, buffer[i]);
     ASSERT_EQ(uv_size, frame->allocated_size(PlaneType::kUPlane));
-    buffer = frame->buffer(PlaneType::kUPlane);
+    buffer = frame->video_frame_buffer()->DataU();
     for (int i = 0; i < uv_size; ++i)
       ASSERT_EQ(u, buffer[i]);
     ASSERT_EQ(uv_size, frame->allocated_size(PlaneType::kVPlane));
-    buffer = frame->buffer(PlaneType::kVPlane);
+    buffer = frame->video_frame_buffer()->DataV();
     for (int i = 0; i < uv_size; ++i)
       ASSERT_EQ(v, buffer[i]);
     EXPECT_EQ(0, frame->ntp_time_ms());
@@ -54,8 +54,7 @@ class VideoCaptureInputTest : public ::testing::Test {
     EXPECT_TRUE(input_->GetVideoFrame(&frame));
     ASSERT_TRUE(frame.video_frame_buffer());
     if (!frame.video_frame_buffer()->native_handle()) {
-      output_frame_ybuffers_.push_back(
-          static_cast<const VideoFrame*>(&frame)->buffer(kYPlane));
+      output_frame_ybuffers_.push_back(frame.video_frame_buffer()->DataY());
     }
     output_frames_.push_back(
         std::unique_ptr<VideoFrame>(new VideoFrame(frame)));
@@ -181,8 +180,7 @@ TEST_F(VideoCaptureInputTest, TestI420Frames) {
   std::vector<const uint8_t*> ybuffer_pointers;
   for (int i = 0; i < kNumFrame; ++i) {
     input_frames_.push_back(CreateVideoFrame(static_cast<uint8_t>(i + 1)));
-    const VideoFrame* const_input_frame = input_frames_[i].get();
-    ybuffer_pointers.push_back(const_input_frame->buffer(kYPlane));
+    ybuffer_pointers.push_back(input_frames_[i]->video_frame_buffer()->DataY());
     AddInputFrame(input_frames_[i].get());
     WaitOutputFrame();
   }
@@ -107,11 +107,11 @@ class VideoEncoderSoftwareFallbackWrapperTest : public ::testing::Test {
 void VideoEncoderSoftwareFallbackWrapperTest::EncodeFrame() {
   frame_.CreateEmptyFrame(kWidth, kHeight, kWidth, (kWidth + 1) / 2,
                           (kWidth + 1) / 2);
-  memset(frame_.buffer(webrtc::kYPlane), 16,
+  memset(frame_.video_frame_buffer()->MutableDataY(), 16,
          frame_.allocated_size(webrtc::kYPlane));
-  memset(frame_.buffer(webrtc::kUPlane), 128,
+  memset(frame_.video_frame_buffer()->MutableDataU(), 128,
          frame_.allocated_size(webrtc::kUPlane));
-  memset(frame_.buffer(webrtc::kVPlane), 128,
+  memset(frame_.video_frame_buffer()->MutableDataV(), 128,
         frame_.allocated_size(webrtc::kVPlane));

   std::vector<FrameType> types(1, kVideoFrameKey);
@@ -65,17 +65,9 @@ class VideoFrame {
   // reference to the video buffer also retained by |videoFrame|.
   void ShallowCopy(const VideoFrame& videoFrame);

-  // Get pointer to buffer per plane.
-  uint8_t* buffer(PlaneType type);
-  // Overloading with const.
-  const uint8_t* buffer(PlaneType type) const;
-
   // Get allocated size per plane.
   int allocated_size(PlaneType type) const;

-  // Get allocated stride per plane.
-  int stride(PlaneType type) const;
-
   // Get frame width.
   int width() const;

@@ -124,7 +116,10 @@ class VideoFrame {

   // Return the underlying buffer. Never nullptr for a properly
   // initialized VideoFrame.
-  rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer() const;
+  // Creating a new reference breaks the HasOneRef and IsMutable
+  // logic. So return a const ref to our reference.
+  const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& video_frame_buffer()
+      const;

   // Set the underlying buffer.
   void set_video_frame_buffer(
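For context on that last hunk: returning the scoped_refptr by value implies an AddRef, so a caller holding the returned copy keeps the buffer's reference count above one for as long as the copy lives, and a HasOneRef()-based mutability test can never succeed. A simplified sketch of the distinction (illustrative only, not the actual WebRTC implementation details):

  // By-value return: |copy| holds a second reference, so the buffer no
  // longer has a single owner and an IsMutable()/HasOneRef() check reports
  // false even when the frame is the only real user.
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> copy =
      frame.video_frame_buffer();

  // Const-ref return (this CL): inspecting the buffer adds no reference,
  // so CreateEmptyFrame() can still detect a solely-owned, reusable
  // buffer and write into it in place.
  const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& ref =
      frame.video_frame_buffer();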