Revert of Move MutableDataY{,U,V} methods to I420Buffer only. (patchset #14 id:260001 of https://codereview.webrtc.org/2278883002/ )

Reason for revert:
Broke downstream application.

Original issue's description:
> Move MutableDataY{,U,V} methods to I420Buffer only.
>
> Deleted from the VideoFrameBuffer base class.
>
> BUG=webrtc:5921
>
> Committed: https://crrev.com/5539ef6c03c273f39fadae41ace47fdc11ac6d60
> Cr-Commit-Position: refs/heads/master@{#14317}

TBR=perkj@webrtc.org,magjed@webrtc.org,pthatcher@webrtc.org,honghaiz@webrtc.org,stefan@webrtc.org
# Skipping CQ checks because original CL landed less than 1 days ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=webrtc:5921

Review-Url: https://codereview.webrtc.org/2354223002
Cr-Commit-Position: refs/heads/master@{#14325}
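For context on the API being reverted: before CL 2278883002, the writable plane accessors (MutableDataY/U/V, together with the strides) lived on the VideoFrameBuffer base class, so call sites could write pixels through frame.video_frame_buffer(); the CL had narrowed them to the concrete I420Buffer. A minimal sketch of the two call patterns (illustrative only; |frame|, |width| and |height| are placeholder names, not taken from this diff):

    // Old API, restored by this revert: writable planes via the base class.
    rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer =
        frame.video_frame_buffer();
    memset(buffer->MutableDataY(), 0, buffer->StrideY() * buffer->height());

    // API from the reverted CL: writable planes only on the concrete
    // I420Buffer, so callers must hold (or create) the concrete type.
    rtc::scoped_refptr<webrtc::I420Buffer> i420 =
        webrtc::I420Buffer::Create(width, height);
    memset(i420->MutableDataY(), 0, i420->StrideY() * i420->height());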
@@ -121,47 +121,52 @@ int H264DecoderImpl::AVGetBuffer2(
     return ret;
   }

-  // The video frame is stored in |frame_buffer|. |av_frame| is FFmpeg's version
-  // of a video frame and will be set up to reference |frame_buffer|'s data.
+  // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
+  // of a video frame and will be set up to reference |video_frame|'s buffers.

+  // TODO(nisse): The VideoFrame's timestamp and rotation info is not used.
+  // Refactor to do not use a VideoFrame object at all.
+
   // FFmpeg expects the initial allocation to be zero-initialized according to
   // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers.
-  // TODO(nisse): Delete that feature from the video pool, instead add
-  // an explicit call to InitializeData here.
-  rtc::scoped_refptr<I420Buffer> frame_buffer =
-      decoder->pool_.CreateBuffer(width, height);
+  VideoFrame* video_frame = new VideoFrame(
+      decoder->pool_.CreateBuffer(width, height),
+      0 /* timestamp */, 0 /* render_time_ms */, kVideoRotation_0);

-  int y_size = width * height;
-  int uv_size = ((width + 1) / 2) * ((height + 1) / 2);
   // DCHECK that we have a continuous buffer as is required.
-  RTC_DCHECK_EQ(frame_buffer->DataU(), frame_buffer->DataY() + y_size);
-  RTC_DCHECK_EQ(frame_buffer->DataV(), frame_buffer->DataU() + uv_size);
-  int total_size = y_size + 2 * uv_size;
+  RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataU(),
+                video_frame->video_frame_buffer()->DataY() +
+                    video_frame->allocated_size(kYPlane));
+  RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataV(),
+                video_frame->video_frame_buffer()->DataU() +
+                    video_frame->allocated_size(kUPlane));
+  int total_size = video_frame->allocated_size(kYPlane) +
+                   video_frame->allocated_size(kUPlane) +
+                   video_frame->allocated_size(kVPlane);

   av_frame->format = context->pix_fmt;
   av_frame->reordered_opaque = context->reordered_opaque;

   // Set |av_frame| members as required by FFmpeg.
-  av_frame->data[kYPlaneIndex] = frame_buffer->MutableDataY();
-  av_frame->linesize[kYPlaneIndex] = frame_buffer->StrideY();
-  av_frame->data[kUPlaneIndex] = frame_buffer->MutableDataU();
-  av_frame->linesize[kUPlaneIndex] = frame_buffer->StrideU();
-  av_frame->data[kVPlaneIndex] = frame_buffer->MutableDataV();
-  av_frame->linesize[kVPlaneIndex] = frame_buffer->StrideV();
+  av_frame->data[kYPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataY();
+  av_frame->linesize[kYPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideY();
+  av_frame->data[kUPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataU();
+  av_frame->linesize[kUPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideU();
+  av_frame->data[kVPlaneIndex] =
+      video_frame->video_frame_buffer()->MutableDataV();
+  av_frame->linesize[kVPlaneIndex] =
+      video_frame->video_frame_buffer()->StrideV();
   RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);

-  // Create a VideoFrame object, to keep a reference to the buffer.
-  // TODO(nisse): The VideoFrame's timestamp and rotation info is not used.
-  // Refactor to do not use a VideoFrame object at all.
-  av_frame->buf[0] = av_buffer_create(
-      av_frame->data[kYPlaneIndex],
-      total_size,
-      AVFreeBuffer2,
-      static_cast<void*>(new VideoFrame(frame_buffer,
-                                        0 /* timestamp */,
-                                        0 /* render_time_ms */,
-                                        kVideoRotation_0)),
-      0);
+  av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
+                                      total_size,
+                                      AVFreeBuffer2,
+                                      static_cast<void*>(video_frame),
+                                      0);
   RTC_CHECK(av_frame->buf[0]);
   return 0;
 }
@@ -137,7 +137,8 @@ int I420Encoder::RegisterEncodeCompleteCallback(
 }

 I420Decoder::I420Decoder()
-    : _width(0),
+    : _decodedImage(),
+      _width(0),
       _height(0),
       _inited(false),
       _decodeCompleteCallback(NULL) {}
@@ -198,19 +199,17 @@ int I420Decoder::Decode(const EncodedImage& inputImage,
   }
   // Set decoded image parameters.
   int half_width = (_width + 1) / 2;
-  rtc::scoped_refptr<webrtc::I420Buffer> frame_buffer =
-      I420Buffer::Create(_width, _height, _width, half_width, half_width);
-
-  // Converting from raw buffer I420Buffer.
+  _decodedImage.CreateEmptyFrame(_width, _height, _width, half_width,
+                                 half_width);
+  // Converting from buffer to plane representation.
   int ret = ConvertToI420(kI420, buffer, 0, 0, _width, _height, 0,
-                          kVideoRotation_0, frame_buffer.get());
+                          kVideoRotation_0, &_decodedImage);
   if (ret < 0) {
     return WEBRTC_VIDEO_CODEC_MEMORY;
   }
+  _decodedImage.set_timestamp(inputImage._timeStamp);

-  VideoFrame decoded_image(frame_buffer, inputImage._timeStamp, 0,
-                           webrtc::kVideoRotation_0);
-  _decodeCompleteCallback->Decoded(decoded_image);
+  _decodeCompleteCallback->Decoded(_decodedImage);
   return WEBRTC_VIDEO_CODEC_OK;
 }
@@ -298,10 +298,11 @@ int SimulcastEncoderAdapter::Encode(
         return ret;
       }
     } else {
+      VideoFrame dst_frame;
+      // Making sure that destination frame is of sufficient size.
       // Aligning stride values based on width.
-      rtc::scoped_refptr<I420Buffer> dst_buffer =
-          I420Buffer::Create(dst_width, dst_height, dst_width,
-                             (dst_width + 1) / 2, (dst_width + 1) / 2);
+      dst_frame.CreateEmptyFrame(dst_width, dst_height, dst_width,
+                                 (dst_width + 1) / 2, (dst_width + 1) / 2);
       libyuv::I420Scale(input_image.video_frame_buffer()->DataY(),
                         input_image.video_frame_buffer()->StrideY(),
                         input_image.video_frame_buffer()->DataU(),
@@ -309,16 +310,18 @@ int SimulcastEncoderAdapter::Encode(
                         input_image.video_frame_buffer()->DataV(),
                         input_image.video_frame_buffer()->StrideV(),
                         src_width, src_height,
-                        dst_buffer->MutableDataY(), dst_buffer->StrideY(),
-                        dst_buffer->MutableDataU(), dst_buffer->StrideU(),
-                        dst_buffer->MutableDataV(), dst_buffer->StrideV(),
+                        dst_frame.video_frame_buffer()->MutableDataY(),
+                        dst_frame.video_frame_buffer()->StrideY(),
+                        dst_frame.video_frame_buffer()->MutableDataU(),
+                        dst_frame.video_frame_buffer()->StrideU(),
+                        dst_frame.video_frame_buffer()->MutableDataV(),
+                        dst_frame.video_frame_buffer()->StrideV(),
                         dst_width, dst_height,
                         libyuv::kFilterBilinear);
-
+      dst_frame.set_timestamp(input_image.timestamp());
+      dst_frame.set_render_time_ms(input_image.render_time_ms());
       int ret = streaminfos_[stream_idx].encoder->Encode(
-          VideoFrame(dst_buffer, input_image.timestamp(),
-                     input_image.render_time_ms(), webrtc::kVideoRotation_0),
-          codec_specific_info, &stream_frame_types);
+          dst_frame, codec_specific_info, &stream_frame_types);
       if (ret != WEBRTC_VIDEO_CODEC_OK) {
         return ret;
       }
@@ -535,11 +535,17 @@ TEST_F(TestSimulcastEncoderAdapterFake, TestFailureReturnCodesFromEncodeCalls) {
       .WillOnce(Return(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE));

   // Send a fake frame and assert the return is software fallback.
+  VideoFrame input_frame;
   int half_width = (kDefaultWidth + 1) / 2;
-  rtc::scoped_refptr<I420Buffer> input_buffer = I420Buffer::Create(
-      kDefaultWidth, kDefaultHeight, kDefaultWidth, half_width, half_width);
-  input_buffer->InitializeData();
-  VideoFrame input_frame(input_buffer, 0, 0, webrtc::kVideoRotation_0);
+  input_frame.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
+                               half_width, half_width);
+  memset(input_frame.video_frame_buffer()->MutableDataY(), 0,
+         input_frame.allocated_size(kYPlane));
+  memset(input_frame.video_frame_buffer()->MutableDataU(), 0,
+         input_frame.allocated_size(kUPlane));
+  memset(input_frame.video_frame_buffer()->MutableDataV(), 0,
+         input_frame.allocated_size(kVPlane));
+
   std::vector<FrameType> frame_types(3, kVideoFrameKey);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE,
             adapter_->Encode(input_frame, nullptr, &frame_types));
@@ -236,8 +236,8 @@ class TestVp8Simulcast : public ::testing::Test {
     }
   }

-  // Fills in an I420Buffer from |plane_colors|.
-  static void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
+  // Fills in an VideoFrameBuffer from |plane_colors|.
+  static void CreateImage(const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
                           int plane_colors[kNumOfPlanes]) {
     int width = buffer->width();
     int height = buffer->height();
@@ -317,11 +317,14 @@ class TestVp8Simulcast : public ::testing::Test {
     EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
     EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
     int half_width = (kDefaultWidth + 1) / 2;
-    input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight,
-                                       kDefaultWidth, half_width, half_width);
-    input_buffer_->InitializeData();
-    input_frame_.reset(
-        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
+    input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth,
+                                  half_width, half_width);
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
+           input_frame_.allocated_size(kYPlane));
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
+           input_frame_.allocated_size(kUPlane));
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
+           input_frame_.allocated_size(kVPlane));
   }

   virtual void TearDown() {
@@ -393,33 +396,33 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
     ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     frame_types[0] = kVideoFrameKey;
     ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
     frame_types[1] = kVideoFrameKey;
     ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
     frame_types[2] = kVideoFrameKey;
     ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
     ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingAllStreams() {
@@ -428,11 +431,11 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
     ExpectStreams(kVideoFrameKey, 1);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, 1);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingTwoStreams() {
@@ -441,11 +444,11 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
     ExpectStreams(kVideoFrameKey, 1);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, 1);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingTwoStreamsOneMaxedOut() {
@@ -455,11 +458,11 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
     ExpectStreams(kVideoFrameKey, 1);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, 1);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingOneStream() {
@@ -468,11 +471,11 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
     ExpectStreams(kVideoFrameKey, 2);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, 2);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingOneStreamTwoMaxedOut() {
@@ -483,11 +486,11 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
     ExpectStreams(kVideoFrameKey, 2);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, 2);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestSendAllStreams() {
@@ -497,11 +500,11 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
     ExpectStreams(kVideoFrameKey, 3);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, 3);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestDisablingStreams() {
@@ -510,47 +513,47 @@ class TestVp8Simulcast : public ::testing::Test {
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 3);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     ExpectStreams(kVideoFrameDelta, 3);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should only get two streams and padding for one.
     encoder_->SetRates(
         kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
     ExpectStreams(kVideoFrameDelta, 2);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should only get the first stream and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
     ExpectStreams(kVideoFrameDelta, 1);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We don't have enough bitrate for the thumbnail stream, but we should get
     // it anyway with current configuration.
     encoder_->SetRates(kTargetBitrates[0] - 1, 30);
     ExpectStreams(kVideoFrameDelta, 1);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should only get two streams and padding for one.
     encoder_->SetRates(
         kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
     // We get a key frame because a new stream is being enabled.
     ExpectStreams(kVideoFrameKey, 2);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should get all three streams.
     encoder_->SetRates(
         kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
     // We get a key frame because a new stream is being enabled.
     ExpectStreams(kVideoFrameKey, 3);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void SwitchingToOneStream(int width, int height) {
@@ -568,12 +571,14 @@ class TestVp8Simulcast : public ::testing::Test {
     }
     // Setting input image to new resolution.
     int half_width = (settings_.width + 1) / 2;
-    input_buffer_ = I420Buffer::Create(settings_.width, settings_.height,
-                                       settings_.width, half_width, half_width);
-    input_buffer_->InitializeData();
-
-    input_frame_.reset(
-        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
+    input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
+                                  settings_.width, half_width, half_width);
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
+           input_frame_.allocated_size(kYPlane));
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
+           input_frame_.allocated_size(kUPlane));
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
+           input_frame_.allocated_size(kVPlane));

     // The for loop above did not set the bitrate of the highest layer.
     settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
@@ -598,7 +603,7 @@ class TestVp8Simulcast : public ::testing::Test {
         .Times(1)
         .WillRepeatedly(Return(
             EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // Switch back.
     DefaultSettings(&settings_, kDefaultTemporalLayerProfile);
@@ -609,12 +614,15 @@ class TestVp8Simulcast : public ::testing::Test {
     ExpectStreams(kVideoFrameKey, 1);
     // Resize |input_frame_| to the new resolution.
     half_width = (settings_.width + 1) / 2;
-    input_buffer_ = I420Buffer::Create(settings_.width, settings_.height,
-                                       settings_.width, half_width, half_width);
-    input_buffer_->InitializeData();
-    input_frame_.reset(
-        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
+    input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
+                                  settings_.width, half_width, half_width);
+    memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
+           input_frame_.allocated_size(kYPlane));
+    memset(input_frame_.video_frame_buffer()->MutableDataU(), 0,
+           input_frame_.allocated_size(kUPlane));
+    memset(input_frame_.video_frame_buffer()->MutableDataV(), 0,
+           input_frame_.allocated_size(kVPlane));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }
@@ -629,7 +637,7 @@ class TestVp8Simulcast : public ::testing::Test {

     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.

-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     int picture_id = -1;
     int temporal_layer = -1;
     bool layer_sync = false;
@@ -639,22 +647,22 @@ class TestVp8Simulcast : public ::testing::Test {
     EXPECT_TRUE(layer_sync);
     int key_frame_picture_id = picture_id;

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                              &layer_sync, 0);
     EXPECT_EQ(2, temporal_layer);
     EXPECT_TRUE(layer_sync);

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                              &layer_sync, 0);
     EXPECT_EQ(1, temporal_layer);
     EXPECT_TRUE(layer_sync);

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                              &layer_sync, 0);
     EXPECT_EQ(2, temporal_layer);
@@ -667,8 +675,8 @@ class TestVp8Simulcast : public ::testing::Test {
     // Must match last key frame to trigger.
     codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id;

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
     encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                              &layer_sync, 0);

@@ -678,8 +686,8 @@ class TestVp8Simulcast : public ::testing::Test {
     // Must match last key frame to trigger, test bad id.
     codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id + 17;

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));
     encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                              &layer_sync, 0);

@@ -703,9 +711,9 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(input_buffer_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     int picture_id = -1;
     int temporal_layer = -1;
     bool layer_sync = false;
@@ -719,27 +727,27 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(input_buffer_, plane_offset);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

     // Change color.
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(input_buffer_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

     // Change color.
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(input_buffer_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

     CodecSpecificInfo codec_specific;
     codec_specific.codecType = kVideoCodecVP8;
@@ -751,10 +759,10 @@ class TestVp8Simulcast : public ::testing::Test {
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(input_buffer_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL));

     EncodedImage encoded_frame;
     encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
@@ -776,47 +784,47 @@ class TestVp8Simulcast : public ::testing::Test {
     bool expected_layer_sync[3] = {false, false, false};

     // First frame: #0.
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
     SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #1.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
     SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #2.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
     SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #3.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #4.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #5.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -845,47 +853,47 @@ class TestVp8Simulcast : public ::testing::Test {
     bool expected_layer_sync[3] = {false, false, false};

     // First frame: #0.
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
     SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #1.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
     SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #2.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
     SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #3.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #4.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
         &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

     // Next frame: #5.
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));
     SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
     SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
     VerifyTemporalIdxAndSyncForAllSpatialLayers(
@@ -903,27 +911,24 @@ class TestVp8Simulcast : public ::testing::Test {
     // 1. stride > width 2. stride_y != stride_uv/2
     int stride_y = kDefaultWidth + 20;
     int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
-    input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
-                                       stride_uv, stride_uv);
-    input_frame_.reset(
-        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
-
+    input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y,
+                                  stride_uv, stride_uv);
     // Set color.
     int plane_offset[kNumOfPlanes];
     plane_offset[kYPlane] = kColorY;
     plane_offset[kUPlane] = kColorU;
     plane_offset[kVPlane] = kColorV;
-    CreateImage(input_buffer_, plane_offset);
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);

-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

     // Change color.
     plane_offset[kYPlane] += 1;
     plane_offset[kUPlane] += 1;
     plane_offset[kVPlane] += 1;
-    CreateImage(input_buffer_, plane_offset);
-    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
-    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
+    CreateImage(input_frame_.video_frame_buffer(), plane_offset);
+    input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
+    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL));

     EncodedImage encoded_frame;
     // Only encoding one frame - so will be a key frame.
@@ -963,8 +968,7 @@ class TestVp8Simulcast : public ::testing::Test {
   std::unique_ptr<VP8Decoder> decoder_;
   MockDecodedImageCallback decoder_callback_;
   VideoCodec settings_;
-  rtc::scoped_refptr<I420Buffer> input_buffer_;
-  std::unique_ptr<VideoFrame> input_frame_;
+  VideoFrame input_frame_;
 };

 }  // namespace testing
@@ -147,15 +147,13 @@ class TestVp8Impl : public ::testing::Test {
     EXPECT_EQ(stride_y, 176);
     EXPECT_EQ(stride_uv, 96);

-    rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(
-        codec_inst_.width, codec_inst_.height, stride_y, stride_uv, stride_uv);
+    input_frame_.CreateEmptyFrame(codec_inst_.width, codec_inst_.height,
+                                  stride_y, stride_uv, stride_uv);
+    input_frame_.set_timestamp(kTestTimestamp);
     // Using ConvertToI420 to add stride to the image.
-    EXPECT_EQ(
-        0, ConvertToI420(kI420, source_buffer_.get(), 0, 0, codec_inst_.width,
-                         codec_inst_.height, 0, kVideoRotation_0,
-                         buffer.get()));
-    input_frame_.reset(
-        new VideoFrame(buffer, kTestTimestamp, 0, webrtc::kVideoRotation_0));
+    EXPECT_EQ(0, ConvertToI420(kI420, source_buffer_.get(), 0, 0,
+                               codec_inst_.width, codec_inst_.height, 0,
+                               kVideoRotation_0, &input_frame_));
   }

   void SetUpEncodeDecode() {
@@ -197,7 +195,7 @@ class TestVp8Impl : public ::testing::Test {
   std::unique_ptr<Vp8UnitTestDecodeCompleteCallback> decode_complete_callback_;
   std::unique_ptr<uint8_t[]> source_buffer_;
   FILE* source_file_;
-  std::unique_ptr<VideoFrame> input_frame_;
+  VideoFrame input_frame_;
   std::unique_ptr<VideoEncoder> encoder_;
   std::unique_ptr<VideoDecoder> decoder_;
   EncodedImage encoded_frame_;
@@ -239,7 +237,7 @@ TEST_F(TestVp8Impl, EncoderParameterTest) {
 #endif
 TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
   SetUpEncodeDecode();
-  encoder_->Encode(*input_frame_, NULL, NULL);
+  encoder_->Encode(input_frame_, NULL, NULL);
   EXPECT_GT(WaitForEncodedFrame(), 0u);
   // First frame should be a key frame.
   encoded_frame_._frameType = kVideoFrameKey;
@@ -248,7 +246,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
             decoder_->Decode(encoded_frame_, false, NULL));
   EXPECT_GT(WaitForDecodedFrame(), 0u);
   // Compute PSNR on all planes (faster than SSIM).
-  EXPECT_GT(I420PSNR(input_frame_.get(), &decoded_frame_), 36);
+  EXPECT_GT(I420PSNR(&input_frame_, &decoded_frame_), 36);
   EXPECT_EQ(kTestTimestamp, decoded_frame_.timestamp());
   EXPECT_EQ(kTestNtpTimeMs, decoded_frame_.ntp_time_ms());
 }
@@ -260,7 +258,7 @@ TEST_F(TestVp8Impl, MAYBE_AlignedStrideEncodeDecode) {
 #endif
 TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
   SetUpEncodeDecode();
-  encoder_->Encode(*input_frame_, NULL, NULL);
+  encoder_->Encode(input_frame_, NULL, NULL);
   EXPECT_GT(WaitForEncodedFrame(), 0u);
   // Setting complete to false -> should return an error.
   encoded_frame_._completeFrame = false;
@@ -275,7 +273,7 @@ TEST_F(TestVp8Impl, MAYBE_DecodeWithACompleteKeyFrame) {
   encoded_frame_._frameType = kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame_, false, NULL));
-  EXPECT_GT(I420PSNR(input_frame_.get(), &decoded_frame_), 36);
+  EXPECT_GT(I420PSNR(&input_frame_, &decoded_frame_), 36);
 }

 }  // namespace webrtc
@@ -1306,18 +1306,18 @@ int VP8DecoderImpl::ReturnFrame(const vpx_image_t* img,
   last_frame_width_ = img->d_w;
   last_frame_height_ = img->d_h;
   // Allocate memory for decoded image.
-  rtc::scoped_refptr<I420Buffer> buffer =
-      buffer_pool_.CreateBuffer(img->d_w, img->d_h);
-
+  VideoFrame decoded_image(buffer_pool_.CreateBuffer(img->d_w, img->d_h),
+                           timestamp, 0, kVideoRotation_0);
   libyuv::I420Copy(img->planes[VPX_PLANE_Y], img->stride[VPX_PLANE_Y],
                    img->planes[VPX_PLANE_U], img->stride[VPX_PLANE_U],
                    img->planes[VPX_PLANE_V], img->stride[VPX_PLANE_V],
-                   buffer->MutableDataY(), buffer->StrideY(),
-                   buffer->MutableDataU(), buffer->StrideU(),
-                   buffer->MutableDataV(), buffer->StrideV(),
+                   decoded_image.video_frame_buffer()->MutableDataY(),
+                   decoded_image.video_frame_buffer()->StrideY(),
+                   decoded_image.video_frame_buffer()->MutableDataU(),
+                   decoded_image.video_frame_buffer()->StrideU(),
+                   decoded_image.video_frame_buffer()->MutableDataV(),
+                   decoded_image.video_frame_buffer()->StrideV(),
                    img->d_w, img->d_h);
-
-  VideoFrame decoded_image(buffer, timestamp, 0, kVideoRotation_0);
   decoded_image.set_ntp_time_ms(ntp_time_ms);
   int ret = decode_complete_callback_->Decoded(decoded_image);
   if (ret != 0)
@@ -148,7 +148,7 @@ int SequenceCoder(webrtc::test::CommandLineParser* parser) {
     return -1;
   }
   EXPECT_EQ(0, decoder->InitDecode(&inst, 1));
-
+  webrtc::VideoFrame input_frame;
   size_t length = webrtc::CalcBufferSize(webrtc::kI420, width, height);
   std::unique_ptr<uint8_t[]> frame_buffer(new uint8_t[length]);

@@ -163,18 +163,14 @@ int SequenceCoder(webrtc::test::CommandLineParser* parser) {
   int64_t starttime = rtc::TimeMillis();
   int frame_cnt = 1;
   int frames_processed = 0;
-  rtc::scoped_refptr<webrtc::I420Buffer> i420_buffer =
-      webrtc::I420Buffer::Create(width, height, width, half_width, half_width);
-
+  input_frame.CreateEmptyFrame(width, height, width, half_width, half_width);
   while (!feof(input_file) &&
          (num_frames == -1 || frames_processed < num_frames)) {
     if (fread(frame_buffer.get(), 1, length, input_file) != length)
       continue;
     if (frame_cnt >= start_frame) {
       webrtc::ConvertToI420(webrtc::kI420, frame_buffer.get(), 0, 0, width,
-                            height, 0, webrtc::kVideoRotation_0, &i420_buffer);
-      webrtc::VideoFrame input_frame(i420_buffer, 0, 0,
-                                     webrtc::kVideoRotation_0);
+                            height, 0, webrtc::kVideoRotation_0, &input_frame);
       encoder->Encode(input_frame, NULL, NULL);
       decoder->Decode(encoder_callback.encoded_image(), false, NULL);
       ++frames_processed;