Remove VideoFrameType aliases for FrameType.
No longer used in Chromium, so these can now be removed.

BUG=webrtc:5042
R=mflodman@webrtc.org
TBR=magjed@webrtc.org

Review URL: https://codereview.webrtc.org/1415693002 .

Cr-Commit-Position: refs/heads/master@{#10390}
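For reference, a minimal sketch of what remains after this change, reconstructed from the common_types.h hunk below (members before kAudioFrameCN are elided because the hunk does not show them; the IsKeyFrameRequested helper is purely illustrative and not part of this commit, though it mirrors the scan pattern the encoders below use):

#include <vector>

// Enum members as they appear in the common_types.h hunk of this commit;
// earlier members are omitted here since the hunk does not show them.
enum FrameType {
  kAudioFrameCN = 2,
  kVideoFrameKey = 3,
  kVideoFrameDelta = 4,
  // kKeyFrame, kDeltaFrame and `using VideoFrameType = FrameType;` no
  // longer exist after this commit; callers must use the kVideo-prefixed
  // names.
};

// Illustrative helper (an assumption, not part of the commit): honor a
// per-stream key frame request by scanning the requested frame types.
inline bool IsKeyFrameRequested(const std::vector<FrameType>& frame_types) {
  for (FrameType type : frame_types) {
    if (type == kVideoFrameKey)
      return true;
  }
  return false;
}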
@@ -474,7 +474,7 @@ int32_t MediaCodecVideoDecoder::Decode(
 
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (inputImage._frameType != webrtc::kKeyFrame) {
+    if (inputImage._frameType != webrtc::kVideoFrameKey) {
       ALOGE << "Decode() - key frame is required";
       return WEBRTC_VIDEO_CODEC_ERROR;
     }
@@ -590,7 +590,7 @@ int32_t MediaCodecVideoEncoder::EncodeOnCodecThread(
   render_times_ms_.push_back(input_frame.render_time_ms());
   frame_rtc_times_ms_.push_back(GetCurrentTimeMs());
 
-  bool key_frame = frame_types->front() != webrtc::kDeltaFrame;
+  bool key_frame = frame_types->front() != webrtc::kVideoFrameDelta;
   bool encode_status = jni->CallBooleanMethod(*j_media_codec_video_encoder_,
                                               j_encode_method_,
                                               key_frame,
@@ -769,7 +769,8 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
     image->_encodedHeight = height_;
     image->_timeStamp = output_timestamp_;
     image->capture_time_ms_ = output_render_time_ms_;
-    image->_frameType = (key_frame ? webrtc::kKeyFrame : webrtc::kDeltaFrame);
+    image->_frameType =
+        (key_frame ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta);
     image->_completeFrame = true;
 
     webrtc::CodecSpecificInfo info;
@@ -162,16 +162,8 @@ enum FrameType {
   kAudioFrameCN = 2,
   kVideoFrameKey = 3,
   kVideoFrameDelta = 4,
-  // TODO(pbos): Remove below aliases (non-kVideo prefixed) as soon as no
-  // VideoEncoder implementation in Chromium uses them.
-  kKeyFrame = kVideoFrameKey,
-  kDeltaFrame = kVideoFrameDelta,
 };
 
-// TODO(pbos): Remove VideoFrameType when VideoEncoder implementations no longer
-// depend on it.
-using VideoFrameType = FrameType;
-
 // Statistics for an RTCP channel
 struct RtcpStatistics {
   RtcpStatistics()
@@ -198,7 +198,8 @@ void VTCompressionOutputCallback(void* encoder,
   frame._encodedWidth = encode_params->width;
   frame._encodedHeight = encode_params->height;
   frame._completeFrame = true;
-  frame._frameType = is_keyframe ? webrtc::kKeyFrame : webrtc::kDeltaFrame;
+  frame._frameType =
+      is_keyframe ? webrtc::kVideoFrameKey : webrtc::kVideoFrameDelta;
   frame.capture_time_ms_ = encode_params->render_time_ms;
   frame._timeStamp = encode_params->timestamp;
 
@@ -277,7 +278,7 @@ int H264VideoToolboxEncoder::Encode(
   bool is_keyframe_required = false;
   if (frame_types) {
     for (auto frame_type : *frame_types) {
-      if (frame_type == kKeyFrame) {
+      if (frame_type == kVideoFrameKey) {
         is_keyframe_required = true;
         break;
       }
@@ -82,7 +82,7 @@ int I420Encoder::Encode(const VideoFrame& inputImage,
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
 
-  _encodedImage._frameType = kKeyFrame;
+  _encodedImage._frameType = kVideoFrameKey;
   _encodedImage._timeStamp = inputImage.timestamp();
   _encodedImage._encodedHeight = inputImage.height();
   _encodedImage._encodedWidth = inputImage.width();
@@ -32,7 +32,7 @@ FrameStatistic::FrameStatistic()
       total_packets(0),
       bit_rate_in_kbps(0),
       encoded_frame_length_in_bytes(0),
-      frame_type(kDeltaFrame) {}
+      frame_type(kVideoFrameDelta) {}
 
 Stats::Stats() {}
 
@@ -83,7 +83,7 @@ void Stats::PrintSummary() {
     total_encoding_time_in_us += it->encode_time_in_us;
     total_decoding_time_in_us += it->decode_time_in_us;
     total_encoded_frames_lengths += it->encoded_frame_length_in_bytes;
-    if (it->frame_type == webrtc::kKeyFrame) {
+    if (it->frame_type == webrtc::kVideoFrameKey) {
      total_encoded_key_frames_lengths += it->encoded_frame_length_in_bytes;
      nbr_keyframes++;
    } else {
@@ -59,7 +59,7 @@ VideoProcessorImpl::VideoProcessorImpl(webrtc::VideoEncoder* encoder,
       last_frame_missing_(false),
       initialized_(false),
       encoded_frame_size_(0),
-      encoded_frame_type_(kKeyFrame),
+      encoded_frame_type_(kVideoFrameKey),
       prev_time_stamp_(0),
       num_dropped_frames_(0),
       num_spatial_resizes_(0),
@@ -199,15 +199,15 @@ bool VideoProcessorImpl::ProcessFrame(int frame_number) {
   source_frame_.set_timestamp(frame_number);
 
   // Decide if we're going to force a keyframe:
-  std::vector<FrameType> frame_types(1, kDeltaFrame);
+  std::vector<FrameType> frame_types(1, kVideoFrameDelta);
   if (config_.keyframe_interval > 0 &&
       frame_number % config_.keyframe_interval == 0) {
-    frame_types[0] = kKeyFrame;
+    frame_types[0] = kVideoFrameKey;
   }
 
   // For dropped frames, we regard them as zero size encoded frames.
   encoded_frame_size_ = 0;
-  encoded_frame_type_ = kDeltaFrame;
+  encoded_frame_type_ = kVideoFrameDelta;
 
   int32_t encode_result = encoder_->Encode(source_frame_, NULL, &frame_types);
 
@@ -257,7 +257,7 @@ void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) {
   // Perform packet loss if criteria is fullfilled:
   bool exclude_this_frame = false;
   // Only keyframes can be excluded
-  if (encoded_image._frameType == kKeyFrame) {
+  if (encoded_image._frameType == kVideoFrameKey) {
     switch (config_.exclude_frame_types) {
       case kExcludeOnlyFirstKeyFrame:
         if (!first_key_frame_has_been_excluded_) {
@@ -272,7 +272,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
     float encoded_size_kbits = processor_->EncodedFrameSize() * 8.0f / 1000.0f;
     // Update layer data.
     // Update rate mismatch relative to per-frame bandwidth for delta frames.
-    if (frame_type == kDeltaFrame) {
+    if (frame_type == kVideoFrameDelta) {
       // TODO(marpan): Should we count dropped (zero size) frames in mismatch?
       sum_frame_size_mismatch_[layer_] += fabs(encoded_size_kbits -
                                                per_frame_bandwidth_[layer_]) /
@@ -450,7 +450,7 @@ class VideoProcessorIntegrationTest: public testing::Test {
     ResetRateControlMetrics(
         rate_profile.frame_index_rate_update[update_index + 1]);
     int frame_number = 0;
-    FrameType frame_type = kDeltaFrame;
+    FrameType frame_type = kVideoFrameDelta;
     while (processor_->ProcessFrame(frame_number) &&
            frame_number < num_frames) {
       // Get the layer index for the frame |frame_number|.
@@ -425,7 +425,7 @@ void PrintPythonOutput(const webrtc::test::TestConfig& config,
            f.decode_return_code,
            f.bit_rate_in_kbps,
            f.encoded_frame_length_in_bytes,
-           f.frame_type == webrtc::kDeltaFrame ? "'Delta'" : "'Other'",
+           f.frame_type == webrtc::kVideoFrameDelta ? "'Delta'" : "'Other'",
            f.packets_dropped,
            f.total_packets,
            ssim.value,
@@ -246,7 +246,7 @@ int SimulcastEncoderAdapter::Encode(
   bool send_key_frame = false;
   if (frame_types) {
     for (size_t i = 0; i < frame_types->size(); ++i) {
-      if (frame_types->at(i) == kKeyFrame) {
+      if (frame_types->at(i) == kVideoFrameKey) {
         send_key_frame = true;
         break;
       }
@@ -269,10 +269,10 @@ int SimulcastEncoderAdapter::Encode(
 
     std::vector<FrameType> stream_frame_types;
     if (send_key_frame) {
-      stream_frame_types.push_back(kKeyFrame);
+      stream_frame_types.push_back(kVideoFrameKey);
       streaminfos_[stream_idx].key_frame_request = false;
     } else {
-      stream_frame_types.push_back(kDeltaFrame);
+      stream_frame_types.push_back(kVideoFrameDelta);
     }
 
     int dst_width = streaminfos_[stream_idx].width;
@@ -70,12 +70,12 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
       const RTPFragmentationHeader* fragmentation) {
     // Only store the base layer.
     if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
-      if (encoded_image._frameType == kKeyFrame) {
+      if (encoded_image._frameType == kVideoFrameKey) {
         delete [] encoded_key_frame_._buffer;
         encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
         encoded_key_frame_._size = encoded_image._size;
         encoded_key_frame_._length = encoded_image._length;
-        encoded_key_frame_._frameType = kKeyFrame;
+        encoded_key_frame_._frameType = kVideoFrameKey;
         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
         memcpy(encoded_key_frame_._buffer,
                encoded_image._buffer,
@@ -389,33 +389,34 @@ class TestVp8Simulcast : public ::testing::Test {
   // a key frame was only requested for some of them.
   void TestKeyFrameRequestsOnAllStreams() {
     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, kNumberOfSimulcastStreams);
+    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    frame_types[0] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    frame_types[0] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    frame_types[1] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    frame_types[1] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    frame_types[2] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    frame_types[2] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    ExpectStreams(kDeltaFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -423,11 +424,12 @@ class TestVp8Simulcast : public ::testing::Test {
   void TestPaddingAllStreams() {
     // We should always encode the base layer.
     encoder_->SetRates(kMinBitrates[0] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -435,11 +437,12 @@ class TestVp8Simulcast : public ::testing::Test {
   void TestPaddingTwoStreams() {
     // We have just enough to get only the first stream and padding for two.
     encoder_->SetRates(kMinBitrates[0], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
    EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -448,11 +451,12 @@ class TestVp8Simulcast : public ::testing::Test {
     // We are just below limit of sending second stream, so we should get
     // the first stream maxed out (at |maxBitrate|), and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -460,11 +464,12 @@ class TestVp8Simulcast : public ::testing::Test {
   void TestPaddingOneStream() {
     // We have just enough to send two streams, so padding for one stream.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 2);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -474,11 +479,12 @@ class TestVp8Simulcast : public ::testing::Test {
     // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 2);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -487,11 +493,12 @@ class TestVp8Simulcast : public ::testing::Test {
     // We have just enough to send all streams.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 3);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 3);
+    ExpectStreams(kVideoFrameDelta, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -500,31 +507,32 @@ class TestVp8Simulcast : public ::testing::Test {
     // We should get three media streams.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
                        kMaxBitrates[2], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 3);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
-    ExpectStreams(kDeltaFrame, 3);
+    ExpectStreams(kVideoFrameDelta, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // We should only get two streams and padding for one.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] / 2, 30);
-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // We should only get the first stream and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // We don't have enough bitrate for the thumbnail stream, but we should get
     // it anyway with current configuration.
     encoder_->SetRates(kTargetBitrates[0] - 1, 30);
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -532,7 +540,7 @@ class TestVp8Simulcast : public ::testing::Test {
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] / 2, 30);
     // We get a key frame because a new stream is being enabled.
-    ExpectStreams(kKeyFrame, 2);
+    ExpectStreams(kVideoFrameKey, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -540,7 +548,7 @@ class TestVp8Simulcast : public ::testing::Test {
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kTargetBitrates[2], 30);
     // We get a key frame because a new stream is being enabled.
-    ExpectStreams(kKeyFrame, 3);
+    ExpectStreams(kVideoFrameKey, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }
@@ -581,11 +589,13 @@ class TestVp8Simulcast : public ::testing::Test {
 
     // Encode one frame and verify.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    EXPECT_CALL(encoder_callback_, Encoded(
-        AllOf(Field(&EncodedImage::_frameType, kKeyFrame),
-              Field(&EncodedImage::_encodedWidth, width),
-              Field(&EncodedImage::_encodedHeight, height)), _, _))
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    EXPECT_CALL(encoder_callback_,
+                Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+                              Field(&EncodedImage::_encodedWidth, width),
+                              Field(&EncodedImage::_encodedHeight, height)),
+                        _, _))
         .Times(1)
         .WillRepeatedly(Return(0));
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
@@ -596,7 +606,7 @@ class TestVp8Simulcast : public ::testing::Test {
     settings_.startBitrate = kMinBitrates[0];
     EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
     encoder_->SetRates(settings_.startBitrate, 30);
-    ExpectStreams(kKeyFrame, 1);
+    ExpectStreams(kVideoFrameKey, 1);
     // Resize |input_frame_| to the new resolution.
     half_width = (settings_.width + 1) / 2;
     input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
@@ -221,7 +221,7 @@ TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(AlignedStrideEncodeDecode)) {
   encoder_->Encode(input_frame_, NULL, NULL);
   EXPECT_GT(WaitForEncodedFrame(), 0u);
   // First frame should be a key frame.
-  encoded_frame_._frameType = kKeyFrame;
+  encoded_frame_._frameType = kVideoFrameKey;
   encoded_frame_.ntp_time_ms_ = kTestNtpTimeMs;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame_, false, NULL));
@@ -241,12 +241,12 @@ TEST_F(TestVp8Impl, DISABLED_ON_ANDROID(DecodeWithACompleteKeyFrame)) {
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame_, false, NULL));
   // Setting complete back to true. Forcing a delta frame.
-  encoded_frame_._frameType = kDeltaFrame;
+  encoded_frame_._frameType = kVideoFrameDelta;
   encoded_frame_._completeFrame = true;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_ERROR,
             decoder_->Decode(encoded_frame_, false, NULL));
   // Now setting a key frame.
-  encoded_frame_._frameType = kKeyFrame;
+  encoded_frame_._frameType = kVideoFrameKey;
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             decoder_->Decode(encoded_frame_, false, NULL));
   EXPECT_GT(I420PSNR(&input_frame_, &decoded_frame_), 36);
@@ -792,7 +792,7 @@ int VP8EncoderImpl::Encode(const VideoFrame& frame,
   if (!send_key_frame && frame_types) {
     for (size_t i = 0; i < frame_types->size() && i < send_stream_.size();
          ++i) {
-      if ((*frame_types)[i] == kKeyFrame && send_stream_[i]) {
+      if ((*frame_types)[i] == kVideoFrameKey && send_stream_[i]) {
         send_key_frame = true;
         break;
       }
@@ -970,7 +970,7 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
   vpx_codec_iter_t iter = NULL;
   int part_idx = 0;
   encoded_images_[encoder_idx]._length = 0;
-  encoded_images_[encoder_idx]._frameType = kDeltaFrame;
+  encoded_images_[encoder_idx]._frameType = kVideoFrameDelta;
   RTPFragmentationHeader frag_info;
   // token_partitions_ is number of bits used.
   frag_info.VerifyAndAllocateFragmentationHeader((1 << token_partitions_)
@@ -1001,7 +1001,7 @@ int VP8EncoderImpl::GetEncodedPartitions(const VideoFrame& input_image,
       if ((pkt->data.frame.flags & VPX_FRAME_IS_FRAGMENT) == 0) {
         // check if encoded frame is a key frame
         if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
-          encoded_images_[encoder_idx]._frameType = kKeyFrame;
+          encoded_images_[encoder_idx]._frameType = kVideoFrameKey;
           rps_.EncodedKeyFrame(picture_id_[stream_idx]);
         }
         PopulateCodecSpecific(&codec_specific, *pkt, stream_idx,
@@ -1172,7 +1172,7 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
 
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kKeyFrame)
+    if (input_image._frameType != kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
@@ -1185,7 +1185,8 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
   // the feedback mode is enabled (RPS).
   // Reset on a key frame refresh.
   if (!feedback_mode_) {
-    if (input_image._frameType == kKeyFrame && input_image._completeFrame) {
+    if (input_image._frameType == kVideoFrameKey &&
+        input_image._completeFrame) {
       propagation_cnt_ = -1;
     // Start count on first loss.
     } else if ((!input_image._completeFrame || missing_frames) &&
@@ -1238,7 +1239,7 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
 #endif
 
   // Store encoded frame if key frame. (Used in Copy method.)
-  if (input_image._frameType == kKeyFrame && input_image._buffer != NULL) {
+  if (input_image._frameType == kVideoFrameKey && input_image._buffer != NULL) {
     const uint32_t bytes_to_copy = input_image._length;
     if (last_keyframe_._size < bytes_to_copy) {
       delete [] last_keyframe_._buffer;
@@ -1272,7 +1273,7 @@ int VP8DecoderImpl::Decode(const EncodedImage& input_image,
     // Whenever we receive an incomplete key frame all reference buffers will
     // be corrupt. If that happens we must request new key frames until we
     // decode a complete key frame.
-    if (input_image._frameType == kKeyFrame && !input_image._completeFrame)
+    if (input_image._frameType == kVideoFrameKey && !input_image._completeFrame)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // Check for reference updates and last reference buffer corruption and
     // signal successful reference propagation or frame corruption to the
@@ -432,7 +432,7 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   if (encoded_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  FrameType frame_type = kDeltaFrame;
+  FrameType frame_type = kVideoFrameDelta;
   // We only support one stream at the moment.
   if (frame_types && frame_types->size() > 0) {
     frame_type = (*frame_types)[0];
@@ -456,7 +456,7 @@ int VP9EncoderImpl::Encode(const VideoFrame& input_image,
   raw_->stride[VPX_PLANE_V] = input_image.stride(kVPlane);
 
   int flags = 0;
-  bool send_keyframe = (frame_type == kKeyFrame);
+  bool send_keyframe = (frame_type == kVideoFrameKey);
   if (send_keyframe) {
     // Key frame request from caller.
     flags = VPX_EFLAG_FORCE_KF;
@@ -560,7 +560,7 @@ void VP9EncoderImpl::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
 
 int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   encoded_image_._length = 0;
-  encoded_image_._frameType = kDeltaFrame;
+  encoded_image_._frameType = kVideoFrameDelta;
   RTPFragmentationHeader frag_info;
   // Note: no data partitioning in VP9, so 1 partition only. We keep this
   // fragmentation data for now, until VP9 packetizer is implemented.
@@ -582,7 +582,7 @@ int VP9EncoderImpl::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
   // End of frame.
   // Check if encoded frame is a key frame.
   if (pkt->data.frame.flags & VPX_FRAME_IS_KEY) {
-    encoded_image_._frameType = kKeyFrame;
+    encoded_image_._frameType = kVideoFrameKey;
   }
   PopulateCodecSpecific(&codec_specific, *pkt, input_image_->timestamp());
 
@@ -688,7 +688,7 @@ int VP9DecoderImpl::Decode(const EncodedImage& input_image,
   }
   // Always start with a complete key frame.
   if (key_frame_required_) {
-    if (input_image._frameType != kKeyFrame)
+    if (input_image._frameType != kVideoFrameKey)
       return WEBRTC_VIDEO_CODEC_ERROR;
     // We have a key frame - is it complete?
     if (input_image._completeFrame) {
@@ -89,7 +89,7 @@ void VCMEncodedFrame::Reset()
     _renderTimeMs = -1;
     _timeStamp = 0;
     _payloadType = 0;
-    _frameType = kDeltaFrame;
+    _frameType = kVideoFrameDelta;
     _encodedWidth = 0;
     _encodedHeight = 0;
     _completeFrame = false;
@@ -372,7 +372,7 @@ int32_t MediaOptimization::UpdateWithEncodedData(
   UpdateSentBitrate(now_ms);
   UpdateSentFramerate();
   if (encoded_length > 0) {
-    const bool delta_frame = encoded_image._frameType != kKeyFrame;
+    const bool delta_frame = encoded_image._frameType != kVideoFrameKey;
 
     frame_dropper_->Fill(encoded_length, delta_frame);
     if (max_payload_size_ > 0 && encoded_length > 0) {
@@ -38,7 +38,7 @@ class TestMediaOptimization : public ::testing::Test {
       EncodedImage encoded_image;
       encoded_image._length = bytes_per_frame;
       encoded_image._timeStamp = next_timestamp_;
-      encoded_image._frameType = kKeyFrame;
+      encoded_image._frameType = kVideoFrameKey;
       ASSERT_EQ(VCM_OK, media_opt_.UpdateWithEncodedData(encoded_image));
     }
     next_timestamp_ += frame_time_ms_ * kSampleRate / 1000;
@@ -236,16 +236,16 @@ class TestVideoSenderWithMockEncoder : public TestVideoSender {
       // No intra request expected.
       EXPECT_CALL(
           encoder_,
-          Encode(_,
-                 _,
-                 Pointee(ElementsAre(kDeltaFrame, kDeltaFrame, kDeltaFrame))))
-          .Times(1).WillRepeatedly(Return(0));
+          Encode(_, _, Pointee(ElementsAre(kVideoFrameDelta, kVideoFrameDelta,
+                                           kVideoFrameDelta))))
+          .Times(1)
+          .WillRepeatedly(Return(0));
       return;
     }
     assert(stream >= 0);
     assert(stream < kNumberOfStreams);
-    std::vector<FrameType> frame_types(kNumberOfStreams, kDeltaFrame);
-    frame_types[stream] = kKeyFrame;
+    std::vector<FrameType> frame_types(kNumberOfStreams, kVideoFrameDelta);
+    frame_types[stream] = kVideoFrameKey;
     EXPECT_CALL(
         encoder_,
         Encode(_,
@@ -47,7 +47,7 @@ int32_t ConfigurableFrameSizeEncoder::Encode(
   encodedImage._completeFrame = true;
   encodedImage._encodedHeight = inputImage.height();
   encodedImage._encodedWidth = inputImage.width();
-  encodedImage._frameType = kKeyFrame;
+  encodedImage._frameType = kVideoFrameKey;
   encodedImage._timeStamp = inputImage.timestamp();
   encodedImage.capture_time_ms_ = inputImage.render_time_ms();
   RTPFragmentationHeader* fragmentation = NULL;
@@ -189,7 +189,7 @@ void SendStatisticsProxy::OnSendEncodedImage(
     stats->height = encoded_image._encodedHeight;
     update_times_[ssrc].resolution_update_ms = clock_->TimeInMilliseconds();
 
-  key_frame_counter_.Add(encoded_image._frameType == kKeyFrame);
+  key_frame_counter_.Add(encoded_image._frameType == kVideoFrameKey);
 
   if (encoded_image.adapt_reason_.quality_resolution_downscales != -1) {
     bool downscaled =
@@ -87,7 +87,7 @@ int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
     int64_t render_time_ms) {
   // Try decoding with the provided decoder on every keyframe or when there's no
   // fallback decoder. This is the normal case.
-  if (!fallback_decoder_ || input_image._frameType == kKeyFrame) {
+  if (!fallback_decoder_ || input_image._frameType == kVideoFrameKey) {
     int32_t ret = decoder_->Decode(input_image, missing_frames, fragmentation,
                                    codec_specific_info, render_time_ms);
     if (ret == WEBRTC_VIDEO_CODEC_OK) {
@@ -86,13 +86,13 @@ TEST_F(VideoDecoderSoftwareFallbackWrapperTest,
       << "Decoder used even though fallback should be active.";
 
   // Should be able to recover on a keyframe.
-  encoded_image._frameType = kKeyFrame;
+  encoded_image._frameType = kVideoFrameKey;
   fake_decoder_.decode_return_code_ = WEBRTC_VIDEO_CODEC_OK;
   fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
   EXPECT_EQ(2, fake_decoder_.decode_count_)
       << "Wrapper did not try to decode a keyframe using registered decoder.";
 
-  encoded_image._frameType = kDeltaFrame;
+  encoded_image._frameType = kVideoFrameDelta;
   fallback_wrapper_.Decode(encoded_image, false, nullptr, nullptr, -1);
   EXPECT_EQ(3, fake_decoder_.decode_count_)
       << "Decoder not used on future delta frames.";
@@ -110,7 +110,7 @@ void VideoEncoderSoftwareFallbackWrapperTest::EncodeFrame() {
   memset(frame_.buffer(webrtc::kVPlane), 128,
          frame_.allocated_size(webrtc::kVPlane));
 
-  std::vector<FrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kVideoFrameKey);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
 }
@@ -205,7 +205,7 @@ TEST_F(VideoEncoderSoftwareFallbackWrapperTest,
   EXPECT_EQ(&callback2, fake_encoder_.encode_complete_callback_);
 
   // Encoding a frame using the fallback should arrive at the new callback.
-  std::vector<FrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kVideoFrameKey);
   frame_.set_timestamp(frame_.timestamp() + 1000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
@@ -193,7 +193,7 @@ class EncodedImage {
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms_ = 0;
   int64_t capture_time_ms_ = 0;
-  FrameType _frameType = kDeltaFrame;
+  FrameType _frameType = kVideoFrameDelta;
   uint8_t* _buffer;
   size_t _length;
   size_t _size;