Prepare for splitting FrameType into AudioFrameType and VideoFrameType

This CL deprecates the FrameType enum and adds the aliases AudioFrameType
and VideoFrameType.

After downstream usage has been updated, the enums will be separated
and moved out of common_types.h.
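
A minimal sketch of the transition pattern (assumed shape of the
common_types.h change, not its verbatim contents):

  enum FrameType {
    kEmptyFrame = 0,
    kAudioFrameSpeech = 1,
    kAudioFrameCN = 2,
    kVideoFrameKey = 3,
    kVideoFrameDelta = 4,
  };

  // Transitional aliases. Audio and video code can migrate to the new
  // names independently; once downstream usage is updated, these become
  // two distinct enums in separate headers.
  using AudioFrameType = FrameType;
  using VideoFrameType = FrameType;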

Bug: webrtc:6883
Change-Id: I2aaf660169da45f22574b4cbb16aea8522cc07a6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/123184
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27011}
Author:    Niels Möller
Date:      2019-03-07 10:18:23 +01:00
Committer: Commit Bot
Commit:    87e2d785a0 (parent: 0b69826ffb)

98 changed files with 226 additions and 206 deletions


@@ -64,7 +64,7 @@ int NumberOfThreads(int width, int height, int number_of_cores) {
   return 1;
 }
 
-FrameType ConvertToVideoFrameType(EVideoFrameType type) {
+VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
   switch (type) {
     case videoFrameTypeIDR:
       return kVideoFrameKey;
@@ -381,9 +381,10 @@ int32_t H264EncoderImpl::SetRateAllocation(
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
-                                const CodecSpecificInfo* codec_specific_info,
-                                const std::vector<FrameType>* frame_types) {
+int32_t H264EncoderImpl::Encode(
+    const VideoFrame& input_frame,
+    const CodecSpecificInfo* codec_specific_info,
+    const std::vector<VideoFrameType>* frame_types) {
   if (encoders_.empty()) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;


@@ -68,7 +68,7 @@ class H264EncoderImpl : public H264Encoder {
   // passed to the encode complete callback.
   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
 
   EncoderInfo GetEncoderInfo() const override;
 


@@ -43,7 +43,7 @@ class MultiplexEncoderAdapter : public VideoEncoder {
                  int number_of_cores,
                  size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetRateAllocation(const VideoBitrateAllocation& bitrate,
                         uint32_t new_framerate) override;


@@ -115,11 +115,13 @@ MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) {
       ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
   offset += sizeof(uint32_t);
 
+  // TODO(nisse): This makes the wire format depend on the numeric values of the
+  // VideoCodecType and VideoFrameType enum constants.
   frame_header.codec_type = static_cast<VideoCodecType>(
       ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
   offset += sizeof(uint8_t);
 
-  frame_header.frame_type = static_cast<FrameType>(
+  frame_header.frame_type = static_cast<VideoFrameType>(
       ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
   offset += sizeof(uint8_t);
 
@@ -181,8 +183,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
     // key frame so as to decode the whole image without previous frame data.
     // Thus only when all components are key frames, we can mark the combined
    // frame as key frame.
-    if (frame_header.frame_type == FrameType::kVideoFrameDelta) {
-      combined_image._frameType = FrameType::kVideoFrameDelta;
+    if (frame_header.frame_type == VideoFrameType::kVideoFrameDelta) {
+      combined_image._frameType = VideoFrameType::kVideoFrameDelta;
     }
 
     frame_headers.push_back(frame_header);
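
An aside on the TODO above: casting raw wire bytes straight to an enum ties
the wire format to the enumerators' numeric values. A hedged sketch of the
usual hardening, with a hypothetical helper that is not part of this CL and
case values that assume the legacy FrameType numbering:

  #include "absl/types/optional.h"

  // Pins the wire values explicitly, so renumbering VideoFrameType cannot
  // silently change the wire format, and unknown bytes are rejected instead
  // of being reinterpreted.
  absl::optional<VideoFrameType> DecodeFrameType(uint8_t wire_value) {
    switch (wire_value) {
      case 3:  // Legacy kVideoFrameKey value (assumed).
        return kVideoFrameKey;
      case 4:  // Legacy kVideoFrameDelta value (assumed).
        return kVideoFrameDelta;
      default:
        return absl::nullopt;
    }
  }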


@@ -67,7 +67,7 @@ struct MultiplexImageComponentHeader {
   VideoCodecType codec_type;
 
   // Indicated the underlying frame is a key frame or delta frame.
-  FrameType frame_type;
+  VideoFrameType frame_type;
 };
 
 const int kMultiplexImageComponentHeaderSize =
     sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +


@@ -138,12 +138,12 @@ int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
 
 int MultiplexEncoderAdapter::Encode(
     const VideoFrame& input_image,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   if (!encoded_complete_callback_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
 
-  std::vector<FrameType> adjusted_frame_types;
+  std::vector<VideoFrameType> adjusted_frame_types;
   if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
     adjusted_frame_types.push_back(kVideoFrameKey);
   } else {


@@ -285,9 +285,9 @@ void VideoProcessor::ProcessFrame() {
   }
 
   // Encode.
-  const std::vector<FrameType> frame_types =
-      (frame_number == 0) ? std::vector<FrameType>{kVideoFrameKey}
-                          : std::vector<FrameType>{kVideoFrameDelta};
+  const std::vector<VideoFrameType> frame_types =
+      (frame_number == 0) ? std::vector<VideoFrameType>{kVideoFrameKey}
+                          : std::vector<VideoFrameType>{kVideoFrameDelta};
   const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
   for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
     FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);


@@ -737,7 +737,7 @@ size_t LibvpxVp8Encoder::SteadyStateSize(int sid, int tid) {
 
 int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
                              const CodecSpecificInfo* codec_specific_info,
-                             const std::vector<FrameType>* frame_types) {
+                             const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_EQ(frame.width(), codec_.width);
   RTC_DCHECK_EQ(frame.height(), codec_.height);
 


@@ -47,7 +47,7 @@ class LibvpxVp8Encoder : public VideoEncoder {
 
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
 
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
 


@@ -74,11 +74,11 @@ class TestVp8Impl : public VideoCodecUnitTest {
                            EncodedImage* encoded_frame,
                            CodecSpecificInfo* codec_specific_info,
                            bool keyframe = false) {
-    std::vector<FrameType> frame_types;
+    std::vector<VideoFrameType> frame_types;
     if (keyframe) {
-      frame_types.emplace_back(FrameType::kVideoFrameKey);
+      frame_types.emplace_back(VideoFrameType::kVideoFrameKey);
     } else {
-      frame_types.emplace_back(FrameType::kVideoFrameDelta);
+      frame_types.emplace_back(VideoFrameType::kVideoFrameDelta);
     }
     EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
               encoder_->Encode(input_frame, &frame_types));
@@ -484,7 +484,7 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
       .Times(2)
      .WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
 
-  auto delta_frame = std::vector<FrameType>{kVideoFrameDelta};
+  auto delta_frame = std::vector<VideoFrameType>{kVideoFrameDelta};
   encoder.Encode(*NextInputFrame(), nullptr, &delta_frame);
 }


@@ -714,7 +714,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {
 
 int VP9EncoderImpl::Encode(const VideoFrame& input_image,
                            const CodecSpecificInfo* codec_specific_info,
-                           const std::vector<FrameType>* frame_types) {
+                           const std::vector<VideoFrameType>* frame_types) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }


@@ -45,7 +45,7 @@ class VP9EncoderImpl : public VP9Encoder {
 
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
 
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
 


@@ -67,7 +67,7 @@ class VCMEncodedFrame : protected EncodedImage {
   /**
    * Get frame type
    */
-  webrtc::FrameType FrameType() const { return _frameType; }
+  webrtc::VideoFrameType FrameType() const { return _frameType; }
   /**
    * Get frame rotation
    */


@@ -177,7 +177,7 @@ void FecControllerDefault::SetProtectionMethod(bool enable_fec,
 }
 
 void FecControllerDefault::UpdateWithEncodedData(
     const size_t encoded_image_length,
-    const FrameType encoded_image_frametype) {
+    const VideoFrameType encoded_image_frametype) {
   const size_t encoded_length = encoded_image_length;
   CritScope lock(&crit_sect_);
   if (encoded_length > 0) {


@@ -44,8 +44,9 @@ class FecControllerDefault : public FecController {
                     uint8_t fraction_lost,
                     std::vector<bool> loss_mask_vector,
                     int64_t round_trip_time_ms) override;
-  void UpdateWithEncodedData(const size_t encoded_image_length,
-                             const FrameType encoded_image_frametype) override;
+  void UpdateWithEncodedData(
+      const size_t encoded_image_length,
+      const VideoFrameType encoded_image_frametype) override;
   bool UseLossVectorMask() override;
 
   float GetProtectionOverheadRateThreshold();


@@ -29,7 +29,7 @@ VCMFrameBuffer::VCMFrameBuffer()
 
 VCMFrameBuffer::~VCMFrameBuffer() {}
 
-webrtc::FrameType VCMFrameBuffer::FrameType() const {
+webrtc::VideoFrameType VCMFrameBuffer::FrameType() const {
   return _sessionInfo.FrameType();
 }
 


@@ -70,7 +70,7 @@ class VCMFrameBuffer : public VCMEncodedFrame {
 
   int64_t LatestPacketTimeMs() const;
 
-  webrtc::FrameType FrameType() const;
+  webrtc::VideoFrameType FrameType() const;
 
  private:
   void SetState(VCMFrameBufferStateEnum state);  // Set state of frame


@@ -121,7 +121,7 @@ int RtpFrameObject::times_nacked() const {
   return times_nacked_;
 }
 
-FrameType RtpFrameObject::frame_type() const {
+VideoFrameType RtpFrameObject::frame_type() const {
   return frame_type_;
 }
 


@@ -36,7 +36,7 @@ class RtpFrameObject : public EncodedFrame {
   uint16_t first_seq_num() const;
   uint16_t last_seq_num() const;
   int times_nacked() const;
-  enum FrameType frame_type() const;
+  VideoFrameType frame_type() const;
   VideoCodecType codec_type() const;
   int64_t ReceivedTime() const override;
   int64_t RenderTime() const override;
@@ -49,7 +49,7 @@ class RtpFrameObject : public EncodedFrame {
   void AllocateBitstreamBuffer(size_t frame_size);
 
   rtc::scoped_refptr<PacketBuffer> packet_buffer_;
-  enum FrameType frame_type_;
+  VideoFrameType frame_type_;
   VideoCodecType codec_type_;
   uint16_t first_seq_num_;
   uint16_t last_seq_num_;


@@ -362,7 +362,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
     return jitter_buffer_->InsertPacket(packet, &retransmitted);
   }
 
-  VCMFrameBufferEnum InsertFrame(FrameType frame_type) {
+  VCMFrameBufferEnum InsertFrame(VideoFrameType frame_type) {
     stream_generator_->GenerateFrame(
         frame_type, (frame_type != kEmptyFrame) ? 1 : 0,
         (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
@@ -371,7 +371,7 @@ class TestRunningJitterBuffer : public ::testing::TestWithParam<std::string>,
     return ret;
   }
 
-  VCMFrameBufferEnum InsertFrames(int num_frames, FrameType frame_type) {
+  VCMFrameBufferEnum InsertFrames(int num_frames, VideoFrameType frame_type) {
     VCMFrameBufferEnum ret_for_all = kNoError;
     for (int i = 0; i < num_frames; ++i) {
       VCMFrameBufferEnum ret = InsertFrame(frame_type);


@@ -46,7 +46,7 @@ VCMPacket::VCMPacket(const uint8_t* ptr,
                      size_t size,
                      const RTPHeader& rtp_header,
                      const RTPVideoHeader& videoHeader,
-                     FrameType frame_type,
+                     VideoFrameType frame_type,
                      int64_t ntp_time_ms)
     : payloadType(rtp_header.payloadType),
       timestamp(rtp_header.timestamp),


@@ -32,7 +32,7 @@ class VCMPacket {
             size_t size,
             const RTPHeader& rtp_header,
             const RTPVideoHeader& video_header,
-            FrameType frame_type,
+            VideoFrameType frame_type,
             int64_t ntp_time_ms);
 
   ~VCMPacket();
@@ -58,7 +58,7 @@ class VCMPacket {
   bool markerBit;
   int timesNacked;
 
-  FrameType frameType;
+  VideoFrameType frameType;
   VCMNaluCompleteness completeNALU;  // Default is kNaluIncomplete.
   bool insertStartCode;  // True if a start code should be inserted before this


@@ -56,7 +56,7 @@ class TestVCMReceiver : public ::testing::Test {
     return receiver_.InsertPacket(packet);
   }
 
-  int32_t InsertFrame(FrameType frame_type, bool complete) {
+  int32_t InsertFrame(VideoFrameType frame_type, bool complete) {
     int num_of_packets = complete ? 1 : 2;
     stream_generator_->GenerateFrame(
         frame_type, (frame_type != kEmptyFrame) ? num_of_packets : 0,
@@ -322,7 +322,7 @@ class SimulatedClockWithFrames : public SimulatedClock {
 
   void GenerateAndInsertFrame(int64_t render_timestamp_ms) {
     VCMPacket packet;
-    stream_generator_->GenerateFrame(FrameType::kVideoFrameKey,
+    stream_generator_->GenerateFrame(VideoFrameType::kVideoFrameKey,
                                      1,  // media packets
                                      0,  // empty packets
                                      render_timestamp_ms);


@@ -54,7 +54,7 @@ class VCMSessionInfo {
   int NumPackets() const;
   bool HaveFirstPacket() const;
   bool HaveLastPacket() const;
-  webrtc::FrameType FrameType() const { return frame_type_; }
+  webrtc::VideoFrameType FrameType() const { return frame_type_; }
 
   int LowSequenceNumber() const;
   // Returns highest sequence number, media or empty.
@@ -103,7 +103,7 @@ class VCMSessionInfo {
   void UpdateCompleteSession();
 
   bool complete_;
-  webrtc::FrameType frame_type_;
+  webrtc::VideoFrameType frame_type_;
   // Packets in this frame.
   PacketList packets_;
   int empty_seq_num_low_;


@@ -29,7 +29,7 @@ void StreamGenerator::Init(uint16_t start_seq_num, int64_t current_time) {
   memset(packet_buffer_, 0, sizeof(packet_buffer_));
 }
 
-void StreamGenerator::GenerateFrame(FrameType type,
+void StreamGenerator::GenerateFrame(VideoFrameType type,
                                     int num_media_packets,
                                     int num_empty_packets,
                                     int64_t time_ms) {
@@ -54,7 +54,7 @@ VCMPacket StreamGenerator::GeneratePacket(uint16_t sequence_number,
                                           unsigned int size,
                                           bool first_packet,
                                           bool marker_bit,
-                                          FrameType type) {
+                                          VideoFrameType type) {
   EXPECT_LT(size, kMaxPacketSize);
   VCMPacket packet;
   packet.seqNum = sequence_number;


@@ -34,7 +34,7 @@ class StreamGenerator {
   // |time_ms| denotes the timestamp you want to put on the frame, and the unit
   // is millisecond. GenerateFrame will translate |time_ms| into a 90kHz
   // timestamp and put it on the frame.
-  void GenerateFrame(FrameType type,
+  void GenerateFrame(VideoFrameType type,
                      int num_media_packets,
                      int num_empty_packets,
                      int64_t time_ms);
@@ -56,7 +56,7 @@ class StreamGenerator {
                            unsigned int size,
                            bool first_packet,
                            bool marker_bit,
-                           FrameType type);
+                           VideoFrameType type);
 
   std::list<VCMPacket>::iterator GetPacketIterator(int index);
 


@@ -294,8 +294,8 @@ void SimulcastTestFixtureImpl::SetRates(uint32_t bitrate_kbps, uint32_t fps) {
 
 void SimulcastTestFixtureImpl::RunActiveStreamsTest(
     const std::vector<bool> active_streams) {
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   UpdateActiveStreams(active_streams);
   // Set sufficient bitrate for all streams so we can test active without
   // bitrate being an issue.
@@ -326,7 +326,7 @@ void SimulcastTestFixtureImpl::UpdateActiveStreams(
 }
 
 void SimulcastTestFixtureImpl::ExpectStreams(
-    FrameType frame_type,
+    VideoFrameType frame_type,
     const std::vector<bool> expected_streams_active) {
   ASSERT_EQ(static_cast<int>(expected_streams_active.size()),
             kNumberOfSimulcastStreams);
@@ -367,7 +367,7 @@ void SimulcastTestFixtureImpl::ExpectStreams(
   }
 }
 
-void SimulcastTestFixtureImpl::ExpectStreams(FrameType frame_type,
+void SimulcastTestFixtureImpl::ExpectStreams(VideoFrameType frame_type,
                                              int expected_video_streams) {
   ASSERT_GE(expected_video_streams, 0);
   ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
@@ -396,8 +396,8 @@ void SimulcastTestFixtureImpl::VerifyTemporalIdxAndSyncForAllSpatialLayers(
 // a key frame was only requested for some of them.
 void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
   SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -431,8 +431,8 @@ void SimulcastTestFixtureImpl::TestKeyFrameRequestsOnAllStreams() {
 void SimulcastTestFixtureImpl::TestPaddingAllStreams() {
   // We should always encode the base layer.
   SetRates(kMinBitrates[0] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -444,8 +444,8 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreams() {
   // We have just enough to get only the first stream and padding for two.
   SetRates(kMinBitrates[0], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -458,8 +458,8 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
   // We are just below limit of sending second stream, so we should get
   // the first stream maxed out (at |maxBitrate|), and padding for two.
   SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 1);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -471,8 +471,8 @@ void SimulcastTestFixtureImpl::TestPaddingTwoStreamsOneMaxedOut() {
 void SimulcastTestFixtureImpl::TestPaddingOneStream() {
   // We have just enough to send two streams, so padding for one stream.
   SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -485,8 +485,8 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
   // We are just below limit of sending third stream, so we should get
   // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 2);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -498,8 +498,8 @@ void SimulcastTestFixtureImpl::TestPaddingOneStreamTwoMaxedOut() {
 void SimulcastTestFixtureImpl::TestSendAllStreams() {
   // We have just enough to send all streams.
   SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -511,8 +511,8 @@ void SimulcastTestFixtureImpl::TestSendAllStreams() {
 void SimulcastTestFixtureImpl::TestDisablingStreams() {
   // We should get three media streams.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   ExpectStreams(kVideoFrameKey, 3);
   EXPECT_EQ(0, encoder_->Encode(*input_frame_, &frame_types));
 
@@ -617,8 +617,8 @@ void SimulcastTestFixtureImpl::SwitchingToOneStream(int width, int height) {
 
   // Encode one frame and verify.
   SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-  std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
-                                     kVideoFrameDelta);
+  std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
+                                          kVideoFrameDelta);
   EXPECT_CALL(
       encoder_callback_,
       OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),


@@ -67,9 +67,9 @@ class SimulcastTestFixtureImpl final : public SimulcastTestFixture {
   void SetRates(uint32_t bitrate_kbps, uint32_t fps);
   void RunActiveStreamsTest(const std::vector<bool> active_streams);
   void UpdateActiveStreams(const std::vector<bool> active_streams);
-  void ExpectStreams(FrameType frame_type,
+  void ExpectStreams(VideoFrameType frame_type,
                      const std::vector<bool> expected_streams_active);
-  void ExpectStreams(FrameType frame_type, int expected_video_streams);
+  void ExpectStreams(VideoFrameType frame_type, int expected_video_streams);
   void VerifyTemporalIdxAndSyncForAllSpatialLayers(
       TestEncodedImageCallback* encoder_callback,
       const int* expected_temporal_idx,