Prepare for splitting FrameType into AudioFrameType and VideoFrameType
This CL deprecates the FrameType enum and adds the aliases AudioFrameType and VideoFrameType. After downstream usage is updated, the enums will be separated and moved out of common_types.h.

Bug: webrtc:6883
Change-Id: I2aaf660169da45f22574b4cbb16aea8522cc07a6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/123184
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27011}
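The common_types.h change itself is not part of this excerpt, so the following is only a rough sketch, under stated assumptions, of what the transitional aliases could look like; the enumerator values and exact spelling are assumptions, not lines from this CL.

// Hypothetical sketch of the transitional aliases in common_types.h.
// Both names keep referring to the existing enum until callers migrate,
// after which the audio and video enums can be split into separate types.
enum FrameType {
  kEmptyFrame = 0,
  kAudioFrameSpeech = 1,
  kAudioFrameCN = 2,
  kVideoFrameKey = 3,
  kVideoFrameDelta = 4,
};
using AudioFrameType = FrameType;  // transitional alias (assumed form)
using VideoFrameType = FrameType;  // transitional alias (assumed form)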
@@ -64,7 +64,7 @@ int NumberOfThreads(int width, int height, int number_of_cores) {
   return 1;
 }
 
-FrameType ConvertToVideoFrameType(EVideoFrameType type) {
+VideoFrameType ConvertToVideoFrameType(EVideoFrameType type) {
   switch (type) {
     case videoFrameTypeIDR:
       return kVideoFrameKey;
@@ -381,9 +381,10 @@ int32_t H264EncoderImpl::SetRateAllocation(
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int32_t H264EncoderImpl::Encode(const VideoFrame& input_frame,
-                                const CodecSpecificInfo* codec_specific_info,
-                                const std::vector<FrameType>* frame_types) {
+int32_t H264EncoderImpl::Encode(
+    const VideoFrame& input_frame,
+    const CodecSpecificInfo* codec_specific_info,
+    const std::vector<VideoFrameType>* frame_types) {
   if (encoders_.empty()) {
     ReportError();
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
@@ -68,7 +68,7 @@ class H264EncoderImpl : public H264Encoder {
   // passed to the encode complete callback.
   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<FrameType>* frame_types) override;
+                 const std::vector<VideoFrameType>* frame_types) override;
 
   EncoderInfo GetEncoderInfo() const override;
 
@@ -43,7 +43,7 @@ class MultiplexEncoderAdapter : public VideoEncoder {
                  int number_of_cores,
                  size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetRateAllocation(const VideoBitrateAllocation& bitrate,
                         uint32_t new_framerate) override;
@@ -115,11 +115,13 @@ MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) {
       ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
   offset += sizeof(uint32_t);
 
+  // TODO(nisse): This makes the wire format depend on the numeric values of the
+  // VideoCodecType and VideoFrameType enum constants.
   frame_header.codec_type = static_cast<VideoCodecType>(
       ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
   offset += sizeof(uint8_t);
 
-  frame_header.frame_type = static_cast<FrameType>(
+  frame_header.frame_type = static_cast<VideoFrameType>(
       ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
   offset += sizeof(uint8_t);
 
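The TODO added above flags a real hazard: the header byte is converted to the enum with a plain static_cast, so the on-the-wire value is whatever numeric value the enum constant happens to have. A small self-contained illustration of that coupling follows; the type and function names here are hypothetical stand-ins, not code from this CL.

// Hypothetical sketch: the byte read from the buffer is meaningful only as
// long as the enum constants keep their current numeric values. If
// kVideoFrameKey were renumbered when the enum is split out of
// common_types.h, an old sender and a new receiver would disagree about
// what this byte means.
#include <cstdint>

enum class VideoFrameTypeSketch : uint8_t {  // stand-in for VideoFrameType
  kVideoFrameKey = 3,
  kVideoFrameDelta = 4,
};

VideoFrameTypeSketch ParseFrameTypeByte(uint8_t wire_byte) {
  // The wire byte must match the enum's numeric value exactly.
  return static_cast<VideoFrameTypeSketch>(wire_byte);
}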
@@ -181,8 +183,8 @@ EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
     // key frame so as to decode the whole image without previous frame data.
     // Thus only when all components are key frames, we can mark the combined
     // frame as key frame.
-    if (frame_header.frame_type == FrameType::kVideoFrameDelta) {
-      combined_image._frameType = FrameType::kVideoFrameDelta;
+    if (frame_header.frame_type == VideoFrameType::kVideoFrameDelta) {
+      combined_image._frameType = VideoFrameType::kVideoFrameDelta;
     }
 
     frame_headers.push_back(frame_header);
@@ -67,7 +67,7 @@ struct MultiplexImageComponentHeader {
   VideoCodecType codec_type;
 
   // Indicated the underlying frame is a key frame or delta frame.
-  FrameType frame_type;
+  VideoFrameType frame_type;
 };
 const int kMultiplexImageComponentHeaderSize =
     sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
@@ -138,12 +138,12 @@ int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
 
 int MultiplexEncoderAdapter::Encode(
     const VideoFrame& input_image,
-    const std::vector<FrameType>* frame_types) {
+    const std::vector<VideoFrameType>* frame_types) {
   if (!encoded_complete_callback_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
 
-  std::vector<FrameType> adjusted_frame_types;
+  std::vector<VideoFrameType> adjusted_frame_types;
   if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
     adjusted_frame_types.push_back(kVideoFrameKey);
   } else {
@@ -285,9 +285,9 @@ void VideoProcessor::ProcessFrame() {
   }
 
   // Encode.
-  const std::vector<FrameType> frame_types =
-      (frame_number == 0) ? std::vector<FrameType>{kVideoFrameKey}
-                          : std::vector<FrameType>{kVideoFrameDelta};
+  const std::vector<VideoFrameType> frame_types =
+      (frame_number == 0) ? std::vector<VideoFrameType>{kVideoFrameKey}
+                          : std::vector<VideoFrameType>{kVideoFrameDelta};
   const int encode_return_code = encoder_->Encode(input_frame, &frame_types);
   for (size_t i = 0; i < num_simulcast_or_spatial_layers_; ++i) {
     FrameStatistics* frame_stat = stats_->GetFrame(frame_number, i);
@@ -737,7 +737,7 @@ size_t LibvpxVp8Encoder::SteadyStateSize(int sid, int tid) {
 
 int LibvpxVp8Encoder::Encode(const VideoFrame& frame,
                              const CodecSpecificInfo* codec_specific_info,
-                             const std::vector<FrameType>* frame_types) {
+                             const std::vector<VideoFrameType>* frame_types) {
   RTC_DCHECK_EQ(frame.width(), codec_.width);
   RTC_DCHECK_EQ(frame.height(), codec_.height);
 
@@ -47,7 +47,7 @@ class LibvpxVp8Encoder : public VideoEncoder {
 
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
 
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
 
@@ -74,11 +74,11 @@ class TestVp8Impl : public VideoCodecUnitTest {
                              EncodedImage* encoded_frame,
                              CodecSpecificInfo* codec_specific_info,
                              bool keyframe = false) {
-    std::vector<FrameType> frame_types;
+    std::vector<VideoFrameType> frame_types;
     if (keyframe) {
-      frame_types.emplace_back(FrameType::kVideoFrameKey);
+      frame_types.emplace_back(VideoFrameType::kVideoFrameKey);
     } else {
-      frame_types.emplace_back(FrameType::kVideoFrameDelta);
+      frame_types.emplace_back(VideoFrameType::kVideoFrameDelta);
     }
     EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
               encoder_->Encode(input_frame, &frame_types));
@@ -484,7 +484,7 @@ TEST_F(TestVp8Impl, KeepsTimestampOnReencode) {
       .Times(2)
       .WillRepeatedly(Return(vpx_codec_err_t::VPX_CODEC_OK));
 
-  auto delta_frame = std::vector<FrameType>{kVideoFrameDelta};
+  auto delta_frame = std::vector<VideoFrameType>{kVideoFrameDelta};
   encoder.Encode(*NextInputFrame(), nullptr, &delta_frame);
 }
 
@@ -714,7 +714,7 @@ uint32_t VP9EncoderImpl::MaxIntraTarget(uint32_t optimal_buffer_size) {
 
 int VP9EncoderImpl::Encode(const VideoFrame& input_image,
                            const CodecSpecificInfo* codec_specific_info,
                            const std::vector<FrameType>* frame_types) {
-                           const std::vector<FrameType>* frame_types) {
+                           const std::vector<VideoFrameType>* frame_types) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
@@ -45,7 +45,7 @@ class VP9EncoderImpl : public VP9Encoder {
 
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<FrameType>* frame_types) override;
+             const std::vector<VideoFrameType>* frame_types) override;
 
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
 