Reland "Rename stereo video codec to multiplex"
This is a reland of bbdabe50db0cf09f6007dda12a6476dc4602b174. Original change's description: > Rename stereo video codec to multiplex > > This CL only does the rename from "stereo" to "multiplex". With this we have a > better name that doesn't clash with audio's usage of stereo. > > Bug: webrtc:7671 > Change-Id: Iebc3fc20839025f1bc8bcf0e16141bf9744ef652 > Reviewed-on: https://webrtc-review.googlesource.com/43242 > Commit-Queue: Emircan Uysaler <emircan@webrtc.org> > Reviewed-by: Niklas Enbom <niklas.enbom@webrtc.org> > Cr-Commit-Position: refs/heads/master@{#21769} TBR=niklas.enbom@webrtc.org Bug: webrtc:7671 Change-Id: I6f38dc46126f279f334d52b56339b40acdc30511 Reviewed-on: https://webrtc-review.googlesource.com/45820 Reviewed-by: Emircan Uysaler <emircan@webrtc.org> Commit-Queue: Emircan Uysaler <emircan@webrtc.org> Cr-Commit-Position: refs/heads/master@{#21794}
This commit is contained in:
committed by
Commit Bot
parent
1f5e98d97e
commit
d7ae3c34e5
1
modules/video_coding/codecs/multiplex/OWNERS
Normal file
1
modules/video_coding/codecs/multiplex/OWNERS
Normal file
@ -0,0 +1 @@
|
||||
emircan@webrtc.org
|
||||
@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
|
||||
#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "api/video_codecs/sdp_video_format.h"
|
||||
#include "api/video_codecs/video_decoder.h"
|
||||
#include "api/video_codecs/video_decoder_factory.h"
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class MultiplexDecoderAdapter : public VideoDecoder {
|
||||
public:
|
||||
// |factory| is not owned and expected to outlive this class' lifetime.
|
||||
explicit MultiplexDecoderAdapter(VideoDecoderFactory* factory,
|
||||
const SdpVideoFormat& associated_format);
|
||||
virtual ~MultiplexDecoderAdapter();
|
||||
|
||||
// Implements VideoDecoder
|
||||
int32_t InitDecode(const VideoCodec* codec_settings,
|
||||
int32_t number_of_cores) override;
|
||||
int32_t Decode(const EncodedImage& input_image,
|
||||
bool missing_frames,
|
||||
const RTPFragmentationHeader* fragmentation,
|
||||
const CodecSpecificInfo* codec_specific_info,
|
||||
int64_t render_time_ms) override;
|
||||
int32_t RegisterDecodeCompleteCallback(
|
||||
DecodedImageCallback* callback) override;
|
||||
int32_t Release() override;
|
||||
|
||||
void Decoded(AlphaCodecStream stream_idx,
|
||||
VideoFrame* decoded_image,
|
||||
rtc::Optional<int32_t> decode_time_ms,
|
||||
rtc::Optional<uint8_t> qp);
|
||||
|
||||
private:
|
||||
// Wrapper class that redirects Decoded() calls.
|
||||
class AdapterDecodedImageCallback;
|
||||
|
||||
// Holds the decoded image output of a frame.
|
||||
struct DecodedImageData;
|
||||
|
||||
void MergeAlphaImages(VideoFrame* decoded_image,
|
||||
const rtc::Optional<int32_t>& decode_time_ms,
|
||||
const rtc::Optional<uint8_t>& qp,
|
||||
VideoFrame* multiplex_decoded_image,
|
||||
const rtc::Optional<int32_t>& multiplex_decode_time_ms,
|
||||
const rtc::Optional<uint8_t>& multiplex_qp);
|
||||
|
||||
VideoDecoderFactory* const factory_;
|
||||
const SdpVideoFormat associated_format_;
|
||||
std::vector<std::unique_ptr<VideoDecoder>> decoders_;
|
||||
std::vector<std::unique_ptr<AdapterDecodedImageCallback>> adapter_callbacks_;
|
||||
DecodedImageCallback* decoded_complete_callback_;
|
||||
|
||||
// Holds YUV or AXX decode output of a frame that is identified by timestamp.
|
||||
std::map<uint32_t /* timestamp */, DecodedImageData> decoded_data_;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
|
||||
@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
|
||||
#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "common_types.h" // NOLINT(build/include)
|
||||
#include "common_video/include/video_frame.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Struct describing the whole bundle of multiple frames of an image.
// This struct is expected to be the set in the beginning of a picture's
// bitstream data.
struct MultiplexImageHeader {
  // The number of frame components making up the complete picture data.
  // For example, |component_count| = 2 for the case of YUV frame with Alpha
  // frame.
  uint8_t component_count;

  // The increasing image ID given by the encoder. For different components
  // of a single picture, they have the same |image_index|.
  uint16_t image_index;

  // The location of the first MultiplexImageComponentHeader in the bitstream,
  // in terms of byte from the beginning of the bitstream.
  uint32_t first_component_header_offset;
};
// Serialized size of MultiplexImageHeader; must stay in sync with
// PackHeader()/UnpackHeader().
const int kMultiplexImageHeaderSize =
    sizeof(uint8_t) + sizeof(uint16_t) + sizeof(uint32_t);
|
||||
|
||||
// Struct describing the individual image component's content.
|
||||
struct MultiplexImageComponentHeader {
|
||||
// The location of the next MultiplexImageComponentHeader in the bitstream,
|
||||
// in terms of the byte from the beginning of the bitstream;
|
||||
uint32_t next_component_header_offset;
|
||||
|
||||
// Identifies which component this frame represent, i.e. YUV frame vs Alpha
|
||||
// frame.
|
||||
uint8_t component_index;
|
||||
|
||||
// The location of the real encoded image data of the frame in the bitstream,
|
||||
// in terms of byte from the beginning of the bitstream.
|
||||
uint32_t bitstream_offset;
|
||||
|
||||
// Indicates the number of bytes of the encoded image data.
|
||||
uint32_t bitstream_length;
|
||||
|
||||
// Indicated the underlying VideoCodecType of the frame, i.e. VP9 or VP8 etc.
|
||||
VideoCodecType codec_type;
|
||||
|
||||
// Indicated the underlying frame is a key frame or delta frame.
|
||||
FrameType frame_type;
|
||||
};
|
||||
const int kMultiplexImageComponentHeaderSize =
|
||||
sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
|
||||
sizeof(uint8_t) + sizeof(uint8_t);
|
||||
|
||||
// Struct holding the encoded image for one component.
|
||||
struct MultiplexImageComponent {
|
||||
// Indicated the underlying VideoCodecType of the frame, i.e. VP9 or VP8 etc.
|
||||
VideoCodecType codec_type;
|
||||
|
||||
// Identifies which component this frame represent, i.e. YUV frame vs Alpha
|
||||
// frame.
|
||||
int component_index;
|
||||
|
||||
// Stores the actual frame data of the encoded image.
|
||||
EncodedImage encoded_image;
|
||||
};
|
||||
|
||||
// Struct holding the whole frame bundle of components of an image.
|
||||
struct MultiplexImage {
|
||||
int image_index;
|
||||
int component_count;
|
||||
std::vector<MultiplexImageComponent> image_components;
|
||||
|
||||
MultiplexImage(int picture_index, int frame_count);
|
||||
};
|
||||
|
||||
// A utility class providing conversion between two representations of a
|
||||
// multiplex image frame:
|
||||
// 1. Packed version is just one encoded image, we pack all necessary metadata
|
||||
// in the bitstream as headers.
|
||||
// 2. Unpacked version is essentially a list of encoded images, one for one
|
||||
// component.
|
||||
class MultiplexEncodedImagePacker {
|
||||
public:
|
||||
// Note: It is caller responsibility to release the buffer of the result.
|
||||
static EncodedImage PackAndRelease(const MultiplexImage& image);
|
||||
|
||||
// Note: The image components just share the memory with |combined_image|.
|
||||
static MultiplexImage Unpack(const EncodedImage& combined_image);
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
|
||||
@ -0,0 +1,80 @@
|
||||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
|
||||
#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "api/video_codecs/sdp_video_format.h"
|
||||
#include "api/video_codecs/video_encoder.h"
|
||||
#include "api/video_codecs/video_encoder_factory.h"
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoded_image_packer.h"
|
||||
#include "modules/video_coding/include/video_codec_interface.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Indices of the component streams inside the multiplex codec: the regular
// YUV stream and the alpha-plane (AXX) stream.
enum AlphaCodecStream {
  kYUVStream = 0,
  kAXXStream = 1,
  kAlphaCodecStreams = 2,
};
|
||||
|
||||
class MultiplexEncoderAdapter : public VideoEncoder {
|
||||
public:
|
||||
// |factory| is not owned and expected to outlive this class' lifetime.
|
||||
explicit MultiplexEncoderAdapter(VideoEncoderFactory* factory,
|
||||
const SdpVideoFormat& associated_format);
|
||||
virtual ~MultiplexEncoderAdapter();
|
||||
|
||||
// Implements VideoEncoder
|
||||
int InitEncode(const VideoCodec* inst,
|
||||
int number_of_cores,
|
||||
size_t max_payload_size) override;
|
||||
int Encode(const VideoFrame& input_image,
|
||||
const CodecSpecificInfo* codec_specific_info,
|
||||
const std::vector<FrameType>* frame_types) override;
|
||||
int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
|
||||
int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
|
||||
int SetRateAllocation(const BitrateAllocation& bitrate,
|
||||
uint32_t new_framerate) override;
|
||||
int Release() override;
|
||||
const char* ImplementationName() const override;
|
||||
|
||||
EncodedImageCallback::Result OnEncodedImage(
|
||||
AlphaCodecStream stream_idx,
|
||||
const EncodedImage& encodedImage,
|
||||
const CodecSpecificInfo* codecSpecificInfo,
|
||||
const RTPFragmentationHeader* fragmentation);
|
||||
|
||||
private:
|
||||
// Wrapper class that redirects OnEncodedImage() calls.
|
||||
class AdapterEncodedImageCallback;
|
||||
|
||||
VideoEncoderFactory* const factory_;
|
||||
const SdpVideoFormat associated_format_;
|
||||
std::vector<std::unique_ptr<VideoEncoder>> encoders_;
|
||||
std::vector<std::unique_ptr<AdapterEncodedImageCallback>> adapter_callbacks_;
|
||||
EncodedImageCallback* encoded_complete_callback_;
|
||||
|
||||
std::map<uint32_t /* timestamp */, MultiplexImage> stashed_images_;
|
||||
|
||||
uint16_t picture_index_ = 0;
|
||||
std::vector<uint8_t> multiplex_dummy_planes_;
|
||||
|
||||
int key_frame_interval_;
|
||||
EncodedImage combined_image_;
|
||||
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
|
||||
@ -0,0 +1,216 @@
|
||||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
|
||||
|
||||
#include "api/video/i420_buffer.h"
|
||||
#include "api/video/video_frame_buffer.h"
|
||||
#include "common_video/include/video_frame.h"
|
||||
#include "common_video/include/video_frame_buffer.h"
|
||||
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
||||
#include "rtc_base/keep_ref_until_done.h"
|
||||
#include "rtc_base/logging.h"
|
||||
|
||||
namespace {
|
||||
void KeepBufferRefs(rtc::scoped_refptr<webrtc::VideoFrameBuffer>,
|
||||
rtc::scoped_refptr<webrtc::VideoFrameBuffer>) {}
|
||||
} // anonymous namespace
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class MultiplexDecoderAdapter::AdapterDecodedImageCallback
|
||||
: public webrtc::DecodedImageCallback {
|
||||
public:
|
||||
AdapterDecodedImageCallback(webrtc::MultiplexDecoderAdapter* adapter,
|
||||
AlphaCodecStream stream_idx)
|
||||
: adapter_(adapter), stream_idx_(stream_idx) {}
|
||||
|
||||
void Decoded(VideoFrame& decoded_image,
|
||||
rtc::Optional<int32_t> decode_time_ms,
|
||||
rtc::Optional<uint8_t> qp) override {
|
||||
if (!adapter_)
|
||||
return;
|
||||
adapter_->Decoded(stream_idx_, &decoded_image, decode_time_ms, qp);
|
||||
}
|
||||
int32_t Decoded(VideoFrame& decoded_image) override {
|
||||
RTC_NOTREACHED();
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
|
||||
RTC_NOTREACHED();
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
private:
|
||||
MultiplexDecoderAdapter* adapter_;
|
||||
const AlphaCodecStream stream_idx_;
|
||||
};
|
||||
|
||||
// Stash entry for one decoded component of a picture, kept until the
// matching component (YUV or alpha) for the same timestamp arrives.
struct MultiplexDecoderAdapter::DecodedImageData {
  // Placeholder for pictures transmitted without an alpha component: a 1x1
  // dummy frame with timestamp 0, which MergeAlphaImages() treats as
  // "no alpha present".
  explicit DecodedImageData(AlphaCodecStream stream_idx)
      : stream_idx_(stream_idx),
        decoded_image_(I420Buffer::Create(1 /* width */, 1 /* height */),
                       0,
                       0,
                       kVideoRotation_0) {
    RTC_DCHECK_EQ(kAXXStream, stream_idx);
  }
  DecodedImageData(AlphaCodecStream stream_idx,
                   const VideoFrame& decoded_image,
                   const rtc::Optional<int32_t>& decode_time_ms,
                   const rtc::Optional<uint8_t>& qp)
      : stream_idx_(stream_idx),
        decoded_image_(decoded_image),
        decode_time_ms_(decode_time_ms),
        qp_(qp) {}
  const AlphaCodecStream stream_idx_;
  VideoFrame decoded_image_;
  const rtc::Optional<int32_t> decode_time_ms_;
  const rtc::Optional<uint8_t> qp_;

 private:
  RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(DecodedImageData);
};
|
||||
|
||||
// |factory| is borrowed and must outlive this adapter.
MultiplexDecoderAdapter::MultiplexDecoderAdapter(
    VideoDecoderFactory* factory,
    const SdpVideoFormat& associated_format)
    : factory_(factory), associated_format_(associated_format) {}
|
||||
|
||||
MultiplexDecoderAdapter::~MultiplexDecoderAdapter() {
  // Tear down the wrapped decoders even if the owner never called Release().
  Release();
}
|
||||
|
||||
int32_t MultiplexDecoderAdapter::InitDecode(const VideoCodec* codec_settings,
|
||||
int32_t number_of_cores) {
|
||||
RTC_DCHECK_EQ(kVideoCodecMultiplex, codec_settings->codecType);
|
||||
VideoCodec settings = *codec_settings;
|
||||
settings.codecType = PayloadStringToCodecType(associated_format_.name);
|
||||
for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
|
||||
std::unique_ptr<VideoDecoder> decoder =
|
||||
factory_->CreateVideoDecoder(associated_format_);
|
||||
const int32_t rv = decoder->InitDecode(&settings, number_of_cores);
|
||||
if (rv)
|
||||
return rv;
|
||||
adapter_callbacks_.emplace_back(
|
||||
new MultiplexDecoderAdapter::AdapterDecodedImageCallback(
|
||||
this, static_cast<AlphaCodecStream>(i)));
|
||||
decoder->RegisterDecodeCompleteCallback(adapter_callbacks_.back().get());
|
||||
decoders_.emplace_back(std::move(decoder));
|
||||
}
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
int32_t MultiplexDecoderAdapter::Decode(
|
||||
const EncodedImage& input_image,
|
||||
bool missing_frames,
|
||||
const RTPFragmentationHeader* /*fragmentation*/,
|
||||
const CodecSpecificInfo* codec_specific_info,
|
||||
int64_t render_time_ms) {
|
||||
const MultiplexImage& image =
|
||||
MultiplexEncodedImagePacker::Unpack(input_image);
|
||||
|
||||
if (image.component_count == 1) {
|
||||
RTC_DCHECK(decoded_data_.find(input_image._timeStamp) ==
|
||||
decoded_data_.end());
|
||||
decoded_data_.emplace(std::piecewise_construct,
|
||||
std::forward_as_tuple(input_image._timeStamp),
|
||||
std::forward_as_tuple(kAXXStream));
|
||||
}
|
||||
int32_t rv = 0;
|
||||
for (size_t i = 0; i < image.image_components.size(); i++) {
|
||||
rv = decoders_[image.image_components[i].component_index]->Decode(
|
||||
image.image_components[i].encoded_image, missing_frames, nullptr,
|
||||
nullptr, render_time_ms);
|
||||
if (rv != WEBRTC_VIDEO_CODEC_OK)
|
||||
return rv;
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
int32_t MultiplexDecoderAdapter::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  // Merged frames are delivered through this callback; see MergeAlphaImages().
  decoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
int32_t MultiplexDecoderAdapter::Release() {
|
||||
for (auto& decoder : decoders_) {
|
||||
const int32_t rv = decoder->Release();
|
||||
if (rv)
|
||||
return rv;
|
||||
}
|
||||
decoders_.clear();
|
||||
adapter_callbacks_.clear();
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
// Receives one decoded component of a picture. If the matching component for
// the same timestamp is already stashed, the two are merged and delivered;
// otherwise this component is stashed until its counterpart arrives.
void MultiplexDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
                                      VideoFrame* decoded_image,
                                      rtc::Optional<int32_t> decode_time_ms,
                                      rtc::Optional<uint8_t> qp) {
  const auto& other_decoded_data_it =
      decoded_data_.find(decoded_image->timestamp());
  if (other_decoded_data_it != decoded_data_.end()) {
    auto& other_image_data = other_decoded_data_it->second;
    if (stream_idx == kYUVStream) {
      RTC_DCHECK_EQ(kAXXStream, other_image_data.stream_idx_);
      MergeAlphaImages(decoded_image, decode_time_ms, qp,
                       &other_image_data.decoded_image_,
                       other_image_data.decode_time_ms_, other_image_data.qp_);
    } else {
      RTC_DCHECK_EQ(kYUVStream, other_image_data.stream_idx_);
      RTC_DCHECK_EQ(kAXXStream, stream_idx);
      MergeAlphaImages(&other_image_data.decoded_image_,
                       other_image_data.decode_time_ms_, other_image_data.qp_,
                       decoded_image, decode_time_ms, qp);
    }
    // Drop all stale records older than this timestamp AND the consumed
    // record itself. The previous half-open erase(begin, it) left the
    // matched entry in |decoded_data_| until a later merge flushed it,
    // keeping its frame buffer alive needlessly.
    decoded_data_.erase(decoded_data_.begin(), other_decoded_data_it);
    decoded_data_.erase(other_decoded_data_it);
    return;
  }
  RTC_DCHECK(decoded_data_.find(decoded_image->timestamp()) ==
             decoded_data_.end());
  decoded_data_.emplace(
      std::piecewise_construct,
      std::forward_as_tuple(decoded_image->timestamp()),
      std::forward_as_tuple(stream_idx, *decoded_image, decode_time_ms, qp));
}
|
||||
|
||||
void MultiplexDecoderAdapter::MergeAlphaImages(
    VideoFrame* decoded_image,
    const rtc::Optional<int32_t>& decode_time_ms,
    const rtc::Optional<uint8_t>& qp,
    VideoFrame* alpha_decoded_image,
    const rtc::Optional<int32_t>& alpha_decode_time_ms,
    const rtc::Optional<uint8_t>& alpha_qp) {
  if (!alpha_decoded_image->timestamp()) {
    // Timestamp 0 marks the placeholder alpha record created for
    // single-component pictures; deliver the YUV frame unchanged.
    decoded_complete_callback_->Decoded(*decoded_image, decode_time_ms, qp);
    return;
  }

  rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
      decoded_image->video_frame_buffer()->ToI420();
  rtc::scoped_refptr<webrtc::I420BufferInterface> alpha_buffer =
      alpha_decoded_image->video_frame_buffer()->ToI420();
  RTC_DCHECK_EQ(yuv_buffer->width(), alpha_buffer->width());
  RTC_DCHECK_EQ(yuv_buffer->height(), alpha_buffer->height());
  // Wrap the YUV planes plus the alpha stream's Y plane into one I420A frame
  // without copying pixels; KeepBufferRefs keeps both sources alive for the
  // wrapper's lifetime. Note the alpha stream's decode time / QP
  // (|alpha_decode_time_ms|, |alpha_qp|) are not forwarded.
  rtc::scoped_refptr<I420ABufferInterface> merged_buffer = WrapI420ABuffer(
      yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
      yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
      yuv_buffer->DataV(), yuv_buffer->StrideV(), alpha_buffer->DataY(),
      alpha_buffer->StrideY(),
      rtc::Bind(&KeepBufferRefs, yuv_buffer, alpha_buffer));

  VideoFrame merged_image(merged_buffer, decoded_image->timestamp(),
                          0 /* render_time_ms */, decoded_image->rotation());
  decoded_complete_callback_->Decoded(merged_image, decode_time_ms, qp);
}
|
||||
|
||||
} // namespace webrtc
|
||||
@ -0,0 +1,230 @@
|
||||
/*
|
||||
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoded_image_packer.h"
|
||||
|
||||
#include <cstring>
|
||||
|
||||
#include "modules/rtp_rtcp/source/byte_io.h"
|
||||
|
||||
namespace webrtc {
|
||||
// Serializes |header| into |buffer| in big-endian order. Returns the number
// of bytes written (always kMultiplexImageHeaderSize).
int PackHeader(uint8_t* buffer, MultiplexImageHeader header) {
  int pos = 0;
  ByteWriter<uint8_t>::WriteBigEndian(buffer + pos, header.component_count);
  pos += sizeof(uint8_t);

  ByteWriter<uint16_t>::WriteBigEndian(buffer + pos, header.image_index);
  pos += sizeof(uint16_t);

  ByteWriter<uint32_t>::WriteBigEndian(buffer + pos,
                                       header.first_component_header_offset);
  pos += sizeof(uint32_t);

  RTC_DCHECK_EQ(pos, kMultiplexImageHeaderSize);
  return pos;
}
|
||||
|
||||
// Deserializes a MultiplexImageHeader from |buffer|; exact inverse of
// PackHeader().
MultiplexImageHeader UnpackHeader(uint8_t* buffer) {
  MultiplexImageHeader header;
  int pos = 0;
  header.component_count = ByteReader<uint8_t>::ReadBigEndian(buffer + pos);
  pos += sizeof(uint8_t);

  header.image_index = ByteReader<uint16_t>::ReadBigEndian(buffer + pos);
  pos += sizeof(uint16_t);

  header.first_component_header_offset =
      ByteReader<uint32_t>::ReadBigEndian(buffer + pos);
  pos += sizeof(uint32_t);

  RTC_DCHECK_EQ(pos, kMultiplexImageHeaderSize);
  return header;
}
|
||||
|
||||
int PackFrameHeader(uint8_t* buffer,
|
||||
MultiplexImageComponentHeader frame_header) {
|
||||
int offset = 0;
|
||||
ByteWriter<uint32_t>::WriteBigEndian(
|
||||
buffer + offset, frame_header.next_component_header_offset);
|
||||
offset += sizeof(uint32_t);
|
||||
|
||||
ByteWriter<uint8_t>::WriteBigEndian(buffer + offset,
|
||||
frame_header.component_index);
|
||||
offset += sizeof(uint8_t);
|
||||
|
||||
ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
|
||||
frame_header.bitstream_offset);
|
||||
offset += sizeof(uint32_t);
|
||||
|
||||
ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
|
||||
frame_header.bitstream_length);
|
||||
offset += sizeof(uint32_t);
|
||||
|
||||
ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
|
||||
offset += sizeof(uint8_t);
|
||||
|
||||
ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.frame_type);
|
||||
offset += sizeof(uint8_t);
|
||||
|
||||
RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
|
||||
return offset;
|
||||
}
|
||||
|
||||
// Deserializes one component header from |buffer|; exact inverse of
// PackFrameHeader().
MultiplexImageComponentHeader UnpackFrameHeader(uint8_t* buffer) {
  MultiplexImageComponentHeader frame_header;
  int pos = 0;

  frame_header.next_component_header_offset =
      ByteReader<uint32_t>::ReadBigEndian(buffer + pos);
  pos += sizeof(uint32_t);

  frame_header.component_index =
      ByteReader<uint8_t>::ReadBigEndian(buffer + pos);
  pos += sizeof(uint8_t);

  frame_header.bitstream_offset =
      ByteReader<uint32_t>::ReadBigEndian(buffer + pos);
  pos += sizeof(uint32_t);

  frame_header.bitstream_length =
      ByteReader<uint32_t>::ReadBigEndian(buffer + pos);
  pos += sizeof(uint32_t);

  // Enum fields travel as single bytes on the wire.
  frame_header.codec_type = static_cast<VideoCodecType>(
      ByteReader<uint8_t>::ReadBigEndian(buffer + pos));
  pos += sizeof(uint8_t);

  frame_header.frame_type = static_cast<FrameType>(
      ByteReader<uint8_t>::ReadBigEndian(buffer + pos));
  pos += sizeof(uint8_t);

  RTC_DCHECK_EQ(pos, kMultiplexImageComponentHeaderSize);
  return frame_header;
}
|
||||
|
||||
// Copies the component's encoded payload into |buffer| (which must have room
// for image.encoded_image._length bytes).
void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
  memcpy(buffer, image.encoded_image._buffer, image.encoded_image._length);
}
|
||||
|
||||
// |image_components| starts empty; callers append the per-stream components.
MultiplexImage::MultiplexImage(int picture_index, int frame_count)
    : image_index(picture_index), component_count(frame_count) {}
|
||||
|
||||
// Packs all components into one contiguous EncodedImage with the layout
// [image header][component headers...][bitstreams...], then releases each
// component's own buffer. The caller owns the returned buffer.
EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
    const MultiplexImage& multiplex_image) {
  MultiplexImageHeader header;
  std::vector<MultiplexImageComponentHeader> frame_headers;

  header.component_count = multiplex_image.component_count;
  header.image_index = multiplex_image.image_index;
  int header_offset = kMultiplexImageHeaderSize;
  header.first_component_header_offset = header_offset;
  // Payload bytes start right after all of the component headers.
  int bitstream_offset = header_offset + kMultiplexImageComponentHeaderSize *
                                             header.component_count;

  const std::vector<MultiplexImageComponent>& images =
      multiplex_image.image_components;
  // Start from a copy of the first component's EncodedImage so its metadata
  // fields carry over to the combined image.
  EncodedImage combined_image = images[0].encoded_image;
  for (size_t i = 0; i < images.size(); i++) {
    MultiplexImageComponentHeader frame_header;
    header_offset += kMultiplexImageComponentHeaderSize;
    // A zero offset terminates the header chain.
    frame_header.next_component_header_offset =
        (i == images.size() - 1) ? 0 : header_offset;
    frame_header.component_index = images[i].component_index;

    frame_header.bitstream_offset = bitstream_offset;
    frame_header.bitstream_length =
        static_cast<uint32_t>(images[i].encoded_image._length);
    bitstream_offset += frame_header.bitstream_length;

    frame_header.codec_type = images[i].codec_type;
    frame_header.frame_type = images[i].encoded_image._frameType;

    // As long as one component is delta frame, we have to mark the combined
    // frame as delta frame, because it is necessary for all components to be
    // key frame so as to decode the whole image without previous frame data.
    // Thus only when all components are key frames, we can mark the combined
    // frame as key frame.
    if (frame_header.frame_type == FrameType::kVideoFrameDelta) {
      combined_image._frameType = FrameType::kVideoFrameDelta;
    }

    frame_headers.push_back(frame_header);
  }

  combined_image._length = combined_image._size = bitstream_offset;
  combined_image._buffer = new uint8_t[combined_image._length];

  // Image header.
  header_offset = PackHeader(combined_image._buffer, header);
  RTC_DCHECK_EQ(header.first_component_header_offset,
                kMultiplexImageHeaderSize);

  // Component headers.
  for (size_t i = 0; i < images.size(); i++) {
    const int relative_offset = PackFrameHeader(
        combined_image._buffer + header_offset, frame_headers[i]);
    RTC_DCHECK_EQ(relative_offset, kMultiplexImageComponentHeaderSize);

    header_offset = frame_headers[i].next_component_header_offset;
    RTC_DCHECK_EQ(header_offset,
                  (i == images.size() - 1)
                      ? 0
                      : (kMultiplexImageHeaderSize +
                         kMultiplexImageComponentHeaderSize * (i + 1)));
  }

  // Bitstreams. Each component's own buffer is freed after copying — this is
  // the "release" in PackAndRelease().
  for (size_t i = 0; i < images.size(); i++) {
    PackBitstream(combined_image._buffer + frame_headers[i].bitstream_offset,
                  images[i]);
    delete[] images[i].encoded_image._buffer;
  }

  return combined_image;
}
|
||||
|
||||
// Splits a packed image back into its components. The returned components
// alias |combined_image|'s buffer — no pixel data is copied.
MultiplexImage MultiplexEncodedImagePacker::Unpack(
    const EncodedImage& combined_image) {
  const MultiplexImageHeader& header = UnpackHeader(combined_image._buffer);

  MultiplexImage multiplex_image(header.image_index, header.component_count);

  // Walk the chain of component headers; a zero offset terminates it.
  std::vector<MultiplexImageComponentHeader> frame_headers;
  int header_offset = header.first_component_header_offset;
  while (header_offset > 0) {
    frame_headers.push_back(
        UnpackFrameHeader(combined_image._buffer + header_offset));
    header_offset = frame_headers.back().next_component_header_offset;
  }

  RTC_DCHECK_LE(frame_headers.size(), header.component_count);
  for (size_t i = 0; i < frame_headers.size(); i++) {
    MultiplexImageComponent image_component;
    image_component.component_index = frame_headers[i].component_index;
    image_component.codec_type = frame_headers[i].codec_type;

    // Shallow copy pointing into the combined buffer at this component's
    // payload span.
    EncodedImage encoded_image = combined_image;
    encoded_image._frameType = frame_headers[i].frame_type;
    encoded_image._length = encoded_image._size =
        static_cast<size_t>(frame_headers[i].bitstream_length);
    encoded_image._buffer =
        combined_image._buffer + frame_headers[i].bitstream_offset;

    image_component.encoded_image = encoded_image;
    multiplex_image.image_components.push_back(image_component);
  }

  return multiplex_image;
}
|
||||
|
||||
} // namespace webrtc
|
||||
@ -0,0 +1,263 @@
|
||||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
|
||||
|
||||
#include <cstring>
|
||||
|
||||
#include "common_video/include/video_frame.h"
|
||||
#include "common_video/include/video_frame_buffer.h"
|
||||
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
||||
#include "modules/include/module_common_types.h"
|
||||
#include "rtc_base/keep_ref_until_done.h"
|
||||
#include "rtc_base/logging.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Callback wrapper that helps distinguish returned results from |encoders_|
// instances.
class MultiplexEncoderAdapter::AdapterEncodedImageCallback
    : public webrtc::EncodedImageCallback {
 public:
  // |adapter| must outlive this callback (it is stored as a raw pointer);
  // |stream_idx| identifies which sub-encoder (YUV or AXX) produced the
  // frames forwarded through this instance.
  AdapterEncodedImageCallback(webrtc::MultiplexEncoderAdapter* adapter,
                              AlphaCodecStream stream_idx)
      : adapter_(adapter), stream_idx_(stream_idx) {}

  // Forwards the encoded frame to the adapter, tagged with |stream_idx_|.
  // Returns OK without forwarding when the adapter pointer is null.
  EncodedImageCallback::Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info,
      const RTPFragmentationHeader* fragmentation) override {
    if (!adapter_)
      return Result(Result::OK);
    return adapter_->OnEncodedImage(stream_idx_, encoded_image,
                                    codec_specific_info, fragmentation);
  }

 private:
  MultiplexEncoderAdapter* adapter_;  // Not owned.
  const AlphaCodecStream stream_idx_;
};
|
||||
|
||||
// |factory| creates one sub-encoder per stream in InitEncode(); stored as a
// raw pointer, so it presumably must outlive the adapter — TODO confirm with
// callers. |associated_format| is the SDP format of the underlying codec
// (e.g. VP9) that each sub-encoder will run.
MultiplexEncoderAdapter::MultiplexEncoderAdapter(
    VideoEncoderFactory* factory,
    const SdpVideoFormat& associated_format)
    : factory_(factory),
      associated_format_(associated_format),
      encoded_complete_callback_(nullptr) {}
|
||||
|
||||
MultiplexEncoderAdapter::~MultiplexEncoderAdapter() {
  // Release() frees the sub-encoders, the deep-copied stashed frames and the
  // packed output buffer.
  Release();
}
|
||||
|
||||
// Creates and initializes one sub-encoder per stream (YUV + alpha) running
// the associated codec. Returns WEBRTC_VIDEO_CODEC_OK, or the failing
// sub-encoder's error code (already-created sub-encoders are kept as-is in
// that case).
int MultiplexEncoderAdapter::InitEncode(const VideoCodec* inst,
                                        int number_of_cores,
                                        size_t max_payload_size) {
  // Scratch buffer used as dummy U/V planes when encoding the alpha stream
  // as a synthetic I420 frame (see Encode()).
  const size_t buffer_size =
      CalcBufferSize(VideoType::kI420, inst->width, inst->height);
  multiplex_dummy_planes_.resize(buffer_size);
  // It is more expensive to encode 0x00, so use 0x80 instead.
  std::fill(multiplex_dummy_planes_.begin(), multiplex_dummy_planes_.end(),
            0x80);

  RTC_DCHECK_EQ(kVideoCodecMultiplex, inst->codecType);
  // The sub-encoders run the associated codec (e.g. VP9), not the multiplex
  // wrapper type.
  VideoCodec settings = *inst;
  settings.codecType = PayloadStringToCodecType(associated_format_.name);

  // Take over the key frame interval at adapter level, because we have to
  // sync the key frames for both sub-encoders.
  switch (settings.codecType) {
    case kVideoCodecVP8:
      key_frame_interval_ = settings.VP8()->keyFrameInterval;
      settings.VP8()->keyFrameInterval = 0;
      break;
    case kVideoCodecVP9:
      key_frame_interval_ = settings.VP9()->keyFrameInterval;
      settings.VP9()->keyFrameInterval = 0;
      break;
    case kVideoCodecH264:
      key_frame_interval_ = settings.H264()->keyFrameInterval;
      settings.H264()->keyFrameInterval = 0;
      break;
    default:
      break;
  }

  for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
    std::unique_ptr<VideoEncoder> encoder =
        factory_->CreateVideoEncoder(associated_format_);
    const int rv =
        encoder->InitEncode(&settings, number_of_cores, max_payload_size);
    if (rv) {
      RTC_LOG(LS_ERROR) << "Failed to create multiplex codec index " << i;
      return rv;
    }
    // Wrapper callback tags each encoded frame with its stream index so
    // OnEncodedImage() can tell the two sub-encoders apart.
    adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(
        this, static_cast<AlphaCodecStream>(i)));
    encoder->RegisterEncodeCompleteCallback(adapter_callbacks_.back().get());
    encoders_.emplace_back(std::move(encoder));
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
// Encodes |input_image| with the YUV sub-encoder and, when the frame carries
// an alpha channel (kI420A), additionally encodes the alpha plane with the
// AXX sub-encoder. |frame_types| is ignored: the adapter decides the frame
// type itself so both sub-encoders stay key-frame synchronized.
int MultiplexEncoderAdapter::Encode(
    const VideoFrame& input_image,
    const CodecSpecificInfo* codec_specific_info,
    const std::vector<FrameType>* frame_types) {
  if (!encoded_complete_callback_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  // Key frame cadence is driven here (interval taken over in InitEncode) so
  // that both sub-encoders emit key frames on the same pictures.
  std::vector<FrameType> adjusted_frame_types;
  if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
    adjusted_frame_types.push_back(kVideoFrameKey);
  } else {
    adjusted_frame_types.push_back(kVideoFrameDelta);
  }
  const bool has_alpha = input_image.video_frame_buffer()->type() ==
                         VideoFrameBuffer::Type::kI420A;
  // Bookkeeping entry keyed by RTP timestamp; OnEncodedImage() collects the
  // per-stream results into it until |component_count| frames have arrived.
  stashed_images_.emplace(
      std::piecewise_construct, std::forward_as_tuple(input_image.timestamp()),
      std::forward_as_tuple(picture_index_,
                            has_alpha ? kAlphaCodecStreams : 1));

  ++picture_index_;

  // Encode YUV
  int rv = encoders_[kYUVStream]->Encode(input_image, codec_specific_info,
                                         &adjusted_frame_types);
  // If we do not receive an alpha frame, we send a single frame for this
  // |picture_index_|. The receiver will receive |frame_count| as 1 which
  // specifies this case.
  if (rv || !has_alpha)
    return rv;

  // Encode AXX: the alpha plane is wrapped as the Y plane of a synthetic
  // I420 frame; U and V point at the 0x80-filled dummy planes prepared in
  // InitEncode().
  const I420ABufferInterface* yuva_buffer =
      input_image.video_frame_buffer()->GetI420A();
  rtc::scoped_refptr<I420BufferInterface> alpha_buffer =
      WrapI420Buffer(input_image.width(), input_image.height(),
                     yuva_buffer->DataA(), yuva_buffer->StrideA(),
                     multiplex_dummy_planes_.data(), yuva_buffer->StrideU(),
                     multiplex_dummy_planes_.data(), yuva_buffer->StrideV(),
                     rtc::KeepRefUntilDone(input_image.video_frame_buffer()));
  VideoFrame alpha_image(alpha_buffer, input_image.timestamp(),
                         input_image.render_time_ms(), input_image.rotation());
  rv = encoders_[kAXXStream]->Encode(alpha_image, codec_specific_info,
                                     &adjusted_frame_types);
  return rv;
}
|
||||
|
||||
// Stores the sink that receives the final packed multiplex frames. Must be
// called before Encode(), which otherwise returns
// WEBRTC_VIDEO_CODEC_UNINITIALIZED. |callback| is stored as a raw pointer
// and is never deleted here.
int MultiplexEncoderAdapter::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  encoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
|
||||
|
||||
int MultiplexEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
|
||||
int64_t rtt) {
|
||||
for (auto& encoder : encoders_) {
|
||||
const int rv = encoder->SetChannelParameters(packet_loss, rtt);
|
||||
if (rv)
|
||||
return rv;
|
||||
}
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
int MultiplexEncoderAdapter::SetRateAllocation(const BitrateAllocation& bitrate,
|
||||
uint32_t framerate) {
|
||||
for (auto& encoder : encoders_) {
|
||||
// TODO(emircan): |framerate| is used to calculate duration in encoder
|
||||
// instances. We report the total frame rate to keep real time for now.
|
||||
// Remove this after refactoring duration logic.
|
||||
const int rv = encoder->SetRateAllocation(
|
||||
bitrate, static_cast<uint32_t>(encoders_.size()) * framerate);
|
||||
if (rv)
|
||||
return rv;
|
||||
}
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
int MultiplexEncoderAdapter::Release() {
|
||||
for (auto& encoder : encoders_) {
|
||||
const int rv = encoder->Release();
|
||||
if (rv)
|
||||
return rv;
|
||||
}
|
||||
encoders_.clear();
|
||||
adapter_callbacks_.clear();
|
||||
for (auto& stashed_image : stashed_images_) {
|
||||
for (auto& image_component : stashed_image.second.image_components) {
|
||||
delete[] image_component.encoded_image._buffer;
|
||||
}
|
||||
}
|
||||
stashed_images_.clear();
|
||||
if (combined_image_._buffer) {
|
||||
delete[] combined_image_._buffer;
|
||||
combined_image_._buffer = nullptr;
|
||||
}
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
const char* MultiplexEncoderAdapter::ImplementationName() const {
  // Human-readable name reported for stats/logging.
  static constexpr char kImplementationName[] = "MultiplexEncoderAdapter";
  return kImplementationName;
}
|
||||
|
||||
// Collects one encoded component (YUV or AXX) for the picture identified by
// |encodedImage._timeStamp|. Once all expected components of that picture
// have arrived, packs it — together with any earlier, still-stashed
// pictures, to keep the delta-frame dependency chain intact — and forwards
// the packed frames to |encoded_complete_callback_|.
EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
    AlphaCodecStream stream_idx,
    const EncodedImage& encodedImage,
    const CodecSpecificInfo* codecSpecificInfo,
    const RTPFragmentationHeader* fragmentation) {
  // The entry must have been created by Encode() before the sub-encoder
  // produced this frame.
  const auto& stashed_image_itr = stashed_images_.find(encodedImage._timeStamp);
  RTC_DCHECK(stashed_image_itr != stashed_images_.end());
  // Compute the successor only after validating the lookup: std::next() on
  // end() would be undefined behavior.
  const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
  MultiplexImage& stashed_image = stashed_image_itr->second;
  const uint8_t frame_count = stashed_image.component_count;

  // Save a deep copy of the encoded image; the sub-encoder may reuse its
  // buffer after this callback returns.
  MultiplexImageComponent image_component;
  image_component.component_index = stream_idx;
  image_component.codec_type =
      PayloadStringToCodecType(associated_format_.name);
  image_component.encoded_image = encodedImage;
  image_component.encoded_image._buffer = new uint8_t[encodedImage._length];
  std::memcpy(image_component.encoded_image._buffer, encodedImage._buffer,
              encodedImage._length);

  stashed_image.image_components.push_back(image_component);

  if (stashed_image.image_components.size() == frame_count) {
    // Complete case: flush everything up to and including this picture.
    auto iter = stashed_images_.begin();
    while (iter != stashed_images_.end() && iter != stashed_image_next_itr) {
      // No image at all, skip — but still advance the iterator; a bare
      // `continue` here would loop forever on an empty entry.
      if (iter->second.image_components.size() == 0) {
        ++iter;
        continue;
      }

      // We have to send out those stashed frames, otherwise the delta frame
      // dependency chain is broken.
      if (combined_image_._buffer)
        delete[] combined_image_._buffer;
      combined_image_ =
          MultiplexEncodedImagePacker::PackAndRelease(iter->second);

      CodecSpecificInfo codec_info = *codecSpecificInfo;
      codec_info.codecType = kVideoCodecMultiplex;
      codec_info.codecSpecific.generic.simulcast_idx = 0;
      encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info,
                                                 fragmentation);
      ++iter;
    }

    stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr);
  }
  return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
}
|
||||
|
||||
} // namespace webrtc
|
||||
@ -0,0 +1,145 @@
|
||||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "api/test/mock_video_decoder_factory.h"
|
||||
#include "api/test/mock_video_encoder_factory.h"
|
||||
#include "api/video_codecs/sdp_video_format.h"
|
||||
#include "common_video/include/video_frame_buffer.h"
|
||||
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
||||
#include "media/base/mediaconstants.h"
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
|
||||
#include "modules/video_coding/codecs/test/video_codec_test.h"
|
||||
#include "modules/video_coding/codecs/vp9/include/vp9.h"
|
||||
#include "rtc_base/keep_ref_until_done.h"
|
||||
#include "rtc_base/ptr_util.h"
|
||||
|
||||
using testing::_;
|
||||
using testing::Return;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;
|
||||
const VideoCodecType kMultiplexAssociatedCodecType =
|
||||
PayloadStringToCodecType(kMultiplexAssociatedCodecName);
|
||||
|
||||
// Fixture that runs the generic VideoCodecTest suite against the multiplex
// encoder/decoder adapters, backed by mocked factories that hand out real
// VP9 sub-codecs.
class TestMultiplexAdapter : public VideoCodecTest {
 public:
  TestMultiplexAdapter()
      : decoder_factory_(new webrtc::MockVideoDecoderFactory),
        encoder_factory_(new webrtc::MockVideoEncoderFactory) {}

 protected:
  // VideoCodecTest hooks: wrap the mocked factories in the adapters under
  // test.
  std::unique_ptr<VideoDecoder> CreateDecoder() override {
    return rtc::MakeUnique<MultiplexDecoderAdapter>(
        decoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName));
  }

  std::unique_ptr<VideoEncoder> CreateEncoder() override {
    return rtc::MakeUnique<MultiplexEncoderAdapter>(
        encoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName));
  }

  VideoCodec codec_settings() override {
    VideoCodec codec_settings;
    // Configure as the associated codec (VP9) first so the VP9() settings
    // apply, then switch the reported type to multiplex for the adapters.
    codec_settings.codecType = kMultiplexAssociatedCodecType;
    codec_settings.VP9()->numberOfTemporalLayers = 1;
    codec_settings.VP9()->numberOfSpatialLayers = 1;
    codec_settings.codecType = webrtc::kVideoCodecMultiplex;
    return codec_settings;
  }

  // Builds an I420A frame from |input_frame_|, reusing its Y plane as the
  // alpha plane.
  std::unique_ptr<VideoFrame> CreateI420AInputFrame() {
    rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
        input_frame_->video_frame_buffer()->ToI420();
    rtc::scoped_refptr<I420ABufferInterface> yuva_buffer = WrapI420ABuffer(
        yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
        yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
        yuv_buffer->DataV(), yuv_buffer->StrideV(), yuv_buffer->DataY(),
        yuv_buffer->StrideY(), rtc::KeepRefUntilDone(yuv_buffer));
    return rtc::WrapUnique<VideoFrame>(
        new VideoFrame(yuva_buffer, kVideoRotation_0, 0));
  }

 private:
  // Each factory is expected to be asked for exactly two codecs (one per
  // stream, see kAlphaCodecStreams); the WillOnce chain enforces that.
  void SetUp() override {
    EXPECT_CALL(*decoder_factory_, Die());
    // The decoders/encoders will be owned by the caller of
    // CreateVideoDecoder()/CreateVideoEncoder().
    VideoDecoder* decoder1 = VP9Decoder::Create().release();
    VideoDecoder* decoder2 = VP9Decoder::Create().release();
    EXPECT_CALL(*decoder_factory_, CreateVideoDecoderProxy(_))
        .WillOnce(Return(decoder1))
        .WillOnce(Return(decoder2));

    EXPECT_CALL(*encoder_factory_, Die());
    VideoEncoder* encoder1 = VP9Encoder::Create().release();
    VideoEncoder* encoder2 = VP9Encoder::Create().release();
    EXPECT_CALL(*encoder_factory_, CreateVideoEncoderProxy(_))
        .WillOnce(Return(encoder1))
        .WillOnce(Return(encoder2));

    VideoCodecTest::SetUp();
  }

  const std::unique_ptr<webrtc::MockVideoDecoderFactory> decoder_factory_;
  const std::unique_ptr<webrtc::MockVideoEncoderFactory> encoder_factory_;
};
|
||||
|
||||
// TODO(emircan): Currently VideoCodecTest tests do a complete setup
// step that goes beyond constructing |decoder_|. Simplify these tests to do
// less.
TEST_F(TestMultiplexAdapter, ConstructAndDestructDecoder) {
  // SetUp() already constructed and initialized the decoder; verify teardown.
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
}
|
||||
|
||||
TEST_F(TestMultiplexAdapter, ConstructAndDestructEncoder) {
  // SetUp() already constructed and initialized the encoder; verify teardown.
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
}
|
||||
|
||||
// Round-trips a plain I420 frame (no alpha) through the multiplex encoder
// and decoder, checking the reported codec type and reconstruction quality.
TEST_F(TestMultiplexAdapter, EncodeDecodeI420Frame) {
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            encoder_->Encode(*input_frame_, nullptr, nullptr));
  EncodedImage encoded_frame;
  CodecSpecificInfo codec_specific_info;
  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));

  // The adapter rewrites the packed frame's codec type to multiplex.
  EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);

  EXPECT_EQ(
      WEBRTC_VIDEO_CODEC_OK,
      decoder_->Decode(encoded_frame, false, nullptr, &codec_specific_info));
  std::unique_ptr<VideoFrame> decoded_frame;
  rtc::Optional<uint8_t> decoded_qp;
  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
  ASSERT_TRUE(decoded_frame);
  // 36 dB PSNR as a loose quality floor for the lossy round trip.
  EXPECT_GT(I420PSNR(input_frame_.get(), decoded_frame.get()), 36);
}
|
||||
|
||||
// Round-trips an I420A (alpha-carrying) frame through the multiplex encoder
// and decoder, checking the reported codec type and reconstruction quality.
TEST_F(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
  std::unique_ptr<VideoFrame> yuva_frame = CreateI420AInputFrame();
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            encoder_->Encode(*yuva_frame, nullptr, nullptr));
  EncodedImage encoded_frame;
  CodecSpecificInfo codec_specific_info;
  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));

  EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);

  // NOTE(review): unlike the I420 test above, Decode() is called with a null
  // codec_specific_info here — presumably intentional, but confirm.
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            decoder_->Decode(encoded_frame, false, nullptr, nullptr));
  std::unique_ptr<VideoFrame> decoded_frame;
  rtc::Optional<uint8_t> decoded_qp;
  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
  ASSERT_TRUE(decoded_frame);
  // 36 dB PSNR as a loose quality floor for the lossy round trip.
  EXPECT_GT(I420PSNR(yuva_frame.get(), decoded_frame.get()), 36);
}
|
||||
|
||||
} // namespace webrtc
|
||||
Reference in New Issue
Block a user