Update multiplex encoder to support attaching augmenting data to the video
The multiplex encoder now supports attaching user-defined data to a video frame. The data is sent together with the video frame and is therefore guaranteed to be synchronized with it. This is useful when the data and the video frame need to stay synchronized, for example when sending information about 3D objects or camera-tracking data along with the video stream.

The multiplex encoder with data is implemented in a modular way: a new VideoFrameBuffer type, AugmentedVideoFrameBuffer, holds the video frame and the data, and MultiplexVideoEncoder encodes both the frame and the data.

Change-Id: I23263f70d111f6f1783c070edec70bd11ebb9868
Bug: webrtc:9632
Reviewed-on: https://webrtc-review.googlesource.com/92642
Commit-Queue: Tarek Hefny <tarekh@google.com>
Reviewed-by: Niklas Enbom <niklas.enbom@webrtc.org>
Reviewed-by: Emircan Uysaler <emircan@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24297}
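For illustration, here is a minimal sketch of how a sender could wrap a frame's buffer in an AugmentedVideoFrameBuffer before handing the frame to the encoder. It assumes a constructor taking the wrapped buffer, the data, and its size (mirroring the GetVideoFrameBuffer/GetAugmentingData/GetAugmentingDataSize accessors used in the diff below); the helper name and include paths are illustrative, not part of this change.

#include <cstdint>
#include <cstring>
#include <memory>

#include "api/video/video_frame.h"
#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
#include "rtc_base/refcountedobject.h"

// Hypothetical helper: copies |size| bytes of |data| and attaches them to
// |frame| so the multiplex encoder can send them in sync with the image.
webrtc::VideoFrame AttachAugmentingData(const webrtc::VideoFrame& frame,
                                        const uint8_t* data,
                                        uint16_t size) {
  std::unique_ptr<uint8_t[]> copy(new uint8_t[size]);
  std::memcpy(copy.get(), data, size);
  rtc::scoped_refptr<webrtc::AugmentedVideoFrameBuffer> buffer(
      new rtc::RefCountedObject<webrtc::AugmentedVideoFrameBuffer>(
          frame.video_frame_buffer(), std::move(copy), size));
  // Keep the original timing metadata; only the buffer changes.
  return webrtc::VideoFrame(buffer, frame.timestamp(),
                            frame.render_time_ms(), frame.rotation());
}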
@@ -16,6 +16,7 @@
 #include "common_video/include/video_frame_buffer.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
 #include "modules/include/module_common_types.h"
+#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
 #include "rtc_base/keep_ref_until_done.h"
 #include "rtc_base/logging.h"
 
@@ -47,10 +48,12 @@ class MultiplexEncoderAdapter::AdapterEncodedImageCallback
 
 MultiplexEncoderAdapter::MultiplexEncoderAdapter(
     VideoEncoderFactory* factory,
-    const SdpVideoFormat& associated_format)
+    const SdpVideoFormat& associated_format,
+    bool supports_augmented_data)
     : factory_(factory),
       associated_format_(associated_format),
-      encoded_complete_callback_(nullptr) {}
+      encoded_complete_callback_(nullptr),
+      supports_augmented_data_(supports_augmented_data) {}
 
 MultiplexEncoderAdapter::~MultiplexEncoderAdapter() {
   Release();
@@ -122,13 +125,30 @@ int MultiplexEncoderAdapter::Encode(
   }
   const bool has_alpha = input_image.video_frame_buffer()->type() ==
                          VideoFrameBuffer::Type::kI420A;
+  std::unique_ptr<uint8_t[]> augmenting_data = nullptr;
+  uint16_t augmenting_data_length = 0;
+  AugmentedVideoFrameBuffer* augmented_video_frame_buffer = nullptr;
+  if (supports_augmented_data_) {
+    augmented_video_frame_buffer = static_cast<AugmentedVideoFrameBuffer*>(
+        input_image.video_frame_buffer().get());
+    augmenting_data_length =
+        augmented_video_frame_buffer->GetAugmentingDataSize();
+    augmenting_data =
+        std::unique_ptr<uint8_t[]>(new uint8_t[augmenting_data_length]);
+    memcpy(augmenting_data.get(),
+           augmented_video_frame_buffer->GetAugmentingData(),
+           augmenting_data_length);
+    augmenting_data_size_ = augmenting_data_length;
+  }
+
   {
     rtc::CritScope cs(&crit_);
     stashed_images_.emplace(
         std::piecewise_construct,
         std::forward_as_tuple(input_image.timestamp()),
-        std::forward_as_tuple(picture_index_,
-                              has_alpha ? kAlphaCodecStreams : 1));
+        std::forward_as_tuple(
+            picture_index_, has_alpha ? kAlphaCodecStreams : 1,
+            std::move(augmenting_data), augmenting_data_length));
   }
 
   ++picture_index_;
@@ -136,15 +156,18 @@ int MultiplexEncoderAdapter::Encode(
   // Encode YUV
   int rv = encoders_[kYUVStream]->Encode(input_image, codec_specific_info,
                                          &adjusted_frame_types);
 
   // If we do not receive an alpha frame, we send a single frame for this
   // |picture_index_|. The receiver will receive |frame_count| as 1 which
-  // soecifies this case.
+  // specifies this case.
   if (rv || !has_alpha)
     return rv;
 
   // Encode AXX
   const I420ABufferInterface* yuva_buffer =
-      input_image.video_frame_buffer()->GetI420A();
+      supports_augmented_data_
+          ? augmented_video_frame_buffer->GetVideoFrameBuffer()->GetI420A()
+          : input_image.video_frame_buffer()->GetI420A();
   rtc::scoped_refptr<I420BufferInterface> alpha_buffer =
       WrapI420Buffer(input_image.width(), input_image.height(),
                      yuva_buffer->DataA(), yuva_buffer->StrideA(),
@@ -177,12 +200,16 @@ int MultiplexEncoderAdapter::SetChannelParameters(uint32_t packet_loss,
 int MultiplexEncoderAdapter::SetRateAllocation(
     const VideoBitrateAllocation& bitrate,
     uint32_t framerate) {
+  VideoBitrateAllocation bitrate_allocation(bitrate);
+  bitrate_allocation.SetBitrate(
+      0, 0, bitrate.GetBitrate(0, 0) - augmenting_data_size_);
   for (auto& encoder : encoders_) {
     // TODO(emircan): |framerate| is used to calculate duration in encoder
     // instances. We report the total frame rate to keep real time for now.
     // Remove this after refactoring duration logic.
     const int rv = encoder->SetRateAllocation(
-        bitrate, static_cast<uint32_t>(encoders_.size()) * framerate);
+        bitrate_allocation,
+        static_cast<uint32_t>(encoders_.size()) * framerate);
     if (rv)
       return rv;
   }
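Taken together, a caller opts into this behavior at construction time, and the SetRateAllocation change reserves headroom for the attached bytes by subtracting augmenting_data_size_ from the (0, 0) layer bitrate before passing the allocation down. A rough usage sketch follows, assuming an existing VideoEncoderFactory; the function name and the "VP9" associated format are illustrative only.

#include <memory>

#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"

// |factory| provides the underlying codec instances for both streams.
std::unique_ptr<webrtc::VideoEncoder> CreateAugmentedMultiplexEncoder(
    webrtc::VideoEncoderFactory* factory) {
  // Passing true for |supports_augmented_data| makes the adapter treat
  // incoming buffers as AugmentedVideoFrameBuffer and forward their data.
  return std::unique_ptr<webrtc::VideoEncoder>(
      new webrtc::MultiplexEncoderAdapter(
          factory, webrtc::SdpVideoFormat("VP9"),
          /*supports_augmented_data=*/true));
}

As with any VideoEncoder, InitEncode and RegisterEncodeCompleteCallback still need to be called before frames are submitted.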