Add EncodedImageCallback::OnEncodedImage().
OnEncodedImage() replaces Encoded(), which is now deprecated. The new OnEncodedImage() returns a Result struct whose frame_id field tells the encoder the RTP timestamp assigned to the frame.

BUG=chromium:621691
R=niklas.enbom@webrtc.org, sprang@webrtc.org, stefan@webrtc.org

Review URL: https://codereview.webrtc.org/2089773002 .

Committed: https://crrev.com/ad34dbe934d47f88011045671b4aea00dbd5a795
Cr-Original-Commit-Position: refs/heads/master@{#13613}
Cr-Commit-Position: refs/heads/master@{#13615}
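For reference, the shape of the new interface as exercised by this change: a consumer overrides OnEncodedImage() and returns a Result built from an error code and a frame_id. The sketch below is illustrative only; the sink class name and the Deliver() helper are hypothetical, the include path is an assumption, and only the OnEncodedImage() signature, the Result(error, frame_id) constructor, and the Result::OK / Result::ERROR_SEND_FAILED codes are taken from this diff.

// Illustrative sketch only (not part of this change). The sink name and
// Deliver() helper are hypothetical; the include path is an assumption.
#include "webrtc/video_encoder.h"

class ExampleEncodedImageSink : public webrtc::EncodedImageCallback {
 public:
  Result OnEncodedImage(
      const webrtc::EncodedImage& encoded_image,
      const webrtc::CodecSpecificInfo* codec_specific_info,
      const webrtc::RTPFragmentationHeader* fragmentation) override {
    // Hand the frame to whatever transport or file sink this wraps.
    if (!Deliver(encoded_image))
      return Result(Result::ERROR_SEND_FAILED, 0);
    // Report the frame's RTP timestamp back to the encoder via frame_id,
    // as the updated test callbacks in this change do with _timeStamp.
    return Result(Result::OK, encoded_image._timeStamp);
  }

 private:
  // Hypothetical delivery hook; a real sink would packetize or write out
  // the encoded buffer here.
  bool Deliver(const webrtc::EncodedImage& image) { return image._length > 0; }
};

Returning the frame's RTP timestamp as frame_id mirrors the updated test callbacks below, which pass encoded_image._timeStamp back so the encoder can learn which RTP timestamp was assigned to the frame.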
@@ -410,7 +410,8 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
 }
 
 // Callbacks
-int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
+EncodedImageCallback::Result
+VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::OnEncodedImage(
     const EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codec_specific_info,
     const webrtc::RTPFragmentationHeader* fragmentation) {
@@ -419,7 +420,7 @@ int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
   video_processor_->FrameEncoded(codec_specific_info->codecType,
                                  encoded_image,
                                  fragmentation);
-  return 0;
+  return Result(Result::OK, 0);
 }
 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded(
     VideoFrame& image) {
@@ -230,7 +230,7 @@ class VideoProcessorImpl : public VideoProcessor {
   public:
    explicit VideoProcessorEncodeCompleteCallback(VideoProcessorImpl* vp)
        : video_processor_(vp) {}
-   int32_t Encoded(
+   Result OnEncodedImage(
        const webrtc::EncodedImage& encoded_image,
        const webrtc::CodecSpecificInfo* codec_specific_info,
        const webrtc::RTPFragmentationHeader* fragmentation) override;
@@ -120,12 +120,12 @@ class AdapterEncodedImageCallback : public webrtc::EncodedImageCallback {
                               size_t stream_idx)
       : adapter_(adapter), stream_idx_(stream_idx) {}
 
-  int32_t Encoded(
-      const webrtc::EncodedImage& encodedImage,
-      const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
-      const webrtc::RTPFragmentationHeader* fragmentation = NULL) override {
-    return adapter_->Encoded(stream_idx_, encodedImage, codecSpecificInfo,
-                             fragmentation);
+  EncodedImageCallback::Result OnEncodedImage(
+      const webrtc::EncodedImage& encoded_image,
+      const webrtc::CodecSpecificInfo* codec_specific_info,
+      const webrtc::RTPFragmentationHeader* fragmentation) override {
+    return adapter_->OnEncodedImage(stream_idx_, encoded_image,
+                                    codec_specific_info, fragmentation);
   }
 
  private:
@@ -404,7 +404,7 @@ int SimulcastEncoderAdapter::SetRates(uint32_t new_bitrate_kbit,
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
-int32_t SimulcastEncoderAdapter::Encoded(
+EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
     size_t stream_idx,
     const EncodedImage& encodedImage,
     const CodecSpecificInfo* codecSpecificInfo,
@@ -413,7 +413,7 @@ int32_t SimulcastEncoderAdapter::Encoded(
   CodecSpecificInfoVP8* vp8Info = &(stream_codec_specific.codecSpecific.VP8);
   vp8Info->simulcastIdx = stream_idx;
 
-  return encoded_complete_callback_->Encoded(
+  return encoded_complete_callback_->OnEncodedImage(
       encodedImage, &stream_codec_specific, fragmentation);
 }
 
@@ -51,10 +51,11 @@ class SimulcastEncoderAdapter : public VP8Encoder {
   // Eventual handler for the contained encoders' EncodedImageCallbacks, but
   // called from an internal helper that also knows the correct stream
   // index.
-  int32_t Encoded(size_t stream_idx,
-                  const EncodedImage& encodedImage,
-                  const CodecSpecificInfo* codecSpecificInfo = NULL,
-                  const RTPFragmentationHeader* fragmentation = NULL);
+  EncodedImageCallback::Result OnEncodedImage(
+      size_t stream_idx,
+      const EncodedImage& encoded_image,
+      const CodecSpecificInfo* codec_specific_info,
+      const RTPFragmentationHeader* fragmentation);
 
   void OnDroppedFrame() override;
 
@@ -242,16 +242,16 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
         last_encoded_image_simulcast_index_(-1) {}
   virtual ~TestSimulcastEncoderAdapterFake() {}
 
-  int32_t Encoded(const EncodedImage& encodedImage,
-                  const CodecSpecificInfo* codecSpecificInfo = NULL,
-                  const RTPFragmentationHeader* fragmentation = NULL) override {
-    last_encoded_image_width_ = encodedImage._encodedWidth;
-    last_encoded_image_height_ = encodedImage._encodedHeight;
-    if (codecSpecificInfo) {
+  Result OnEncodedImage(const EncodedImage& encoded_image,
+                        const CodecSpecificInfo* codec_specific_info,
+                        const RTPFragmentationHeader* fragmentation) override {
+    last_encoded_image_width_ = encoded_image._encodedWidth;
+    last_encoded_image_height_ = encoded_image._encodedHeight;
+    if (codec_specific_info) {
       last_encoded_image_simulcast_index_ =
-          codecSpecificInfo->codecSpecific.VP8.simulcastIdx;
+          codec_specific_info->codecSpecific.VP8.simulcastIdx;
     }
-    return 0;
+    return Result(Result::OK, encoded_image._timeStamp);
   }
 
   bool GetLastEncodedImageInfo(int* out_width,
@@ -61,9 +61,9 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
     delete[] encoded_frame_._buffer;
   }
 
-  virtual int32_t Encoded(const EncodedImage& encoded_image,
-                          const CodecSpecificInfo* codec_specific_info,
-                          const RTPFragmentationHeader* fragmentation) {
+  virtual Result OnEncodedImage(const EncodedImage& encoded_image,
+                                const CodecSpecificInfo* codec_specific_info,
+                                const RTPFragmentationHeader* fragmentation) {
     // Only store the base layer.
     if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
       if (encoded_image._frameType == kVideoFrameKey) {
@@ -89,7 +89,7 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
         codec_specific_info->codecSpecific.VP8.layerSync;
     temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
         codec_specific_info->codecSpecific.VP8.temporalIdx;
-    return 0;
+    return Result(Result::OK, encoded_image._timeStamp);
   }
   void GetLastEncodedFrameInfo(int* picture_id,
                                int* temporal_layer,
@@ -338,34 +338,38 @@ class TestVp8Simulcast : public ::testing::Test {
     if (expected_video_streams >= 1) {
       EXPECT_CALL(
           encoder_callback_,
-          Encoded(
+          OnEncodedImage(
              AllOf(Field(&EncodedImage::_frameType, frame_type),
                    Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
                    Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
              _, _))
          .Times(1)
-          .WillRepeatedly(Return(0));
+          .WillRepeatedly(Return(EncodedImageCallback::Result(
+              EncodedImageCallback::Result::OK, 0)));
     }
     if (expected_video_streams >= 2) {
       EXPECT_CALL(
           encoder_callback_,
-          Encoded(
+          OnEncodedImage(
              AllOf(Field(&EncodedImage::_frameType, frame_type),
                    Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
                    Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
              _, _))
          .Times(1)
-          .WillRepeatedly(Return(0));
+          .WillRepeatedly(Return(EncodedImageCallback::Result(
+              EncodedImageCallback::Result::OK, 0)));
     }
     if (expected_video_streams >= 3) {
       EXPECT_CALL(
           encoder_callback_,
-          Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
-                        Field(&EncodedImage::_encodedWidth, kDefaultWidth),
-                        Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
-                  _, _))
+          OnEncodedImage(
+              AllOf(Field(&EncodedImage::_frameType, frame_type),
+                    Field(&EncodedImage::_encodedWidth, kDefaultWidth),
+                    Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
+              _, _))
           .Times(1)
-          .WillRepeatedly(Return(0));
+          .WillRepeatedly(Return(EncodedImageCallback::Result(
+              EncodedImageCallback::Result::OK, 0)));
     }
   }
 
@@ -590,13 +594,15 @@ class TestVp8Simulcast : public ::testing::Test {
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
     std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                        kVideoFrameDelta);
-    EXPECT_CALL(encoder_callback_,
-                Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
-                              Field(&EncodedImage::_encodedWidth, width),
-                              Field(&EncodedImage::_encodedHeight, height)),
-                        _, _))
+    EXPECT_CALL(
+        encoder_callback_,
+        OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+                             Field(&EncodedImage::_encodedWidth, width),
+                             Field(&EncodedImage::_encodedHeight, height)),
+                       _, _))
         .Times(1)
-        .WillRepeatedly(Return(0));
+        .WillRepeatedly(Return(
+            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
     // Switch back.
@@ -43,9 +43,9 @@ class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
                                       void* decoderSpecificInfo)
       : encoded_frame_(frame), encode_complete_(false) {}
 
-  virtual int Encoded(const EncodedImage& encoded_frame_,
-                      const CodecSpecificInfo* codecSpecificInfo,
-                      const RTPFragmentationHeader*);
+  Result OnEncodedImage(const EncodedImage& encoded_frame_,
+                        const CodecSpecificInfo* codec_specific_info,
+                        const RTPFragmentationHeader* fragmentation) override;
   bool EncodeComplete();
 
  private:
@@ -54,9 +54,10 @@ class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
   bool encode_complete_;
 };
 
-int Vp8UnitTestEncodeCompleteCallback::Encoded(
+webrtc::EncodedImageCallback::Result
+Vp8UnitTestEncodeCompleteCallback::OnEncodedImage(
     const EncodedImage& encoded_frame,
-    const CodecSpecificInfo* codecSpecificInfo,
+    const CodecSpecificInfo* codec_specific_info,
     const RTPFragmentationHeader* fragmentation) {
   if (encoded_frame_->_size < encoded_frame._length) {
     delete[] encoded_frame_->_buffer;
@@ -72,7 +73,7 @@ int Vp8UnitTestEncodeCompleteCallback::Encoded(
   encoded_frame_->_frameType = encoded_frame._frameType;
   encoded_frame_->_completeFrame = encoded_frame._completeFrame;
   encode_complete_ = true;
-  return 0;
+  return Result(Result::OK, 0);
 }
 
 bool Vp8UnitTestEncodeCompleteCallback::EncodeComplete() {
@@ -26,9 +26,9 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
   explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
       : encoded_file_(encoded_file), encoded_bytes_(0) {}
   ~Vp8SequenceCoderEncodeCallback();
-  int Encoded(const webrtc::EncodedImage& encoded_image,
-              const webrtc::CodecSpecificInfo* codecSpecificInfo,
-              const webrtc::RTPFragmentationHeader*);
+  Result OnEncodedImage(const webrtc::EncodedImage& encoded_image,
+                        const webrtc::CodecSpecificInfo* codec_specific_info,
+                        const webrtc::RTPFragmentationHeader*);
   // Returns the encoded image.
   webrtc::EncodedImage encoded_image() { return encoded_image_; }
   size_t encoded_bytes() { return encoded_bytes_; }
@@ -43,7 +43,9 @@ Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
   delete[] encoded_image_._buffer;
   encoded_image_._buffer = NULL;
 }
-int Vp8SequenceCoderEncodeCallback::Encoded(
+
+webrtc::EncodedImageCallback::Result
+Vp8SequenceCoderEncodeCallback::OnEncodedImage(
     const webrtc::EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codecSpecificInfo,
     const webrtc::RTPFragmentationHeader* fragmentation) {
@@ -58,11 +60,11 @@ int Vp8SequenceCoderEncodeCallback::Encoded(
   if (encoded_file_ != NULL) {
     if (fwrite(encoded_image._buffer, 1, encoded_image._length,
                encoded_file_) != encoded_image._length) {
-      return -1;
+      return Result(Result::ERROR_SEND_FAILED, 0);
     }
   }
   encoded_bytes_ += encoded_image_._length;
-  return 0;
+  return Result(Result::OK, 0);
 }
 
 // TODO(mikhal): Add support for varying the frame size.