Add EncodedImageCallback::OnEncodedImage().

OnEncodedImage() replaces Encoded(), which is now deprecated.
The new OnEncodedImage() returns a Result struct that contains frame_id,
which tells the encoder the RTP timestamp assigned to the frame.
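
For reference, a sketch of the new callback interface as implied by the call
sites in this change; the constructors, error codes, and drop_next_frame
handling shown here are reconstructed from usage, not copied from the header,
and may differ in detail from the actual definition in webrtc/video_encoder.h:

#include <cstdint>

namespace webrtc {

class EncodedImage;
struct CodecSpecificInfo;
class RTPFragmentationHeader;

class EncodedImageCallback {
 public:
  struct Result {
    enum Error {
      OK,
      ERROR_SEND_FAILED,  // The frame was not sent to the network.
    };
    explicit Result(Error error) : error(error) {}
    Result(Error error, uint32_t frame_id)
        : error(error), frame_id(frame_id) {}

    Error error;
    // RTP timestamp assigned to the frame by the RTP sender.
    uint32_t frame_id = 0;
    // Tells the encoder to drop the next frame; set on the media
    // optimization path in VCMEncodedFrameCallback below.
    bool drop_next_frame = false;
  };

  // Called when an image has been encoded.
  virtual Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info,
      const RTPFragmentationHeader* fragmentation) = 0;

 protected:
  virtual ~EncodedImageCallback() {}
};

}  // namespace webrtc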

BUG=chromium:621691
R=niklas.enbom@webrtc.org, sprang@webrtc.org, stefan@webrtc.org

Review URL: https://codereview.webrtc.org/2089773002 .

Committed: https://crrev.com/ad34dbe934d47f88011045671b4aea00dbd5a795
Cr-Original-Commit-Position: refs/heads/master@{#13613}
Cr-Commit-Position: refs/heads/master@{#13615}
Author: Sergey Ulanov
Date: 2016-08-02 15:14:39 -07:00
Parent: ac4dc2cefe
Commit: 4c7f4cd2ef

43 changed files with 475 additions and 380 deletions


@ -225,8 +225,21 @@ class RtpRtcp : public Module {
// |payload_size| - size of payload buffer to send
// |fragmentation| - fragmentation offset data for fragmented frames such
// as layers or RED
// Returns -1 on failure else 0.
virtual int32_t SendOutgoingData(
// |transport_frame_id_out| - set to RTP timestamp.
// Returns true on success.
virtual bool SendOutgoingData(FrameType frame_type,
int8_t payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_header,
uint32_t* transport_frame_id_out) = 0;
// Deprecated version of the method above.
int32_t SendOutgoingData(
FrameType frame_type,
int8_t payload_type,
uint32_t timestamp,
@ -234,7 +247,14 @@ class RtpRtcp : public Module {
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation = nullptr,
const RTPVideoHeader* rtp_video_header = nullptr) = 0;
const RTPVideoHeader* rtp_video_header = nullptr) {
return SendOutgoingData(frame_type, payload_type, timestamp,
capture_time_ms, payload_data, payload_size,
fragmentation, rtp_video_header,
/*frame_id_out=*/nullptr)
? 0
: -1;
}
virtual bool TimeToSendPacket(uint32_t ssrc,
uint16_t sequence_number,
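
For callers, the new |transport_frame_id_out| argument is optional: pass
nullptr to ignore it, or a pointer that receives the RTP timestamp assigned
to the frame, as PayloadRouter does further down in this change. A caller
sketch, with a hypothetical helper name and assuming the WebRTC headers that
declare RtpRtcp, FrameType, and RTPVideoHeader are included:

// Hypothetical helper: send one encoded video frame and recover the RTP
// timestamp the sender assigned to it.
bool SendVideoFrame(webrtc::RtpRtcp* rtp_rtcp,
                    int8_t payload_type,
                    uint32_t rtp_timestamp,
                    int64_t capture_time_ms,
                    const uint8_t* payload,
                    size_t payload_size,
                    const webrtc::RTPVideoHeader* video_header,
                    uint32_t* frame_id_out) {
  // |frame_id_out| may be nullptr; on success it receives the RTP
  // timestamp written to the outgoing packets.
  return rtp_rtcp->SendOutgoingData(
      webrtc::kVideoFrameKey, payload_type, rtp_timestamp, capture_time_ms,
      payload, payload_size, /*fragmentation=*/nullptr, video_header,
      frame_id_out);
}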


@ -98,15 +98,16 @@ class MockRtpRtcp : public RtpRtcp {
MOCK_CONST_METHOD0(GetVideoBitrateObserver, BitrateStatisticsObserver*(void));
MOCK_CONST_METHOD1(EstimatedReceiveBandwidth,
int(uint32_t* available_bandwidth));
MOCK_METHOD8(SendOutgoingData,
int32_t(FrameType frame_type,
int8_t payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_header));
MOCK_METHOD9(SendOutgoingData,
bool(FrameType frame_type,
int8_t payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_header,
uint32_t* frame_id_out));
MOCK_METHOD5(TimeToSendPacket,
bool(uint32_t ssrc,
uint16_t sequence_number,


@ -264,9 +264,9 @@ class RtpRtcpRtxNackTest : public ::testing::Test {
uint32_t timestamp = 3000;
uint16_t nack_list[kVideoNackListSize];
for (int frame = 0; frame < kNumFrames; ++frame) {
EXPECT_EQ(0, rtp_rtcp_module_->SendOutgoingData(
webrtc::kVideoFrameDelta, kPayloadType, timestamp,
timestamp / 90, payload_data, payload_data_length));
EXPECT_TRUE(rtp_rtcp_module_->SendOutgoingData(
webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
payload_data, payload_data_length, nullptr, nullptr, nullptr));
// Min required delay until retransmit = 5 + RTT ms (RTT = 0).
fake_clock.AdvanceTimeMilliseconds(5);
int length = BuildNackList(nack_list);
@ -310,9 +310,9 @@ TEST_F(RtpRtcpRtxNackTest, LongNackList) {
// Send 30 frames which at the default size is roughly what we need to get
// enough packets.
for (int frame = 0; frame < kNumFrames; ++frame) {
EXPECT_EQ(0, rtp_rtcp_module_->SendOutgoingData(
webrtc::kVideoFrameDelta, kPayloadType, timestamp,
timestamp / 90, payload_data, payload_data_length));
EXPECT_TRUE(rtp_rtcp_module_->SendOutgoingData(
webrtc::kVideoFrameDelta, kPayloadType, timestamp, timestamp / 90,
payload_data, payload_data_length, nullptr, nullptr, nullptr));
// Prepare next frame.
timestamp += 3000;
fake_clock.AdvanceTimeMilliseconds(33);


@ -384,7 +384,7 @@ bool ModuleRtpRtcpImpl::SendingMedia() const {
return rtp_sender_.SendingMedia();
}
int32_t ModuleRtpRtcpImpl::SendOutgoingData(
bool ModuleRtpRtcpImpl::SendOutgoingData(
FrameType frame_type,
int8_t payload_type,
uint32_t time_stamp,
@ -392,7 +392,8 @@ int32_t ModuleRtpRtcpImpl::SendOutgoingData(
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_header) {
const RTPVideoHeader* rtp_video_header,
uint32_t* transport_frame_id_out) {
rtcp_sender_.SetLastRtpTime(time_stamp, capture_time_ms);
// Make sure an RTCP report isn't queued behind a key frame.
if (rtcp_sender_.TimeToSendRTCPReport(kVideoFrameKey == frame_type)) {
@ -400,7 +401,7 @@ int32_t ModuleRtpRtcpImpl::SendOutgoingData(
}
return rtp_sender_.SendOutgoingData(
frame_type, payload_type, time_stamp, capture_time_ms, payload_data,
payload_size, fragmentation, rtp_video_header);
payload_size, fragmentation, rtp_video_header, transport_frame_id_out);
}
bool ModuleRtpRtcpImpl::TimeToSendPacket(uint32_t ssrc,


@ -112,15 +112,15 @@ class ModuleRtpRtcpImpl : public RtpRtcp {
// Used by the codec module to deliver a video or audio frame for
// packetization.
int32_t SendOutgoingData(
FrameType frame_type,
int8_t payload_type,
uint32_t time_stamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation = NULL,
const RTPVideoHeader* rtp_video_header = NULL) override;
bool SendOutgoingData(FrameType frame_type,
int8_t payload_type,
uint32_t time_stamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_video_header,
uint32_t* transport_frame_id_out) override;
bool TimeToSendPacket(uint32_t ssrc,
uint16_t sequence_number,


@ -206,14 +206,9 @@ class RtpRtcpImplTest : public ::testing::Test {
kRtpVideoVp8, {vp8_header}};
const uint8_t payload[100] = {0};
EXPECT_EQ(0, module->impl_->SendOutgoingData(kVideoFrameKey,
codec_.plType,
0,
0,
payload,
sizeof(payload),
NULL,
&rtp_video_header));
EXPECT_EQ(true, module->impl_->SendOutgoingData(
kVideoFrameKey, codec_.plType, 0, 0, payload,
sizeof(payload), nullptr, &rtp_video_header, nullptr));
}
void IncomingRtcpNack(const RtpRtcpModule* module, uint16_t sequence_number) {


@ -448,14 +448,15 @@ bool RTPSender::ActivateCVORtpHeaderExtension() {
return video_rotation_active_;
}
int32_t RTPSender::SendOutgoingData(FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_hdr) {
bool RTPSender::SendOutgoingData(FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_header,
uint32_t* transport_frame_id_out) {
uint32_t ssrc;
uint16_t sequence_number;
{
@ -463,36 +464,35 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
rtc::CritScope lock(&send_critsect_);
ssrc = ssrc_;
sequence_number = sequence_number_;
if (!sending_media_) {
return 0;
}
if (!sending_media_)
return true;
}
RtpVideoCodecTypes video_type = kRtpVideoGeneric;
if (CheckPayloadType(payload_type, &video_type) != 0) {
LOG(LS_ERROR) << "Don't send data with unknown payload type: "
<< static_cast<int>(payload_type) << ".";
return -1;
return false;
}
int32_t ret_val;
bool result;
if (audio_configured_) {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp,
"Send", "type", FrameTypeToString(frame_type));
assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
frame_type == kEmptyFrame);
ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
payload_data, payload_size, fragmentation);
result = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
payload_data, payload_size, fragmentation);
} else {
TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms,
"Send", "type", FrameTypeToString(frame_type));
assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
if (frame_type == kEmptyFrame)
return 0;
return true;
if (rtp_hdr) {
playout_delay_oracle_.UpdateRequest(ssrc, rtp_hdr->playout_delay,
if (rtp_header) {
playout_delay_oracle_.UpdateRequest(ssrc, rtp_header->playout_delay,
sequence_number);
}
@ -507,9 +507,16 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
}
}
ret_val = video_->SendVideo(
video_type, frame_type, payload_type, capture_timestamp,
capture_time_ms, payload_data, payload_size, fragmentation, rtp_hdr);
result = video_->SendVideo(video_type, frame_type, payload_type,
capture_timestamp, capture_time_ms, payload_data,
payload_size, fragmentation, rtp_header);
}
if (transport_frame_id_out) {
rtc::CritScope lock(&send_critsect_);
// TODO(sergeyu): Move RTP timestamp calculation from BuildRTPheader() to
// SendOutgoingData() and pass it to SendVideo()/SendAudio() calls.
*transport_frame_id_out = timestamp_;
}
rtc::CritScope cs(&statistics_crit_);
@ -523,7 +530,7 @@ int32_t RTPSender::SendOutgoingData(FrameType frame_type,
frame_count_observer_->FrameCountUpdated(frame_counts_, ssrc);
}
return ret_val;
return result;
}
size_t RTPSender::TrySendRedundantPayloads(size_t bytes_to_send,
@ -945,12 +952,12 @@ size_t RTPSender::TimeToSendPadding(size_t bytes, int probe_cluster_id) {
}
// TODO(pwestin): send in the RtpHeaderParser to avoid parsing it again.
int32_t RTPSender::SendToNetwork(uint8_t* buffer,
size_t payload_length,
size_t rtp_header_length,
int64_t capture_time_ms,
StorageType storage,
RtpPacketSender::Priority priority) {
bool RTPSender::SendToNetwork(uint8_t* buffer,
size_t payload_length,
size_t rtp_header_length,
int64_t capture_time_ms,
StorageType storage,
RtpPacketSender::Priority priority) {
size_t length = payload_length + rtp_header_length;
RtpUtility::RtpHeaderParser rtp_parser(buffer, length);
@ -972,7 +979,7 @@ int32_t RTPSender::SendToNetwork(uint8_t* buffer,
// Used for NACK and to spread out the transmission of packets.
if (packet_history_.PutRTPPacket(buffer, length, capture_time_ms, storage) !=
0) {
return -1;
return false;
}
if (paced_sender_) {
@ -989,7 +996,7 @@ int32_t RTPSender::SendToNetwork(uint8_t* buffer,
"PacedSend", corrected_time_ms,
"capture_time_ms", corrected_time_ms);
}
return 0;
return true;
}
PacketOptions options;
@ -1010,14 +1017,14 @@ int32_t RTPSender::SendToNetwork(uint8_t* buffer,
packet_history_.SetSent(rtp_header.sequenceNumber);
if (!sent)
return -1;
return false;
{
rtc::CritScope lock(&send_critsect_);
media_has_been_sent_ = true;
}
UpdateRtpStats(buffer, length, rtp_header, false, false);
return 0;
return true;
}
void RTPSender::UpdateDelayStatistics(int64_t capture_time_ms, int64_t now_ms) {


@ -76,12 +76,12 @@ class RTPSenderInterface {
virtual size_t MaxDataPayloadLength() const = 0;
virtual uint16_t ActualSendBitrateKbit() const = 0;
virtual int32_t SendToNetwork(uint8_t* data_buffer,
size_t payload_length,
size_t rtp_header_length,
int64_t capture_time_ms,
StorageType storage,
RtpPacketSender::Priority priority) = 0;
virtual bool SendToNetwork(uint8_t* data_buffer,
size_t payload_length,
size_t rtp_header_length,
int64_t capture_time_ms,
StorageType storage,
RtpPacketSender::Priority priority) = 0;
virtual bool UpdateVideoRotation(uint8_t* rtp_packet,
size_t rtp_packet_length,
@ -154,14 +154,15 @@ class RTPSender : public RTPSenderInterface {
void SetMaxPayloadLength(size_t max_payload_length);
int32_t SendOutgoingData(FrameType frame_type,
int8_t payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_header);
bool SendOutgoingData(FrameType frame_type,
int8_t payload_type,
uint32_t timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* rtp_header,
uint32_t* transport_frame_id_out);
// RTP header extension
int32_t SetTransmissionTimeOffset(int32_t transmission_time_offset);
@ -276,12 +277,12 @@ class RTPSender : public RTPSenderInterface {
uint32_t Timestamp() const override;
uint32_t SSRC() const override;
int32_t SendToNetwork(uint8_t* data_buffer,
size_t payload_length,
size_t rtp_header_length,
int64_t capture_time_ms,
StorageType storage,
RtpPacketSender::Priority priority) override;
bool SendToNetwork(uint8_t* data_buffer,
size_t payload_length,
size_t rtp_header_length,
int64_t capture_time_ms,
StorageType storage,
RtpPacketSender::Priority priority) override;
// Audio.


@ -145,7 +145,7 @@ bool RTPSenderAudio::MarkerBit(FrameType frame_type, int8_t payload_type) {
return marker_bit;
}
int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
bool RTPSenderAudio::SendAudio(FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
const uint8_t* payload_data,
@ -195,7 +195,7 @@ int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
if (packet_size_samples >
(capture_timestamp - dtmf_timestamp_last_sent_)) {
// not time to send yet
return 0;
return true;
}
}
dtmf_timestamp_last_sent_ = capture_timestamp;
@ -228,24 +228,24 @@ int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
ended, dtmf_payload_type, dtmf_timestamp_,
static_cast<uint16_t>(dtmf_duration_samples), false);
} else {
if (SendTelephoneEventPacket(ended, dtmf_payload_type, dtmf_timestamp_,
dtmf_duration_samples,
!dtmf_event_first_packet_sent_) != 0) {
return -1;
if (!SendTelephoneEventPacket(ended, dtmf_payload_type, dtmf_timestamp_,
dtmf_duration_samples,
!dtmf_event_first_packet_sent_)) {
return false;
}
dtmf_event_first_packet_sent_ = true;
return 0;
return true;
}
}
return 0;
return true;
}
if (payload_size == 0 || payload_data == NULL) {
if (frame_type == kEmptyFrame) {
// we don't send empty audio RTP packets
// no error since we use it to drive DTMF when we use VAD
return 0;
return true;
}
return -1;
return false;
}
uint8_t data_buffer[IP_PACKET_SIZE];
bool marker_bit = MarkerBit(frame_type, payload_type);
@ -269,11 +269,11 @@ int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
clock_->TimeInMilliseconds());
}
if (rtpHeaderLength <= 0) {
return -1;
return false;
}
if (max_payload_length < (rtpHeaderLength + payload_size)) {
// Too large payload buffer.
return -1;
return false;
}
if (red_payload_type >= 0 && // Have we configured RED?
fragmentation && fragmentation->fragmentationVectorSize > 1 &&
@ -281,7 +281,7 @@ int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
if (timestampOffset <= 0x3fff) {
if (fragmentation->fragmentationVectorSize != 2) {
// we only support 2 codecs when using RED
return -1;
return false;
}
// only 0x80 if we have multiple blocks
data_buffer[rtpHeaderLength++] =
@ -290,7 +290,7 @@ int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
// sanity blockLength
if (blockLength > 0x3ff) { // block length 10 bits 1023 bytes
return -1;
return false;
}
uint32_t REDheader = (timestampOffset << 10) + blockLength;
ByteWriter<uint32_t>::WriteBigEndian(data_buffer + rtpHeaderLength,
@ -349,7 +349,7 @@ int32_t RTPSenderAudio::SendAudio(FrameType frame_type,
TRACE_EVENT_ASYNC_END2("webrtc", "Audio", capture_timestamp, "timestamp",
rtp_sender_->Timestamp(), "seqnum",
rtp_sender_->SequenceNumber());
int32_t send_result = rtp_sender_->SendToNetwork(
bool send_result = rtp_sender_->SendToNetwork(
data_buffer, payload_size, rtpHeaderLength, rtc::TimeMillis(),
kAllowRetransmission, RtpPacketSender::kHighPriority);
if (first_packet_sent_()) {
@ -403,18 +403,18 @@ int32_t RTPSenderAudio::SendTelephoneEvent(uint8_t key,
return AddDTMF(key, time_ms, level);
}
int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
int8_t dtmf_payload_type,
uint32_t dtmf_timestamp,
uint16_t duration,
bool marker_bit) {
bool RTPSenderAudio::SendTelephoneEventPacket(bool ended,
int8_t dtmf_payload_type,
uint32_t dtmf_timestamp,
uint16_t duration,
bool marker_bit) {
uint8_t dtmfbuffer[IP_PACKET_SIZE];
uint8_t sendCount = 1;
int32_t retVal = 0;
uint8_t send_count = 1;
bool result = true;
if (ended) {
// resend last packet in an event 3 times
sendCount = 3;
send_count = 3;
}
do {
// Send DTMF data
@ -422,7 +422,7 @@ int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
dtmfbuffer, dtmf_payload_type, marker_bit, dtmf_timestamp,
clock_->TimeInMilliseconds());
if (header_length <= 0)
return -1;
return false;
// reset CSRC and X bit
dtmfbuffer[0] &= 0xe0;
@ -451,12 +451,12 @@ int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
TRACE_EVENT_INSTANT2(
TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "Audio::SendTelephoneEvent",
"timestamp", dtmf_timestamp, "seqnum", rtp_sender_->SequenceNumber());
retVal = rtp_sender_->SendToNetwork(dtmfbuffer, 4, 12, rtc::TimeMillis(),
result = rtp_sender_->SendToNetwork(dtmfbuffer, 4, 12, rtc::TimeMillis(),
kAllowRetransmission,
RtpPacketSender::kHighPriority);
sendCount--;
} while (sendCount > 0 && retVal == 0);
send_count--;
} while (send_count > 0 && result);
return retVal;
return result;
}
} // namespace webrtc


@ -34,12 +34,12 @@ class RTPSenderAudio : public DTMFqueue {
uint32_t rate,
RtpUtility::Payload** payload);
int32_t SendAudio(FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation);
bool SendAudio(FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation);
// set audio packet size, used to determine when it's time to send a DTMF
// packet in silence (CNG)
@ -62,7 +62,7 @@ class RTPSenderAudio : public DTMFqueue {
int32_t RED(int8_t* payload_type) const;
protected:
int32_t SendTelephoneEventPacket(
bool SendTelephoneEventPacket(
bool ended,
int8_t dtmf_payload_type,
uint32_t dtmf_timestamp,


@ -190,9 +190,9 @@ class RtpSenderTest : public ::testing::Test {
ASSERT_GE(rtp_length, 0);
// Packet should be stored in a send bucket.
EXPECT_EQ(0, rtp_sender_->SendToNetwork(
packet_, payload_length, rtp_length, capture_time_ms,
kAllowRetransmission, RtpPacketSender::kNormalPriority));
EXPECT_TRUE(rtp_sender_->SendToNetwork(
packet_, payload_length, rtp_length, capture_time_ms,
kAllowRetransmission, RtpPacketSender::kNormalPriority));
}
void SendGenericPayload() {
@ -204,9 +204,9 @@ class RtpSenderTest : public ::testing::Test {
EXPECT_EQ(0, rtp_sender_->RegisterPayload(payload_name, kPayloadType, 90000,
0, 1500));
EXPECT_EQ(0, rtp_sender_->SendOutgoingData(
kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs,
kPayload, sizeof(kPayload), nullptr, nullptr));
EXPECT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, kPayloadType, kTimestamp, kCaptureTimeMs, kPayload,
sizeof(kPayload), nullptr, nullptr, nullptr));
}
};
@ -753,9 +753,9 @@ TEST_F(RtpSenderTest, TrafficSmoothingWithExtensions) {
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
EXPECT_EQ(0, transport_.packets_sent_);
@ -806,9 +806,9 @@ TEST_F(RtpSenderTest, TrafficSmoothingRetransmits) {
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
EXPECT_EQ(0, transport_.packets_sent_);
@ -888,9 +888,9 @@ TEST_F(RtpSenderTest, SendPadding) {
size_t rtp_length = static_cast<size_t>(rtp_length_int);
// Packet should be stored in a send bucket.
EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
int total_packets_sent = 0;
EXPECT_EQ(total_packets_sent, transport_.packets_sent_);
@ -948,9 +948,9 @@ TEST_F(RtpSenderTest, SendPadding) {
InsertPacket(RtpPacketSender::kNormalPriority, _, _, _, _, _));
// Packet should be stored in a send bucket.
EXPECT_EQ(0, rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
EXPECT_TRUE(rtp_sender_->SendToNetwork(packet_, 0, rtp_length,
capture_time_ms, kAllowRetransmission,
RtpPacketSender::kNormalPriority));
rtp_sender_->TimeToSendPacket(seq_num, capture_time_ms, false,
PacketInfo::kNotAProbe);
@ -1115,9 +1115,9 @@ TEST_F(RtpSenderTestWithoutPacer, SendGenericVideo) {
uint8_t payload[] = {47, 11, 32, 93, 89};
// Send keyframe
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@ -1141,9 +1141,9 @@ TEST_F(RtpSenderTestWithoutPacer, SendGenericVideo) {
payload[1] = 42;
payload[4] = 13;
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser2(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@ -1195,18 +1195,18 @@ TEST_F(RtpSenderTest, FrameCountCallbacks) {
EXPECT_CALL(mock_paced_sender_, InsertPacket(_, _, _, _, _, _))
.Times(::testing::AtLeast(2));
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr, nullptr));
EXPECT_EQ(1U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
EXPECT_EQ(1, callback.frame_counts_.key_frames);
EXPECT_EQ(0, callback.frame_counts_.delta_frames);
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr));
EXPECT_EQ(2U, callback.num_calls_);
EXPECT_EQ(ssrc, callback.ssrc_);
@ -1268,9 +1268,9 @@ TEST_F(RtpSenderTest, BitrateCallbacks) {
// Send a few frames.
for (uint32_t i = 0; i < kNumPackets; ++i) {
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
kVideoFrameKey, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, payload_type, 1234, 4321, payload, sizeof(payload),
nullptr, nullptr, nullptr));
fake_clock_.AdvanceTimeMilliseconds(kPacketInterval);
}
@ -1349,9 +1349,9 @@ TEST_F(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
rtp_sender_->RegisterRtpStatisticsCallback(&callback);
// Send a frame.
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, payload_type, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr, nullptr));
StreamDataCounters expected;
expected.transmitted.payload_bytes = 6;
expected.transmitted.header_bytes = 12;
@ -1391,9 +1391,9 @@ TEST_F(RtpSenderTestWithoutPacer, StreamDataCountersCallbacks) {
fec_params.fec_rate = 1;
fec_params.max_fec_frames = 1;
rtp_sender_->SetFecParameters(&fec_params, &fec_params);
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameDelta, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr, nullptr));
expected.transmitted.payload_bytes = 40;
expected.transmitted.header_bytes = 60;
expected.transmitted.packets = 5;
@ -1410,9 +1410,9 @@ TEST_F(RtpSenderAudioTest, SendAudio) {
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kAudioFrameCN, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@ -1439,9 +1439,9 @@ TEST_F(RtpSenderAudioTest, SendAudioWithAudioLevelExtension) {
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kAudioFrameCN, payload_type, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kAudioFrameCN, payload_type, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr, nullptr));
RtpUtility::RtpHeaderParser rtp_parser(transport_.last_sent_packet_,
transport_.last_sent_packet_len_);
@ -1490,15 +1490,15 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
// During start, it takes the starting timestamp as last sent timestamp.
// The duration is calculated as the difference of current and last sent
// timestamp. So for first call it will skip since the duration is zero.
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
capture_time_ms, 0, nullptr, 0,
nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
capture_time_ms, 0, nullptr, 0,
nullptr, nullptr, nullptr));
// DTMF Sample Length is (Frequency/1000) * Duration.
// So in this case, it is (8000/1000) * 500 = 4000.
// Sending it as two packets.
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
capture_time_ms + 2000, 0, nullptr,
0, nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kEmptyFrame, payload_type, capture_time_ms + 2000, 0,
nullptr, 0, nullptr, nullptr, nullptr));
std::unique_ptr<webrtc::RtpHeaderParser> rtp_parser(
webrtc::RtpHeaderParser::Create());
ASSERT_TRUE(rtp_parser.get() != nullptr);
@ -1508,9 +1508,9 @@ TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
// Marker Bit should be set to 1 for first packet.
EXPECT_TRUE(rtp_header.markerBit);
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
capture_time_ms + 4000, 0, nullptr,
0, nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kEmptyFrame, payload_type, capture_time_ms + 4000, 0,
nullptr, 0, nullptr, nullptr, nullptr));
ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
transport_.last_sent_packet_len_, &rtp_header));
// Marker Bit should be set to 0 for rest of the packets.
@ -1529,9 +1529,9 @@ TEST_F(RtpSenderTestWithoutPacer, BytesReportedCorrectly) {
0, 1500));
uint8_t payload[] = {47, 11, 32, 93, 89};
ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kVideoFrameKey, kPayloadType, 1234,
4321, payload, sizeof(payload),
nullptr, nullptr));
ASSERT_TRUE(rtp_sender_->SendOutgoingData(
kVideoFrameKey, kPayloadType, 1234, 4321, payload,
sizeof(payload), nullptr, nullptr, nullptr));
// Will send 2 full-size padding packets.
rtp_sender_->TimeToSendPadding(1, PacketInfo::kNotAProbe);


@ -79,18 +79,18 @@ void RTPSenderVideo::SendVideoPacket(uint8_t* data_buffer,
uint32_t capture_timestamp,
int64_t capture_time_ms,
StorageType storage) {
if (rtp_sender_->SendToNetwork(data_buffer, payload_length, rtp_header_length,
capture_time_ms, storage,
RtpPacketSender::kLowPriority) == 0) {
rtc::CritScope cs(&stats_crit_);
video_bitrate_.Update(payload_length + rtp_header_length,
clock_->TimeInMilliseconds());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketNormal", "timestamp", capture_timestamp,
"seqnum", seq_num);
} else {
if (!rtp_sender_->SendToNetwork(data_buffer, payload_length,
rtp_header_length, capture_time_ms, storage,
RtpPacketSender::kLowPriority)) {
LOG(LS_WARNING) << "Failed to send video packet " << seq_num;
return;
}
rtc::CritScope cs(&stats_crit_);
video_bitrate_.Update(payload_length + rtp_header_length,
clock_->TimeInMilliseconds());
TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
"Video::PacketNormal", "timestamp", capture_timestamp,
"seqnum", seq_num);
}
void RTPSenderVideo::SendVideoPacketAsRed(uint8_t* data_buffer,
@ -206,18 +206,17 @@ void RTPSenderVideo::SetFecParameters(const FecProtectionParams* delta_params,
}
}
int32_t RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* video_header) {
if (payload_size == 0) {
return -1;
}
bool RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* video_header) {
if (payload_size == 0)
return false;
std::unique_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
video_type, rtp_sender_->MaxDataPayloadLength(),
@ -262,14 +261,14 @@ int32_t RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
if (!packetizer->NextPacket(&dataBuffer[rtp_header_length],
&payload_bytes_in_packet, &last)) {
return -1;
return false;
}
// Write RTP header.
int32_t header_length = rtp_sender_->BuildRtpHeader(
dataBuffer, payload_type, last, capture_timestamp, capture_time_ms);
if (header_length <= 0)
return -1;
return false;
// According to
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
@ -324,7 +323,7 @@ int32_t RTPSenderVideo::SendVideo(RtpVideoCodecTypes video_type,
TRACE_EVENT_ASYNC_END1("webrtc", "Video", capture_time_ms, "timestamp",
rtp_sender_->Timestamp());
return 0;
return true;
}
uint32_t RTPSenderVideo::VideoBitrateSent() const {


@ -42,15 +42,15 @@ class RTPSenderVideo {
const char payload_name[RTP_PAYLOAD_NAME_SIZE],
int8_t payload_type);
int32_t SendVideo(RtpVideoCodecTypes video_type,
FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* video_header);
bool SendVideo(RtpVideoCodecTypes video_type,
FrameType frame_type,
int8_t payload_type,
uint32_t capture_timestamp,
int64_t capture_time_ms,
const uint8_t* payload_data,
size_t payload_size,
const RTPFragmentationHeader* fragmentation,
const RTPVideoHeader* video_header);
int32_t SendRTPIntraRequest();


@ -171,8 +171,9 @@ TEST_F(RtpRtcpAudioTest, Basic) {
// Send an empty RTP packet.
// Should fail since we have not registered the payload type.
EXPECT_EQ(-1, module1->SendOutgoingData(webrtc::kAudioFrameSpeech,
96, 0, -1, NULL, 0));
EXPECT_FALSE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
nullptr, 0, nullptr, nullptr,
nullptr));
CodecInst voice_codec;
memset(&voice_codec, 0, sizeof(voice_codec));
@ -197,8 +198,9 @@ TEST_F(RtpRtcpAudioTest, Basic) {
(voice_codec.rate < 0) ? 0 : voice_codec.rate));
const uint8_t test[5] = "test";
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
0, -1, test, 4));
EXPECT_EQ(true,
module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
test, 4, nullptr, nullptr, nullptr));
EXPECT_EQ(test_ssrc, rtp_receiver2_->SSRC());
uint32_t timestamp;
@ -271,9 +273,9 @@ TEST_F(RtpRtcpAudioTest, RED) {
const uint8_t test[5] = "test";
// Send a RTP packet.
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech,
96, 160, -1, test, 4,
&fragmentation));
EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 160, -1,
test, 4, &fragmentation, nullptr,
nullptr));
EXPECT_EQ(0, module1->SetSendREDPayloadType(-1));
EXPECT_EQ(-1, module1->SendREDPayloadType(&red));
@ -333,16 +335,18 @@ TEST_F(RtpRtcpAudioTest, DTMF) {
// Send RTP packets for 16 tones, 160 ms each, with a 100 ms
// pause between them = 2560 ms + 1600 ms = 4160 ms.
for (; timeStamp <= 250 * 160; timeStamp += 160) {
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
timeStamp, -1, test, 4));
EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
timeStamp, -1, test, 4, nullptr,
nullptr, nullptr));
fake_clock.AdvanceTimeMilliseconds(20);
module1->Process();
}
EXPECT_EQ(0, module1->SendTelephoneEventOutband(32, 9000, 10));
for (; timeStamp <= 740 * 160; timeStamp += 160) {
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
timeStamp, -1, test, 4));
EXPECT_TRUE(module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
timeStamp, -1, test, 4, nullptr,
nullptr, nullptr));
fake_clock.AdvanceTimeMilliseconds(20);
module1->Process();
}


@ -165,8 +165,9 @@ class RtpRtcpRtcpTest : public ::testing::Test {
// the receiving module.
// send RTP packet with the data "testtest"
const uint8_t test[9] = "testtest";
EXPECT_EQ(0, module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96,
0, -1, test, 8));
EXPECT_EQ(true,
module1->SendOutgoingData(webrtc::kAudioFrameSpeech, 96, 0, -1,
test, 8, nullptr, nullptr, nullptr));
}
virtual void TearDown() {


@ -147,11 +147,9 @@ class RtpRtcpVideoTest : public ::testing::Test {
TEST_F(RtpRtcpVideoTest, BasicVideo) {
uint32_t timestamp = 3000;
EXPECT_EQ(0, video_module_->SendOutgoingData(kVideoFrameDelta, 123,
timestamp,
timestamp / 90,
video_frame_,
payload_data_length_));
EXPECT_TRUE(video_module_->SendOutgoingData(
kVideoFrameDelta, 123, timestamp, timestamp / 90, video_frame_,
payload_data_length_, nullptr, nullptr, nullptr));
}
TEST_F(RtpRtcpVideoTest, PaddingOnlyFrames) {


@ -410,7 +410,8 @@ const char* VideoCodecTypeToStr(webrtc::VideoCodecType e) {
}
// Callbacks
int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
EncodedImageCallback::Result
VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::OnEncodedImage(
const EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) {
@ -419,7 +420,7 @@ int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded(
video_processor_->FrameEncoded(codec_specific_info->codecType,
encoded_image,
fragmentation);
return 0;
return Result(Result::OK, 0);
}
int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded(
VideoFrame& image) {


@ -230,7 +230,7 @@ class VideoProcessorImpl : public VideoProcessor {
public:
explicit VideoProcessorEncodeCompleteCallback(VideoProcessorImpl* vp)
: video_processor_(vp) {}
int32_t Encoded(
Result OnEncodedImage(
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) override;


@ -120,12 +120,12 @@ class AdapterEncodedImageCallback : public webrtc::EncodedImageCallback {
size_t stream_idx)
: adapter_(adapter), stream_idx_(stream_idx) {}
int32_t Encoded(
const webrtc::EncodedImage& encodedImage,
const webrtc::CodecSpecificInfo* codecSpecificInfo = NULL,
const webrtc::RTPFragmentationHeader* fragmentation = NULL) override {
return adapter_->Encoded(stream_idx_, encodedImage, codecSpecificInfo,
fragmentation);
EncodedImageCallback::Result OnEncodedImage(
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader* fragmentation) override {
return adapter_->OnEncodedImage(stream_idx_, encoded_image,
codec_specific_info, fragmentation);
}
private:
@ -404,7 +404,7 @@ int SimulcastEncoderAdapter::SetRates(uint32_t new_bitrate_kbit,
return WEBRTC_VIDEO_CODEC_OK;
}
int32_t SimulcastEncoderAdapter::Encoded(
EncodedImageCallback::Result SimulcastEncoderAdapter::OnEncodedImage(
size_t stream_idx,
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
@ -413,7 +413,7 @@ int32_t SimulcastEncoderAdapter::Encoded(
CodecSpecificInfoVP8* vp8Info = &(stream_codec_specific.codecSpecific.VP8);
vp8Info->simulcastIdx = stream_idx;
return encoded_complete_callback_->Encoded(
return encoded_complete_callback_->OnEncodedImage(
encodedImage, &stream_codec_specific, fragmentation);
}


@ -51,10 +51,11 @@ class SimulcastEncoderAdapter : public VP8Encoder {
// Eventual handler for the contained encoders' EncodedImageCallbacks, but
// called from an internal helper that also knows the correct stream
// index.
int32_t Encoded(size_t stream_idx,
const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo = NULL,
const RTPFragmentationHeader* fragmentation = NULL);
EncodedImageCallback::Result OnEncodedImage(
size_t stream_idx,
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation);
void OnDroppedFrame() override;


@ -242,16 +242,16 @@ class TestSimulcastEncoderAdapterFake : public ::testing::Test,
last_encoded_image_simulcast_index_(-1) {}
virtual ~TestSimulcastEncoderAdapterFake() {}
int32_t Encoded(const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo = NULL,
const RTPFragmentationHeader* fragmentation = NULL) override {
last_encoded_image_width_ = encodedImage._encodedWidth;
last_encoded_image_height_ = encodedImage._encodedHeight;
if (codecSpecificInfo) {
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
last_encoded_image_width_ = encoded_image._encodedWidth;
last_encoded_image_height_ = encoded_image._encodedHeight;
if (codec_specific_info) {
last_encoded_image_simulcast_index_ =
codecSpecificInfo->codecSpecific.VP8.simulcastIdx;
codec_specific_info->codecSpecific.VP8.simulcastIdx;
}
return 0;
return Result(Result::OK, encoded_image._timeStamp);
}
bool GetLastEncodedImageInfo(int* out_width,


@ -61,9 +61,9 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
delete[] encoded_frame_._buffer;
}
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
virtual Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
// Only store the base layer.
if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
if (encoded_image._frameType == kVideoFrameKey) {
@ -89,7 +89,7 @@ class Vp8TestEncodedImageCallback : public EncodedImageCallback {
codec_specific_info->codecSpecific.VP8.layerSync;
temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
codec_specific_info->codecSpecific.VP8.temporalIdx;
return 0;
return Result(Result::OK, encoded_image._timeStamp);
}
void GetLastEncodedFrameInfo(int* picture_id,
int* temporal_layer,
@ -338,34 +338,38 @@ class TestVp8Simulcast : public ::testing::Test {
if (expected_video_streams >= 1) {
EXPECT_CALL(
encoder_callback_,
Encoded(
OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
_, _))
.Times(1)
.WillRepeatedly(Return(0));
.WillRepeatedly(Return(EncodedImageCallback::Result(
EncodedImageCallback::Result::OK, 0)));
}
if (expected_video_streams >= 2) {
EXPECT_CALL(
encoder_callback_,
Encoded(
OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
_, _))
.Times(1)
.WillRepeatedly(Return(0));
.WillRepeatedly(Return(EncodedImageCallback::Result(
EncodedImageCallback::Result::OK, 0)));
}
if (expected_video_streams >= 3) {
EXPECT_CALL(
encoder_callback_,
Encoded(AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth),
Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
_, _))
OnEncodedImage(
AllOf(Field(&EncodedImage::_frameType, frame_type),
Field(&EncodedImage::_encodedWidth, kDefaultWidth),
Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
_, _))
.Times(1)
.WillRepeatedly(Return(0));
.WillRepeatedly(Return(EncodedImageCallback::Result(
EncodedImageCallback::Result::OK, 0)));
}
}
@ -590,13 +594,15 @@ class TestVp8Simulcast : public ::testing::Test {
encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
kVideoFrameDelta);
EXPECT_CALL(encoder_callback_,
Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
Field(&EncodedImage::_encodedWidth, width),
Field(&EncodedImage::_encodedHeight, height)),
_, _))
EXPECT_CALL(
encoder_callback_,
OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
Field(&EncodedImage::_encodedWidth, width),
Field(&EncodedImage::_encodedHeight, height)),
_, _))
.Times(1)
.WillRepeatedly(Return(0));
.WillRepeatedly(Return(
EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
// Switch back.


@ -43,9 +43,9 @@ class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
void* decoderSpecificInfo)
: encoded_frame_(frame), encode_complete_(false) {}
virtual int Encoded(const EncodedImage& encoded_frame_,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader*);
Result OnEncodedImage(const EncodedImage& encoded_frame_,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
bool EncodeComplete();
private:
@ -54,9 +54,10 @@ class Vp8UnitTestEncodeCompleteCallback : public webrtc::EncodedImageCallback {
bool encode_complete_;
};
int Vp8UnitTestEncodeCompleteCallback::Encoded(
webrtc::EncodedImageCallback::Result
Vp8UnitTestEncodeCompleteCallback::OnEncodedImage(
const EncodedImage& encoded_frame,
const CodecSpecificInfo* codecSpecificInfo,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
if (encoded_frame_->_size < encoded_frame._length) {
delete[] encoded_frame_->_buffer;
@ -72,7 +73,7 @@ int Vp8UnitTestEncodeCompleteCallback::Encoded(
encoded_frame_->_frameType = encoded_frame._frameType;
encoded_frame_->_completeFrame = encoded_frame._completeFrame;
encode_complete_ = true;
return 0;
return Result(Result::OK, 0);
}
bool Vp8UnitTestEncodeCompleteCallback::EncodeComplete() {


@ -26,9 +26,9 @@ class Vp8SequenceCoderEncodeCallback : public webrtc::EncodedImageCallback {
explicit Vp8SequenceCoderEncodeCallback(FILE* encoded_file)
: encoded_file_(encoded_file), encoded_bytes_(0) {}
~Vp8SequenceCoderEncodeCallback();
int Encoded(const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader*);
Result OnEncodedImage(const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codec_specific_info,
const webrtc::RTPFragmentationHeader*);
// Returns the encoded image.
webrtc::EncodedImage encoded_image() { return encoded_image_; }
size_t encoded_bytes() { return encoded_bytes_; }
@ -43,7 +43,9 @@ Vp8SequenceCoderEncodeCallback::~Vp8SequenceCoderEncodeCallback() {
delete[] encoded_image_._buffer;
encoded_image_._buffer = NULL;
}
int Vp8SequenceCoderEncodeCallback::Encoded(
webrtc::EncodedImageCallback::Result
Vp8SequenceCoderEncodeCallback::OnEncodedImage(
const webrtc::EncodedImage& encoded_image,
const webrtc::CodecSpecificInfo* codecSpecificInfo,
const webrtc::RTPFragmentationHeader* fragmentation) {
@ -58,11 +60,11 @@ int Vp8SequenceCoderEncodeCallback::Encoded(
if (encoded_file_ != NULL) {
if (fwrite(encoded_image._buffer, 1, encoded_image._length,
encoded_file_) != encoded_image._length) {
return -1;
return Result(Result::ERROR_SEND_FAILED, 0);
}
}
encoded_bytes_ += encoded_image_._length;
return 0;
return Result(Result::OK, 0);
}
// TODO(mikhal): Add support for varying the frame size.


@ -21,6 +21,7 @@
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
namespace webrtc {
VCMGenericEncoder::VCMGenericEncoder(
VideoEncoder* encoder,
VCMEncodedFrameCallback* encoded_frame_callback,
@ -143,23 +144,25 @@ VCMEncodedFrameCallback::VCMEncodedFrameCallback(
VCMEncodedFrameCallback::~VCMEncodedFrameCallback() {}
int32_t VCMEncodedFrameCallback::Encoded(
EncodedImageCallback::Result VCMEncodedFrameCallback::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) {
TRACE_EVENT_INSTANT1("webrtc", "VCMEncodedFrameCallback::Encoded",
"timestamp", encoded_image._timeStamp);
int ret_val = post_encode_callback_->Encoded(encoded_image, codec_specific,
fragmentation_header);
if (ret_val < 0)
return ret_val;
Result result = post_encode_callback_->OnEncodedImage(
encoded_image, codec_specific, fragmentation_header);
if (result.error != Result::OK)
return result;
if (media_opt_) {
media_opt_->UpdateWithEncodedData(encoded_image);
if (internal_source_)
return media_opt_->DropFrame(); // Signal to encoder to drop next frame.
if (internal_source_) {
// Signal to encoder to drop next frame.
result.drop_next_frame = media_opt_->DropFrame();
}
}
return VCM_OK;
return result;
}
} // namespace webrtc
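
Previously the media optimizer's drop request was signaled through the
callback's int32_t return value; it now travels in Result::drop_next_frame.
A sketch of how an encoder implementation might consume it (hypothetical;
concrete encoders wire this into their own Encode() paths, and the member
and variable names here are illustrative):

// Inside a hypothetical VideoEncoder, after producing an encoded frame:
EncodedImageCallback::Result result =
    encoded_complete_callback_->OnEncodedImage(encoded_image,
                                               &codec_specific,
                                               &fragmentation);
if (result.error != EncodedImageCallback::Result::OK)
  return WEBRTC_VIDEO_CODEC_ERROR;
if (result.drop_next_frame) {
  // Media optimization asked us to skip the next incoming frame.
  drop_next_frame_ = true;
}
return WEBRTC_VIDEO_CODEC_OK;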


@ -41,9 +41,10 @@ class VCMEncodedFrameCallback : public EncodedImageCallback {
virtual ~VCMEncodedFrameCallback();
// Implements EncodedImageCallback.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific,
const RTPFragmentationHeader* fragmentation_header) override;
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
void SetInternalSource(bool internal_source) {
internal_source_ = internal_source;
}


@ -22,10 +22,10 @@ namespace webrtc {
class MockEncodedImageCallback : public EncodedImageCallback {
public:
MOCK_METHOD3(Encoded,
int32_t(const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation));
MOCK_METHOD3(OnEncodedImage,
Result(const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragmentation));
};
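
Because the mocked method now returns Result rather than int32_t, test
expectations must supply an explicit Result value, matching the pattern the
simulcast tests in this change use:

using ::testing::_;
using ::testing::Return;

MockEncodedImageCallback callback;
EXPECT_CALL(callback, OnEncodedImage(_, _, _))
    .WillRepeatedly(Return(EncodedImageCallback::Result(
        EncodedImageCallback::Result::OK, /*frame_id=*/0)));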
class MockVideoEncoder : public VideoEncoder {


@ -45,7 +45,8 @@ namespace {
class EncodedImageCallbackWrapper : public EncodedImageCallback {
public:
EncodedImageCallbackWrapper()
: cs_(CriticalSectionWrapper::CreateCriticalSection()), callback_(NULL) {}
: cs_(CriticalSectionWrapper::CreateCriticalSection()),
callback_(nullptr) {}
virtual ~EncodedImageCallbackWrapper() {}
@ -54,14 +55,15 @@ class EncodedImageCallbackWrapper : public EncodedImageCallback {
callback_ = callback;
}
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
virtual Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
CriticalSectionScoped cs(cs_.get());
if (callback_)
return callback_->Encoded(encoded_image, codec_specific_info,
fragmentation);
return 0;
if (callback_) {
return callback_->OnEncodedImage(encoded_image, codec_specific_info,
fragmentation);
}
return Result(Result::ERROR_SEND_FAILED);
}
private:


@ -93,13 +93,13 @@ class EncodedImageCallbackImpl : public EncodedImageCallback {
virtual ~EncodedImageCallbackImpl() {}
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
assert(codec_specific_info);
frame_data_.push_back(
FrameData(encoded_image._length, *codec_specific_info));
return 0;
return Result(Result::OK, encoded_image._timeStamp);
}
void Reset() {


@ -148,9 +148,10 @@ int32_t FakeH264Encoder::RegisterEncodeCompleteCallback(
return 0;
}
int32_t FakeH264Encoder::Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragments) {
EncodedImageCallback::Result FakeH264Encoder::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragments) {
const size_t kSpsSize = 8;
const size_t kPpsSize = 11;
const int kIdrFrequency = 10;
@ -190,7 +191,7 @@ int32_t FakeH264Encoder::Encoded(const EncodedImage& encoded_image,
++fragment_counter;
}
}
return callback_->Encoded(encoded_image, NULL, &fragmentation);
return callback_->OnEncodedImage(encoded_image, NULL, &fragmentation);
}
DelayedEncoder::DelayedEncoder(Clock* clock, int delay_ms)


@ -61,9 +61,9 @@ class FakeH264Encoder : public FakeEncoder, public EncodedImageCallback {
int32_t RegisterEncodeCompleteCallback(
EncodedImageCallback* callback) override;
int32_t Encoded(const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragments) override;
Result OnEncodedImage(const EncodedImage& encodedImage,
const CodecSpecificInfo* codecSpecificInfo,
const RTPFragmentationHeader* fragments) override;
private:
EncodedImageCallback* callback_;


@ -137,15 +137,16 @@ void PayloadRouter::UpdateModuleSendingState() {
}
}
int32_t PayloadRouter::Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
EncodedImageCallback::Result PayloadRouter::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
rtc::CritScope lock(&crit_);
RTC_DCHECK(!rtp_modules_.empty());
if (!active_ || num_sending_modules_ == 0)
return -1;
return Result(Result::ERROR_SEND_FAILED);
int stream_idx = 0;
int stream_index = 0;
RTPVideoHeader rtp_video_header;
memset(&rtp_video_header, 0, sizeof(RTPVideoHeader));
@ -158,13 +159,19 @@ int32_t PayloadRouter::Encoded(const EncodedImage& encoded_image,
// The simulcast index might actually be larger than the number of modules
// in case the encoder was processing a frame during a codec reconfig.
if (rtp_video_header.simulcastIdx >= num_sending_modules_)
return -1;
stream_idx = rtp_video_header.simulcastIdx;
return Result(Result::ERROR_SEND_FAILED);
stream_index = rtp_video_header.simulcastIdx;
return rtp_modules_[stream_idx]->SendOutgoingData(
uint32_t frame_id;
bool send_result = rtp_modules_[stream_index]->SendOutgoingData(
encoded_image._frameType, payload_type_, encoded_image._timeStamp,
encoded_image.capture_time_ms_, encoded_image._buffer,
encoded_image._length, fragmentation, &rtp_video_header);
encoded_image._length, fragmentation, &rtp_video_header, &frame_id);
if (!send_result)
return Result(Result::ERROR_SEND_FAILED);
return Result(Result::OK, frame_id);
}
size_t PayloadRouter::MaxPayloadLength() const {


@ -32,8 +32,8 @@ struct RTPVideoHeader;
class PayloadRouter : public EncodedImageCallback {
public:
// Rtp modules are assumed to be sorted in simulcast index order.
explicit PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
int payload_type);
PayloadRouter(const std::vector<RtpRtcp*>& rtp_modules,
int payload_type);
~PayloadRouter();
static size_t DefaultMaxPayloadLength();
@ -46,9 +46,10 @@ class PayloadRouter : public EncodedImageCallback {
// Implements EncodedImageCallback.
// Returns Result::OK and the frame's RTP timestamp if the packet was
// routed / sent, Result::ERROR_SEND_FAILED otherwise.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
// Returns the maximum allowed data payload length, given the configured MTU
// and RTP headers.


@ -44,7 +44,7 @@ TEST(PayloadRouterTest, SendOnOneModule) {
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
encoded_image._length, nullptr, _, _))
.Times(0);
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
@ -52,7 +52,7 @@ TEST(PayloadRouterTest, SendOnOneModule) {
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
encoded_image._length, nullptr, _, _))
.Times(1);
EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
@ -60,7 +60,7 @@ TEST(PayloadRouterTest, SendOnOneModule) {
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
encoded_image._length, nullptr, _, _))
.Times(0);
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
@ -68,7 +68,7 @@ TEST(PayloadRouterTest, SendOnOneModule) {
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
encoded_image._length, nullptr, _, _))
.Times(1);
EXPECT_EQ(0, payload_router.Encoded(encoded_image, nullptr, nullptr));
@ -77,7 +77,7 @@ TEST(PayloadRouterTest, SendOnOneModule) {
EXPECT_CALL(rtp, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
encoded_image._length, nullptr, _, _))
.Times(0);
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, nullptr, nullptr));
}
@ -111,10 +111,9 @@ TEST(PayloadRouterTest, SendSimulcast) {
EXPECT_CALL(rtp_1, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
encoded_image._length, nullptr, _, _))
.Times(1);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
.Times(0);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _)).Times(0);
EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
CodecSpecificInfo codec_info_2;
@ -125,17 +124,17 @@ TEST(PayloadRouterTest, SendSimulcast) {
EXPECT_CALL(rtp_2, SendOutgoingData(encoded_image._frameType, payload_type,
encoded_image._timeStamp,
encoded_image.capture_time_ms_, &payload,
encoded_image._length, nullptr, _))
encoded_image._length, nullptr, _, _))
.Times(1);
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
.Times(0);
EXPECT_EQ(0, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
// Inactive.
payload_router.set_active(false);
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
.Times(0);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _))
.Times(0);
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_1, nullptr));
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));
@ -144,9 +143,9 @@ TEST(PayloadRouterTest, SendSimulcast) {
streams.pop_back(); // Remove a stream.
payload_router.SetSendStreams(streams);
payload_router.set_active(true);
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _))
EXPECT_CALL(rtp_1, SendOutgoingData(_, _, _, _, _, _, _, _, _))
.Times(0);
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _))
EXPECT_CALL(rtp_2, SendOutgoingData(_, _, _, _, _, _, _, _, _))
.Times(0);
codec_info_2.codecSpecific.VP8.simulcastIdx = 1;
EXPECT_EQ(-1, payload_router.Encoded(encoded_image, &codec_info_2, nullptr));


@ -85,10 +85,12 @@ class VideoEncoderSoftwareFallbackWrapperTest : public ::testing::Test {
class FakeEncodedImageCallback : public EncodedImageCallback {
public:
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
return ++callback_count_;
Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override {
++callback_count_;
return Result(Result::OK, callback_count_);
}
int callback_count_ = 0;
};

View File

@ -324,7 +324,7 @@ void VideoReceiveStream::OnFrame(const VideoFrame& video_frame) {
// TODO(asapersson): Consider moving callback from video_encoder.h or
// creating a different callback.
int32_t VideoReceiveStream::Encoded(
EncodedImageCallback::Result VideoReceiveStream::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
@ -348,7 +348,7 @@ int32_t VideoReceiveStream::Encoded(
}
}
return 0;
return Result(Result::OK, encoded_image._timeStamp);
}
bool VideoReceiveStream::DecodeThreadFunction(void* ptr) {

View File

@ -67,9 +67,10 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream,
void OnFrame(const VideoFrame& video_frame) override;
// Overrides EncodedImageCallback.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
const Config& config() const { return config_; }

View File

@ -720,9 +720,10 @@ void VideoSendStream::NormalUsage() {
config_.overuse_callback->OnLoadUpdate(LoadObserver::kUnderuse);
}
int32_t VideoSendStream::Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
EncodedImageCallback::Result VideoSendStream::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
if (config_.post_encode_callback) {
config_.post_encode_callback->EncodedFrameCallback(
EncodedFrame(encoded_image._buffer, encoded_image._length,
@ -730,7 +731,7 @@ int32_t VideoSendStream::Encoded(const EncodedImage& encoded_image,
}
protection_bitrate_calculator_.UpdateWithEncodedData(encoded_image);
int32_t return_value = payload_router_.Encoded(
EncodedImageCallback::Result result = payload_router_.OnEncodedImage(
encoded_image, codec_specific_info, fragmentation);
if (kEnableFrameRecording) {
@ -756,7 +757,7 @@ int32_t VideoSendStream::Encoded(const EncodedImage& encoded_image,
}
}
return return_value;
return result;
}
void VideoSendStream::ConfigureProtection() {

View File

@ -108,9 +108,10 @@ class VideoSendStream : public webrtc::VideoSendStream,
// Implements EncodedImageCallback. The implementation routes encoded frames
// to the |payload_router_| and |config.post_encode_callback| if set.
// Called on an arbitrary encoder callback thread.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
static bool EncoderThreadFunction(void* obj);
void EncoderProcess();

View File

@ -193,9 +193,10 @@ int64_t ViEEncoder::time_of_last_frame_activity_ms() {
return time_of_last_frame_activity_ms_;
}
int32_t ViEEncoder::Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
EncodedImageCallback::Result ViEEncoder::OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
{
rtc::CritScope lock(&data_cs_);
time_of_last_frame_activity_ms_ = rtc::TimeMillis();
@ -204,11 +205,11 @@ int32_t ViEEncoder::Encoded(const EncodedImage& encoded_image,
stats_proxy_->OnSendEncodedImage(encoded_image, codec_specific_info);
}
int success =
sink_->Encoded(encoded_image, codec_specific_info, fragmentation);
EncodedImageCallback::Result result =
sink_->OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
overuse_detector_->FrameSent(encoded_image._timeStamp);
return success;
return result;
}
void ViEEncoder::SendStatistics(uint32_t bit_rate,

View File

@ -82,9 +82,10 @@ class ViEEncoder : public EncodedImageCallback,
// Implements EncodedImageCallback.
int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
EncodedImageCallback::Result OnEncodedImage(
const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) override;
// Implements VideoSendStatisticsCallback.
void SendStatistics(uint32_t bit_rate,

View File

@ -30,11 +30,47 @@ class EncodedImageCallback {
public:
virtual ~EncodedImageCallback() {}
struct Result {
enum Error {
OK,
// Failed to send the packet.
ERROR_SEND_FAILED,
};
Result(Error error) : error(error) {}
Result(Error error, uint32_t frame_id) : error(error), frame_id(frame_id) {}
Error error;
// Frame ID assigned to the frame. The frame ID should be the same as the ID
// seen by the receiver for this frame. The RTP timestamp of the frame is
// used as the frame ID when RTP is used to send video. Valid only when
// error=OK.
uint32_t frame_id = 0;
// Tells the encoder that the next frame should be dropped.
bool drop_next_frame = false;
};
// Callback function which is called when an image has been encoded.
// TODO(perkj): Change this to return void.
virtual Result OnEncodedImage(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) {
return (Encoded(encoded_image, codec_specific_info, fragmentation) == 0)
? Result(Result::OK, 0)
: Result(Result::ERROR_SEND_FAILED);
}
// DEPRECATED.
// TODO(sergeyu): Remove this method.
virtual int32_t Encoded(const EncodedImage& encoded_image,
const CodecSpecificInfo* codec_specific_info,
const RTPFragmentationHeader* fragmentation) = 0;
const RTPFragmentationHeader* fragmentation) {
Result result =
OnEncodedImage(encoded_image, codec_specific_info, fragmentation);
return (result.error != Result::OK) ? -1 : (result.drop_next_frame ? 1 : 0);
}
};
class VideoEncoder {
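
To make the new contract concrete, here is a minimal sketch of a sink implementing OnEncodedImage() (illustrative only; ExampleSink is not part of this CL, and the include path assumes the callback stays in video_encoder.h as the hunk above suggests). Returning the RTP timestamp in Result::frame_id is how the encoder learns the ID the receiver will see for the frame; drop_next_frame can additionally be set to ask the encoder to skip the following frame:

#include "webrtc/video_encoder.h"

class ExampleSink : public webrtc::EncodedImageCallback {
 public:
  Result OnEncodedImage(
      const webrtc::EncodedImage& encoded_image,
      const webrtc::CodecSpecificInfo* codec_specific_info,
      const webrtc::RTPFragmentationHeader* fragmentation) override {
    // Report the frame's RTP timestamp back to the encoder as its frame ID.
    return Result(Result::OK, encoded_image._timeStamp);
  }
};

Note that only one of the two virtuals needs to be overridden: each default implementation delegates to the other, so overriding OnEncodedImage() alone breaks the mutual recursion, as FakeEncodedImageCallback above does.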

View File

@ -360,12 +360,12 @@ int32_t Channel::SendData(FrameType frameType,
// Push data from the ACM to the RTP/RTCP module to deliver an audio frame
// for packetization.
// This call will trigger Transport::SendPacket() from the RTP/RTCP module.
if (_rtpRtcpModule->SendOutgoingData(
if (!_rtpRtcpModule->SendOutgoingData(
(FrameType&)frameType, payloadType, timeStamp,
// Leaving the time when this frame was
// received from the capture device as
// undefined for voice for now.
-1, payloadData, payloadSize, fragmentation) == -1) {
-1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
_engineStatisticsPtr->SetLastError(
VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
"Channel::SendData() failed to send data to RTP/RTCP module");