New jitter buffer experiment.
BUG=webrtc:5514
Review-Url: https://codereview.webrtc.org/2480293002
Cr-Commit-Position: refs/heads/master@{#15077}
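The receive-side changes below are gated on the "WebRTC-NewVideoJitterBuffer" field trial; the new code path is taken only when the trial resolves to "Enabled". As a minimal sketch (not part of this commit), assuming the default field-trial implementation in webrtc/system_wrappers is linked in, an application could opt in like this:

  // Sketch only: opts this process into the experiment. Assumes
  // webrtc::field_trial::InitFieldTrialsFromString() from
  // webrtc/system_wrappers/include/field_trial_default.h is available, is
  // called before any VideoReceiveStream is created, and that the trials
  // string stays alive for the lifetime of the process.
  #include "webrtc/system_wrappers/include/field_trial_default.h"

  int main() {
    webrtc::field_trial::InitFieldTrialsFromString(
        "WebRTC-NewVideoJitterBuffer/Enabled/");
    // ... set up the call and VideoReceiveStream as usual ...
    return 0;
  }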
@@ -256,6 +256,10 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
   }
   if (encoder_ == NULL) {
     encoder_ = new vpx_codec_ctx_t;
+    // Only randomize pid/tl0 the first time the encoder is initialized
+    // in order to not make random jumps mid-stream.
+    picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;  // NOLINT
+    tl0_pic_idx_ = static_cast<uint8_t>(rand());  // NOLINT
   }
   if (config_ == NULL) {
     config_ = new vpx_codec_enc_cfg_t;
@@ -270,8 +274,6 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
   if (num_temporal_layers_ == 0)
     num_temporal_layers_ = 1;
 
-  // Random start 16 bits is enough.
-  picture_id_ = static_cast<uint16_t>(rand()) & 0x7FFF;  // NOLINT
   // Allocate memory for encoded image
   if (encoded_image_._buffer != NULL) {
     delete[] encoded_image_._buffer;
@@ -366,8 +368,6 @@ int VP9EncoderImpl::InitEncode(const VideoCodec* inst,
     return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
   }
 
-  tl0_pic_idx_ = static_cast<uint8_t>(rand());  // NOLINT
-
   return InitAndSetControlSettings(inst);
 }
 
@@ -28,7 +28,7 @@ namespace {
 constexpr int kMaxFramesBuffered = 600;
 
 // Max number of decoded frame info that will be saved.
-constexpr int kMaxFramesHistory = 20;
+constexpr int kMaxFramesHistory = 50;
 }  // namespace
 
 FrameBuffer::FrameBuffer(Clock* clock,
@@ -114,7 +114,7 @@ FrameBuffer::ReturnReason FrameBuffer::NextFrame(
   if (next_frame_it != frames_.end()) {
     std::unique_ptr<FrameObject> frame = std::move(next_frame_it->second.frame);
     int64_t received_time = frame->ReceivedTime();
-    uint32_t timestamp = frame->Timestamp();
+    uint32_t timestamp = frame->timestamp;
 
     int64_t frame_delay;
     if (inter_frame_delay_.CalculateDelay(timestamp, &frame_delay,
@@ -299,7 +299,7 @@ TEST_F(TestFrameBuffer2, DropTemporalLayerSlowDecoder) {
   uint32_t ts = Rand();
 
   InsertFrame(pid, 0, ts, false);
-  InsertFrame(pid + 1, 0, ts + kFps20, false);
+  InsertFrame(pid + 1, 0, ts + kFps20, false, pid);
   for (int i = 2; i < 10; i += 2) {
     uint32_t ts_tl0 = ts + i / 2 * kFps10;
     InsertFrame(pid + i, 0, ts_tl0, false, pid + i - 2);
@@ -57,6 +57,8 @@ PacketBuffer::~PacketBuffer() {
 }
 
 bool PacketBuffer::InsertPacket(const VCMPacket& packet) {
+  std::vector<std::unique_ptr<RtpFrameObject>> found_frames;
+  {
   rtc::CritScope lock(&crit_);
   uint16_t seq_num = packet.seqNum;
   size_t index = seq_num % size_;
@@ -100,7 +102,12 @@ bool PacketBuffer::InsertPacket(const VCMPacket& packet) {
   sequence_buffer_[index].used = true;
   data_buffer_[index] = packet;
 
-  FindFrames(seq_num);
+    found_frames = FindFrames(seq_num);
+  }
+
+  for (std::unique_ptr<RtpFrameObject>& frame : found_frames)
+    received_frame_callback_->OnReceivedFrame(std::move(frame));
+
   return true;
 }
 
@@ -187,7 +194,9 @@ bool PacketBuffer::PotentialNewFrame(uint16_t seq_num) const {
   return false;
 }
 
-void PacketBuffer::FindFrames(uint16_t seq_num) {
+std::vector<std::unique_ptr<RtpFrameObject>> PacketBuffer::FindFrames(
+    uint16_t seq_num) {
+  std::vector<std::unique_ptr<RtpFrameObject>> found_frames;
   while (PotentialNewFrame(seq_num)) {
     size_t index = seq_num % size_;
     sequence_buffer_[index].continuous = true;
@@ -204,8 +213,8 @@ void PacketBuffer::FindFrames(uint16_t seq_num) {
       int start_index = index;
       while (true) {
         frame_size += data_buffer_[start_index].sizeBytes;
-        max_nack_count = std::max(
-            max_nack_count, data_buffer_[start_index].timesNacked);
+        max_nack_count =
+            std::max(max_nack_count, data_buffer_[start_index].timesNacked);
         sequence_buffer_[start_index].frame_created = true;
 
         if (sequence_buffer_[start_index].frame_begin)
@@ -215,15 +224,13 @@ void PacketBuffer::FindFrames(uint16_t seq_num) {
         start_seq_num--;
       }
 
-      std::unique_ptr<RtpFrameObject> frame(
+      found_frames.emplace_back(
           new RtpFrameObject(this, start_seq_num, seq_num, frame_size,
                              max_nack_count, clock_->TimeInMilliseconds()));
-
-      received_frame_callback_->OnReceivedFrame(std::move(frame));
     }
 
     ++seq_num;
   }
+  return found_frames;
 }
 
 void PacketBuffer::ReturnFrame(RtpFrameObject* frame) {
@@ -267,7 +274,6 @@ bool PacketBuffer::GetBitstream(const RtpFrameObject& frame,
 }
 
 VCMPacket* PacketBuffer::GetPacket(uint16_t seq_num) {
-  rtc::CritScope lock(&crit_);
   size_t index = seq_num % size_;
   if (!sequence_buffer_[index].used ||
       seq_num != sequence_buffer_[index].seq_num) {
@@ -99,8 +99,9 @@ class PacketBuffer {
       EXCLUSIVE_LOCKS_REQUIRED(crit_);
 
   // Test if all packets of a frame has arrived, and if so, creates a frame.
-  // May create multiple frames per invocation.
-  void FindFrames(uint16_t seq_num) EXCLUSIVE_LOCKS_REQUIRED(crit_);
+  // Returns a vector of received frames.
+  std::vector<std::unique_ptr<RtpFrameObject>> FindFrames(uint16_t seq_num)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_);
 
   // Copy the bitstream for |frame| to |destination|.
   // Virtual for testing.
@@ -108,7 +109,8 @@ class PacketBuffer {
 
   // Get the packet with sequence number |seq_num|.
   // Virtual for testing.
-  virtual VCMPacket* GetPacket(uint16_t seq_num);
+  virtual VCMPacket* GetPacket(uint16_t seq_num)
+      EXCLUSIVE_LOCKS_REQUIRED(crit_);
 
   // Mark all slots used by |frame| as not used.
   // Virtual for testing.
@@ -371,6 +371,11 @@ void RtpFrameReferenceFinder::ManageFrameVp9(
 
   const RTPVideoHeaderVP9& codec_header = rtp_codec_header->VP9;
 
+  bool old_frame = Vp9PidTl0Fix(*frame, &rtp_codec_header->VP9.picture_id,
+                                &rtp_codec_header->VP9.tl0_pic_idx);
+  if (old_frame)
+    return;
+
   if (codec_header.picture_id == kNoPictureId ||
       codec_header.temporal_idx == kNoTemporalIdx) {
     ManageFrameGeneric(std::move(frame), codec_header.picture_id);
@@ -585,5 +590,145 @@ uint16_t RtpFrameReferenceFinder::UnwrapPictureId(uint16_t picture_id) {
   return last_unwrap_;
 }
 
+bool RtpFrameReferenceFinder::Vp9PidTl0Fix(const RtpFrameObject& frame,
+                                           int16_t* picture_id,
+                                           int16_t* tl0_pic_idx) {
+  const int kTl0PicIdLength = 256;
+  const uint8_t kMaxPidDiff = 128;
+
+  // We are currently receiving VP9 without PID, nothing to fix.
+  if (*picture_id == kNoPictureId)
+    return false;
+
+  // If |vp9_fix_jump_timestamp_| != -1 then a jump has occurred recently.
+  if (vp9_fix_jump_timestamp_ != -1) {
+    // If this frame has a timestamp older than |vp9_fix_jump_timestamp_| then
+    // this frame is old (more previous than the frame where we detected the
+    // jump) and should be dropped.
+    if (AheadOf<uint32_t>(vp9_fix_jump_timestamp_, frame.timestamp))
+      return true;
+
+    // After 60 seconds, reset |vp9_fix_jump_timestamp_| in order to not
+    // discard old frames when the timestamp wraps.
+    int diff_ms =
+        ForwardDiff<uint32_t>(vp9_fix_jump_timestamp_, frame.timestamp) / 90;
+    if (diff_ms > 60 * 1000)
+      vp9_fix_jump_timestamp_ = -1;
+  }
+
+  // Update |vp9_fix_last_timestamp_| with the most recent timestamp.
+  if (vp9_fix_last_timestamp_ == -1)
+    vp9_fix_last_timestamp_ = frame.timestamp;
+  if (AheadOf<uint32_t>(frame.timestamp, vp9_fix_last_timestamp_))
+    vp9_fix_last_timestamp_ = frame.timestamp;
+
+  uint16_t fixed_pid = Add<kPicIdLength>(*picture_id, vp9_fix_pid_offset_);
+  if (vp9_fix_last_picture_id_ == -1)
+    vp9_fix_last_picture_id_ = *picture_id;
+
+  int16_t fixed_tl0 = kNoTl0PicIdx;
+  if (*tl0_pic_idx != kNoTl0PicIdx) {
+    fixed_tl0 = Add<kTl0PicIdLength>(*tl0_pic_idx, vp9_fix_tl0_pic_idx_offset_);
+    // Update |vp9_fix_last_tl0_pic_idx_| with the most recent tl0 pic index.
+    if (vp9_fix_last_tl0_pic_idx_ == -1)
+      vp9_fix_last_tl0_pic_idx_ = *tl0_pic_idx;
+    if (AheadOf<uint8_t>(fixed_tl0, vp9_fix_last_tl0_pic_idx_))
+      vp9_fix_last_tl0_pic_idx_ = fixed_tl0;
+  }
+
+  bool has_jumped = DetectVp9PicIdJump(fixed_pid, fixed_tl0, frame.timestamp);
+  if (!has_jumped)
+    has_jumped = DetectVp9Tl0PicIdxJump(fixed_tl0, frame.timestamp);
+
+  if (has_jumped) {
+    // First we calculate the offset to get to the previous picture id, and then
+    // we add kMaxPid to avoid accidently referencing any previous
+    // frames that was inserted into the FrameBuffer.
+    vp9_fix_pid_offset_ = ForwardDiff<uint16_t, kPicIdLength>(
+        *picture_id, vp9_fix_last_picture_id_);
+    vp9_fix_pid_offset_ += kMaxPidDiff;
+
+    fixed_pid = Add<kPicIdLength>(*picture_id, vp9_fix_pid_offset_);
+    vp9_fix_last_picture_id_ = fixed_pid;
+    vp9_fix_jump_timestamp_ = frame.timestamp;
+    gof_info_.clear();
+
+    vp9_fix_tl0_pic_idx_offset_ =
+        ForwardDiff<uint8_t>(*tl0_pic_idx, vp9_fix_last_tl0_pic_idx_);
+    vp9_fix_tl0_pic_idx_offset_ += kMaxGofSaved;
+    fixed_tl0 = Add<kTl0PicIdLength>(*tl0_pic_idx, vp9_fix_tl0_pic_idx_offset_);
+    vp9_fix_last_tl0_pic_idx_ = fixed_tl0;
+  }
+
+  // Update |vp9_fix_last_picture_id_| with the most recent picture id.
+  if (AheadOf<uint16_t, kPicIdLength>(fixed_pid, vp9_fix_last_picture_id_))
+    vp9_fix_last_picture_id_ = fixed_pid;
+
+  *picture_id = fixed_pid;
+  *tl0_pic_idx = fixed_tl0;
+
+  return false;
+}
+
+bool RtpFrameReferenceFinder::DetectVp9PicIdJump(int fixed_pid,
+                                                 int fixed_tl0,
+                                                 uint32_t timestamp) const {
+  // Test if there has been a jump backwards in the picture id.
+  if (AheadOrAt<uint32_t>(timestamp, vp9_fix_last_timestamp_) &&
+      AheadOf<uint16_t, kPicIdLength>(vp9_fix_last_picture_id_, fixed_pid)) {
+    return true;
+  }
+
+  // Test if we have jumped forward too much. The reason we have to do this
+  // is because the FrameBuffer holds history of old frames and inserting
+  // frames with a much advanced picture id can result in the frame buffer
+  // holding more than half of the interval of picture ids.
+  if (AheadOrAt<uint32_t>(timestamp, vp9_fix_last_timestamp_) &&
+      ForwardDiff<uint16_t, kPicIdLength>(vp9_fix_last_picture_id_, fixed_pid) >
+          128) {
+    return true;
+  }
+
+  // Special case where the picture id jump forward but not by much and the
+  // tl0 jumps to the id of an already saved gof for that id. In order to
+  // detect this we check if the picture id span over the length of the GOF.
+  if (fixed_tl0 != kNoTl0PicIdx) {
+    auto info_it = gof_info_.find(fixed_tl0);
+    if (info_it != gof_info_.end()) {
+      int last_pid_gof_idx_0 =
+          Subtract<kPicIdLength>(info_it->second.last_picture_id,
+                                 info_it->second.last_picture_id %
+                                     info_it->second.gof->num_frames_in_gof);
+      int pif_gof_end = Add<kPicIdLength>(
+          last_pid_gof_idx_0, info_it->second.gof->num_frames_in_gof);
+      if (AheadOf<uint16_t, kPicIdLength>(fixed_pid, pif_gof_end))
+        return true;
+    }
+  }
+
+  return false;
+}
+
+bool RtpFrameReferenceFinder::DetectVp9Tl0PicIdxJump(int fixed_tl0,
+                                                     uint32_t timestamp) const {
+  if (fixed_tl0 != kNoTl0PicIdx) {
+    // Test if there has been a jump backwards in tl0 pic index.
+    if (AheadOrAt<uint32_t>(timestamp, vp9_fix_last_timestamp_) &&
+        AheadOf<uint8_t>(vp9_fix_last_tl0_pic_idx_, fixed_tl0)) {
+      return true;
+    }
+
+    // Test if there has been a jump forward. If the jump forward results
+    // in the tl0 pic index for this frame to be considered smaller than the
+    // smallest item in |gof_info_| then we have jumped forward far enough to
+    // wrap.
+    if (!gof_info_.empty() &&
+        AheadOf<uint8_t>(gof_info_.begin()->first, fixed_tl0)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 }  // namespace video_coding
 }  // namespace webrtc
@@ -57,12 +57,12 @@ class RtpFrameReferenceFinder {
   void ClearTo(uint16_t seq_num);
 
  private:
-  static const uint16_t kPicIdLength = 1 << 7;
+  static const uint16_t kPicIdLength = 1 << 15;
   static const uint8_t kMaxTemporalLayers = 5;
-  static const int kMaxLayerInfo = 10;
-  static const int kMaxStashedFrames = 10;
-  static const int kMaxNotYetReceivedFrames = 20;
-  static const int kMaxGofSaved = 15;
+  static const int kMaxLayerInfo = 50;
+  static const int kMaxStashedFrames = 50;
+  static const int kMaxNotYetReceivedFrames = 100;
+  static const int kMaxGofSaved = 50;
   static const int kMaxPaddingAge = 100;
 
 
@@ -129,6 +129,24 @@ class RtpFrameReferenceFinder {
   // All picture ids are unwrapped to 16 bits.
   uint16_t UnwrapPictureId(uint16_t picture_id) EXCLUSIVE_LOCKS_REQUIRED(crit_);
 
+  // Returns true if the frame is old and should be dropped.
+  // TODO(philipel): Remove when VP9 PID/TL0 does not jump mid-stream (should be
+  // around M59).
+  bool Vp9PidTl0Fix(const RtpFrameObject& frame,
+                    int16_t* picture_id,
+                    int16_t* tl0_pic_idx) EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+  // TODO(philipel): Remove when VP9 PID/TL0 does not jump mid-stream (should be
+  // around M59).
+  bool DetectVp9PicIdJump(int fixed_pid,
+                          int fixed_tl0,
+                          uint32_t timestamp) const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
+  // TODO(philipel): Remove when VP9 PID/TL0 does not jump mid-stream (should be
+  // around M59).
+  bool DetectVp9Tl0PicIdxJump(int fixed_tl0, uint32_t timestamp) const
+      EXCLUSIVE_LOCKS_REQUIRED(crit_);
+
   // For every group of pictures, hold two sequence numbers. The first being
   // the sequence number of the last packet of the last completed frame, and
@@ -196,6 +214,15 @@ class RtpFrameReferenceFinder {
   int cleared_to_seq_num_ GUARDED_BY(crit_);
 
   OnCompleteFrameCallback* frame_callback_;
+
+  // Vp9PidFix variables
+  // TODO(philipel): Remove when VP9 PID does not jump mid-stream.
+  int vp9_fix_last_timestamp_ = -1;
+  int vp9_fix_jump_timestamp_ = -1;
+  int vp9_fix_last_picture_id_ = -1;
+  int vp9_fix_pid_offset_ = 0;
+  int vp9_fix_last_tl0_pic_idx_ = -1;
+  int vp9_fix_tl0_pic_idx_offset_ = 0;
 };
 
 }  // namespace video_coding
@@ -122,6 +122,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
                          bool up_switch = false,
                          GofInfoVP9* ss = nullptr) {
     VCMPacket packet;
+    packet.timestamp = pid;
     packet.codec = kVideoCodecVP9;
     packet.seqNum = seq_num_start;
     packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
@@ -152,6 +153,7 @@ class TestRtpFrameReferenceFinder : public ::testing::Test,
                           bool inter = false,
                           std::vector<uint8_t> refs = std::vector<uint8_t>()) {
     VCMPacket packet;
+    packet.timestamp = pid;
     packet.codec = kVideoCodecVP9;
     packet.seqNum = seq_num_start;
     packet.frameType = keyframe ? kVideoFrameKey : kVideoFrameDelta;
@@ -1221,5 +1223,235 @@ TEST_F(TestRtpFrameReferenceFinder, Vp9FlexibleModeTwoSpatialLayersReordered) {
   CheckReferencesVp9(pid + 8, 1, pid + 7);
 }
 
+// TODO(philipel): Remove when VP9 PID/TL0 does not jump mid-stream (should be
+// around M59).
+TEST_F(TestRtpFrameReferenceFinder, Vp9PidFix_PidJumpsBackwardThenForward) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+  VCMPacket packet;
+  packet.timestamp = 0;
+  packet.codec = kVideoCodecVP9;
+  packet.frameType = kVideoFrameKey;
+  packet.video_header.codecHeader.VP9.flexible_mode = false;
+  packet.video_header.codecHeader.VP9.picture_id = 1;
+  packet.video_header.codecHeader.VP9.temporal_idx = 0;
+  packet.video_header.codecHeader.VP9.spatial_idx = 0;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 0;
+  packet.video_header.codecHeader.VP9.temporal_up_switch = true;
+  packet.video_header.codecHeader.VP9.ss_data_available = true;
+  packet.video_header.codecHeader.VP9.gof = ss;
+
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  // Timestamp goes forward but pid goes backwards.
+  packet.timestamp = 1;
+  packet.video_header.codecHeader.VP9.picture_id = 0;
+
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 2;
+  packet.video_header.codecHeader.VP9.picture_id = 5000;
+
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  ASSERT_EQ(3UL, frames_from_callback_.size());
+  CheckReferencesVp9(1, 0);
+  CheckReferencesVp9(129, 0);
+  CheckReferencesVp9(257, 0);
+}
+
+// TODO(philipel): Remove when VP9 PID/TL0 does not jump mid-stream (should be
+// around M59).
+TEST_F(TestRtpFrameReferenceFinder, Vp9PidFix_Tl0JumpsBackwardThenForward) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+  VCMPacket packet;
+  packet.timestamp = 0;
+  packet.codec = kVideoCodecVP9;
+  packet.frameType = kVideoFrameKey;
+  packet.video_header.codecHeader.VP9.flexible_mode = false;
+  packet.video_header.codecHeader.VP9.picture_id = 0;
+  packet.video_header.codecHeader.VP9.temporal_idx = 0;
+  packet.video_header.codecHeader.VP9.spatial_idx = 0;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 1;
+  packet.video_header.codecHeader.VP9.temporal_up_switch = true;
+  packet.video_header.codecHeader.VP9.ss_data_available = true;
+  packet.video_header.codecHeader.VP9.gof = ss;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 1;
+  packet.video_header.codecHeader.VP9.picture_id = 1;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 0;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 2;
+  packet.frameType = kVideoFrameDelta;
+  packet.video_header.codecHeader.VP9.picture_id = 2;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 2;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 3;
+  packet.frameType = kVideoFrameKey;
+  packet.video_header.codecHeader.VP9.ss_data_available = true;
+  packet.video_header.codecHeader.VP9.picture_id = 3;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 129;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  ASSERT_EQ(4UL, frames_from_callback_.size());
+  CheckReferencesVp9(0, 0);
+  CheckReferencesVp9(128, 0);
+  CheckReferencesVp9(129, 0, 128);
+  CheckReferencesVp9(257, 0);
+}
+
+// TODO(philipel): Remove when VP9 PID/TL0 does not jump mid-stream (should be
+// around M59).
+TEST_F(TestRtpFrameReferenceFinder, Vp9PidFix_PidSmallJumpForward) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+  VCMPacket packet;
+  packet.timestamp = 0;
+  packet.codec = kVideoCodecVP9;
+  packet.frameType = kVideoFrameKey;
+  packet.video_header.codecHeader.VP9.flexible_mode = false;
+  packet.video_header.codecHeader.VP9.picture_id = 1;
+  packet.video_header.codecHeader.VP9.temporal_idx = 0;
+  packet.video_header.codecHeader.VP9.spatial_idx = 0;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 1;
+  packet.video_header.codecHeader.VP9.temporal_up_switch = true;
+  packet.video_header.codecHeader.VP9.ss_data_available = true;
+  packet.video_header.codecHeader.VP9.gof = ss;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 1;
+  packet.video_header.codecHeader.VP9.picture_id = 2;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 2;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 2;
+  packet.video_header.codecHeader.VP9.picture_id = 3;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 2;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 2;
+  packet.video_header.codecHeader.VP9.picture_id = 4;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 1;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  ASSERT_EQ(4UL, frames_from_callback_.size());
+  CheckReferencesVp9(1, 0);
+  CheckReferencesVp9(2, 0);
+  CheckReferencesVp9(3, 0);
+  CheckReferencesVp9(131, 0);
+}
+
+// TODO(philipel): Remove when VP9 PID/TL0 does not jump mid-stream (should be
+// around M59).
+TEST_F(TestRtpFrameReferenceFinder, Vp9PidFix_DropOldFrame) {
+  GofInfoVP9 ss;
+  ss.SetGofInfoVP9(kTemporalStructureMode1);
+
+  VCMPacket packet;
+  packet.timestamp = 0;
+  packet.codec = kVideoCodecVP9;
+  packet.frameType = kVideoFrameKey;
+  packet.video_header.codecHeader.VP9.flexible_mode = false;
+  packet.video_header.codecHeader.VP9.picture_id = 1;
+  packet.video_header.codecHeader.VP9.temporal_idx = 0;
+  packet.video_header.codecHeader.VP9.spatial_idx = 0;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 1;
+  packet.video_header.codecHeader.VP9.temporal_up_switch = true;
+  packet.video_header.codecHeader.VP9.ss_data_available = true;
+  packet.video_header.codecHeader.VP9.gof = ss;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 1;
+  packet.video_header.codecHeader.VP9.picture_id = 0;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 2;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  packet.timestamp = 0;
+  packet.video_header.codecHeader.VP9.picture_id = 3;
+  packet.video_header.codecHeader.VP9.tl0_pic_idx = 2;
+  {
+    ref_packet_buffer_->InsertPacket(packet);
+    std::unique_ptr<RtpFrameObject> frame(
+        new RtpFrameObject(ref_packet_buffer_, 0, 0, 0, 0, 0));
+    reference_finder_->ManageFrame(std::move(frame));
+  }
+
+  ASSERT_EQ(2UL, frames_from_callback_.size());
+  CheckReferencesVp9(1, 0);
+  CheckReferencesVp9(129, 0);
+}
+
 }  // namespace video_coding
 }  // namespace webrtc
@@ -154,6 +154,7 @@ void VCMTiming::UpdateCurrentDelay(uint32_t frame_timestamp) {
     max_change_ms = kDelayMaxChangeMsPerS *
                     (frame_timestamp - prev_frame_timestamp_) / 90000;
   }
+
   if (max_change_ms <= 0) {
     // Any changes less than 1 ms are truncated and
     // will be postponed. Negative change will be due
|
@ -143,6 +143,8 @@ class VideoReceiver : public Module {
|
|||||||
|
|
||||||
int32_t Decode(uint16_t maxWaitTimeMs);
|
int32_t Decode(uint16_t maxWaitTimeMs);
|
||||||
|
|
||||||
|
int32_t Decode(const webrtc::VCMEncodedFrame* frame);
|
||||||
|
|
||||||
int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const;
|
int32_t ReceiveCodec(VideoCodec* currentReceiveCodec) const;
|
||||||
VideoCodecType ReceiveCodec() const;
|
VideoCodecType ReceiveCodec() const;
|
||||||
|
|
||||||
|
@@ -290,6 +290,14 @@ int32_t VideoReceiver::Decode(uint16_t maxWaitTimeMs) {
   return ret;
 }
 
+// Used for the WebRTC-NewVideoJitterBuffer experiment.
+// TODO(philipel): Clean up among the Decode functions as we replace
+// VCMEncodedFrame with FrameObject.
+int32_t VideoReceiver::Decode(const webrtc::VCMEncodedFrame* frame) {
+  rtc::CritScope lock(&receive_crit_);
+  return Decode(*frame);
+}
+
 int32_t VideoReceiver::RequestSliceLossIndication(
     const uint64_t pictureID) const {
   TRACE_EVENT1("webrtc", "RequestSLI", "picture_id", pictureID);
@@ -11,6 +11,7 @@
 #include "webrtc/video/rtp_stream_receiver.h"
 
 #include <vector>
+#include <utility>
 
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
@@ -24,7 +25,11 @@
 #include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
 #include "webrtc/modules/rtp_rtcp/include/ulpfec_receiver.h"
+#include "webrtc/modules/video_coding/frame_object.h"
+#include "webrtc/modules/video_coding/h264_sps_pps_tracker.h"
+#include "webrtc/modules/video_coding/packet_buffer.h"
 #include "webrtc/modules/video_coding/video_coding_impl.h"
+#include "webrtc/system_wrappers/include/field_trial.h"
 #include "webrtc/system_wrappers/include/metrics.h"
 #include "webrtc/system_wrappers/include/timestamp_extrapolator.h"
 #include "webrtc/system_wrappers/include/trace.h"
@@ -33,6 +38,11 @@
 
 namespace webrtc {
 
+namespace {
+constexpr int kPacketBufferStartSize = 32;
+constexpr int kPacketBufferMaxSixe = 2048;
+}
+
 std::unique_ptr<RtpRtcp> CreateRtpRtcpModule(
     ReceiveStatistics* receive_statistics,
     Transport* outgoing_transport,
@@ -83,7 +93,11 @@ RtpStreamReceiver::RtpStreamReceiver(
     const VideoReceiveStream::Config* config,
     ReceiveStatisticsProxy* receive_stats_proxy,
     ProcessThread* process_thread,
-    RateLimiter* retransmission_rate_limiter)
+    RateLimiter* retransmission_rate_limiter,
+    NackSender* nack_sender,
+    KeyFrameRequestSender* keyframe_request_sender,
+    video_coding::OnCompleteFrameCallback* complete_frame_callback,
+    VCMTiming* timing)
     : clock_(Clock::GetRealTimeClock()),
       config_(*config),
       video_receiver_(video_receiver),
@@ -110,7 +124,10 @@ RtpStreamReceiver::RtpStreamReceiver(
                                  remote_bitrate_estimator_,
                                  paced_sender,
                                  packet_router,
-                                 retransmission_rate_limiter)) {
+                                 retransmission_rate_limiter)),
+      complete_frame_callback_(complete_frame_callback),
+      keyframe_request_sender_(keyframe_request_sender),
+      timing_(timing) {
   packet_router_->AddRtpModule(rtp_rtcp_.get());
   rtp_receive_statistics_->RegisterRtpStatisticsCallback(receive_stats_proxy);
   rtp_receive_statistics_->RegisterRtcpStatisticsCallback(receive_stats_proxy);
@@ -180,11 +197,27 @@ RtpStreamReceiver::RtpStreamReceiver(
   rtp_rtcp_->RegisterRtcpStatisticsCallback(receive_stats_proxy);
 
   process_thread_->RegisterModule(rtp_rtcp_.get());
+
+  jitter_buffer_experiment_ =
+      field_trial::FindFullName("WebRTC-NewVideoJitterBuffer") == "Enabled";
+
+  if (jitter_buffer_experiment_) {
+    nack_module_.reset(
+        new NackModule(clock_, nack_sender, keyframe_request_sender));
+    process_thread_->RegisterModule(nack_module_.get());
+
+    packet_buffer_ = video_coding::PacketBuffer::Create(
+        clock_, kPacketBufferStartSize, kPacketBufferMaxSixe, this);
+    reference_finder_.reset(new video_coding::RtpFrameReferenceFinder(this));
+  }
 }
 
 RtpStreamReceiver::~RtpStreamReceiver() {
   process_thread_->DeRegisterModule(rtp_rtcp_.get());
 
+  if (jitter_buffer_experiment_)
+    process_thread_->DeRegisterModule(nack_module_.get());
+
   packet_router_->RemoveRtpModule(rtp_rtcp_.get());
   rtp_rtcp_->SetREMBStatus(false);
   remb_->RemoveReceiveChannel(rtp_rtcp_.get());
@@ -224,11 +257,35 @@ int32_t RtpStreamReceiver::OnReceivedPayloadData(
   WebRtcRTPHeader rtp_header_with_ntp = *rtp_header;
   rtp_header_with_ntp.ntp_time_ms =
       ntp_estimator_.Estimate(rtp_header->header.timestamp);
+  if (jitter_buffer_experiment_) {
+    VCMPacket packet(payload_data, payload_size, rtp_header_with_ntp);
+    timing_->IncomingTimestamp(packet.timestamp, clock_->TimeInMilliseconds());
+    packet.timesNacked = nack_module_->OnReceivedPacket(packet);
+
+    if (packet.codec == kVideoCodecH264) {
+      switch (tracker_.CopyAndFixBitstream(&packet)) {
+        case video_coding::H264SpsPpsTracker::kRequestKeyframe:
+          keyframe_request_sender_->RequestKeyFrame();
+          FALLTHROUGH();
+        case video_coding::H264SpsPpsTracker::kDrop:
+          return 0;
+        case video_coding::H264SpsPpsTracker::kInsert:
+          break;
+      }
+    } else {
+      uint8_t* data = new uint8_t[packet.sizeBytes];
+      memcpy(data, packet.dataPtr, packet.sizeBytes);
+      packet.dataPtr = data;
+    }
+
+    packet_buffer_->InsertPacket(packet);
+  } else {
     if (video_receiver_->IncomingPacket(payload_data, payload_size,
                                         rtp_header_with_ntp) != 0) {
       // Check this...
       return -1;
     }
+  }
   return 0;
 }
 
@@ -348,6 +405,27 @@ int32_t RtpStreamReceiver::ResendPackets(const uint16_t* sequence_numbers,
   return rtp_rtcp_->SendNACK(sequence_numbers, length);
 }
 
+void RtpStreamReceiver::OnReceivedFrame(
+    std::unique_ptr<video_coding::RtpFrameObject> frame) {
+  reference_finder_->ManageFrame(std::move(frame));
+}
+
+void RtpStreamReceiver::OnCompleteFrame(
+    std::unique_ptr<video_coding::FrameObject> frame) {
+  {
+    rtc::CritScope lock(&last_seq_num_cs_);
+    video_coding::RtpFrameObject* rtp_frame =
+        static_cast<video_coding::RtpFrameObject*>(frame.get());
+    last_seq_num_for_pic_id_[rtp_frame->picture_id] = rtp_frame->last_seq_num();
+  }
+  complete_frame_callback_->OnCompleteFrame(std::move(frame));
+}
+
+void RtpStreamReceiver::OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) {
+  if (jitter_buffer_experiment_)
+    nack_module_->UpdateRtt(max_rtt_ms);
+}
+
 bool RtpStreamReceiver::ReceivePacket(const uint8_t* packet,
                                       size_t packet_length,
                                       const RTPHeader& header,
@@ -472,6 +550,39 @@ bool RtpStreamReceiver::DeliverRtcp(const uint8_t* rtcp_packet,
   return true;
 }
 
+void RtpStreamReceiver::FrameContinuous(uint16_t picture_id) {
+  if (jitter_buffer_experiment_) {
+    int seq_num = -1;
+    {
+      rtc::CritScope lock(&last_seq_num_cs_);
+      auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+      if (seq_num_it != last_seq_num_for_pic_id_.end())
+        seq_num = seq_num_it->second;
+    }
+    if (seq_num != -1)
+      nack_module_->ClearUpTo(seq_num);
+  }
+}
+
+void RtpStreamReceiver::FrameDecoded(uint16_t picture_id) {
+  if (jitter_buffer_experiment_) {
+    int seq_num = -1;
+    {
+      rtc::CritScope lock(&last_seq_num_cs_);
+      auto seq_num_it = last_seq_num_for_pic_id_.find(picture_id);
+      if (seq_num_it != last_seq_num_for_pic_id_.end()) {
+        seq_num = seq_num_it->second;
+        last_seq_num_for_pic_id_.erase(last_seq_num_for_pic_id_.begin(),
+                                       ++seq_num_it);
+      }
+    }
+    if (seq_num != -1) {
+      packet_buffer_->ClearTo(seq_num);
+      reference_finder_->ClearTo(seq_num);
+    }
+  }
+}
+
 void RtpStreamReceiver::SignalNetworkState(NetworkState state) {
   rtp_rtcp_->SetRTCPStatus(state == kNetworkUp ? config_.rtp.rtcp_mode
                                                : RtcpMode::kOff);
@@ -12,47 +12,60 @@
 #define WEBRTC_VIDEO_RTP_STREAM_RECEIVER_H_
 
 #include <list>
+#include <map>
 #include <memory>
 #include <string>
 #include <vector>
 
 #include "webrtc/base/constructormagic.h"
 #include "webrtc/base/criticalsection.h"
+#include "webrtc/modules/include/module_common_types.h"
 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
 #include "webrtc/modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "webrtc/modules/video_coding/h264_sps_pps_tracker.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"
+#include "webrtc/modules/video_coding/packet_buffer.h"
+#include "webrtc/modules/video_coding/rtp_frame_reference_finder.h"
+#include "webrtc/modules/video_coding/sequence_number_util.h"
 #include "webrtc/typedefs.h"
 #include "webrtc/video_receive_stream.h"
 
 namespace webrtc {
 
+class NackModule;
 class PacedSender;
 class PacketRouter;
 class ProcessThread;
-class RemoteNtpTimeEstimator;
 class ReceiveStatistics;
 class ReceiveStatisticsProxy;
 class RemoteBitrateEstimator;
+class RemoteNtpTimeEstimator;
 class RtcpRttStats;
 class RtpHeaderParser;
 class RTPPayloadRegistry;
 class RtpReceiver;
 class Transport;
 class UlpfecReceiver;
+class VCMTiming;
 class VieRemb;
 
 namespace vcm {
 class VideoReceiver;
 }  // namespace vcm
 
-class RtpStreamReceiver : public RtpData, public RtpFeedback,
+class RtpStreamReceiver : public RtpData,
+                          public RtpFeedback,
                           public VCMFrameTypeCallback,
-                          public VCMPacketRequestCallback {
+                          public VCMPacketRequestCallback,
+                          public video_coding::OnReceivedFrameCallback,
+                          public video_coding::OnCompleteFrameCallback,
+                          public CallStatsObserver {
  public:
-  RtpStreamReceiver(vcm::VideoReceiver* video_receiver,
+  RtpStreamReceiver(
+      vcm::VideoReceiver* video_receiver,
       RemoteBitrateEstimator* remote_bitrate_estimator,
       Transport* transport,
       RtcpRttStats* rtt_stats,
@@ -62,7 +75,11 @@ class RtpStreamReceiver : public RtpData, public RtpFeedback,
       const VideoReceiveStream::Config* config,
       ReceiveStatisticsProxy* receive_stats_proxy,
       ProcessThread* process_thread,
-      RateLimiter* retransmission_rate_limiter);
+      RateLimiter* retransmission_rate_limiter,
+      NackSender* nack_sender,
+      KeyFrameRequestSender* keyframe_request_sender,
+      video_coding::OnCompleteFrameCallback* complete_frame_callback,
+      VCMTiming* timing);
   ~RtpStreamReceiver();
 
   bool SetReceiveCodec(const VideoCodec& video_codec);
@@ -81,6 +98,10 @@ class RtpStreamReceiver : public RtpData, public RtpFeedback,
                     const PacketTime& packet_time);
   bool DeliverRtcp(const uint8_t* rtcp_packet, size_t rtcp_packet_length);
 
+  void FrameContinuous(uint16_t seq_num);
+
+  void FrameDecoded(uint16_t seq_num);
+
   void SignalNetworkState(NetworkState state);
 
   // Implements RtpData.
@@ -111,6 +132,16 @@ class RtpStreamReceiver : public RtpData, public RtpFeedback,
   int32_t ResendPackets(const uint16_t* sequenceNumbers,
                         uint16_t length) override;
 
+  // Implements OnReceivedFrameCallback.
+  void OnReceivedFrame(
+      std::unique_ptr<video_coding::RtpFrameObject> frame) override;
+
+  // Implements OnCompleteFrameCallback.
+  void OnCompleteFrame(
+      std::unique_ptr<video_coding::FrameObject> frame) override;
+
+  void OnRttUpdate(int64_t avg_rtt_ms, int64_t max_rtt_ms) override;
+
  private:
   bool ReceivePacket(const uint8_t* packet,
                      size_t packet_length,
@@ -152,6 +183,19 @@ class RtpStreamReceiver : public RtpData, public RtpFeedback,
   int64_t last_packet_log_ms_ GUARDED_BY(receive_cs_);
 
   const std::unique_ptr<RtpRtcp> rtp_rtcp_;
+
+  // Members for the new jitter buffer experiment.
+  bool jitter_buffer_experiment_;
+  video_coding::OnCompleteFrameCallback* complete_frame_callback_;
+  KeyFrameRequestSender* keyframe_request_sender_;
+  VCMTiming* timing_;
+  std::unique_ptr<NackModule> nack_module_;
+  rtc::scoped_refptr<video_coding::PacketBuffer> packet_buffer_;
+  std::unique_ptr<video_coding::RtpFrameReferenceFinder> reference_finder_;
+  rtc::CriticalSection last_seq_num_cs_;
+  std::map<uint16_t, uint16_t, DescendingSeqNumComp<uint16_t>>
+      last_seq_num_for_pic_id_ GUARDED_BY(last_seq_num_cs_);
+  video_coding::H264SpsPpsTracker tracker_;
 };
 
 }  // namespace webrtc
@@ -18,12 +18,17 @@
 
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
+#include "webrtc/base/optional.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/modules/congestion_controller/include/congestion_controller.h"
 #include "webrtc/modules/utility/include/process_thread.h"
+#include "webrtc/modules/video_coding/frame_object.h"
 #include "webrtc/modules/video_coding/include/video_coding.h"
+#include "webrtc/modules/video_coding/jitter_estimator.h"
+#include "webrtc/modules/video_coding/timing.h"
 #include "webrtc/modules/video_coding/utility/ivf_file_writer.h"
 #include "webrtc/system_wrappers/include/clock.h"
+#include "webrtc/system_wrappers/include/field_trial.h"
 #include "webrtc/video/call_stats.h"
 #include "webrtc/video/receive_statistics_proxy.h"
 #include "webrtc/video_receive_stream.h"
@@ -197,6 +202,7 @@ VideoReceiveStream::VideoReceiveStream(
       call_stats_(call_stats),
       video_receiver_(clock_, nullptr, this, this, this),
       stats_proxy_(&config_, clock_),
+      timing_(new VCMTiming(clock_)),
       rtp_stream_receiver_(
           &video_receiver_,
           congestion_controller_->GetRemoteBitrateEstimator(
@@ -209,8 +215,15 @@ VideoReceiveStream::VideoReceiveStream(
           &config_,
           &stats_proxy_,
           process_thread_,
-          congestion_controller_->GetRetransmissionRateLimiter()),
-      rtp_stream_sync_(&video_receiver_, &rtp_stream_receiver_) {
+          congestion_controller_->GetRetransmissionRateLimiter(),
+          this,  // NackSender
+          this,  // KeyFrameRequestSender
+          this,  // OnCompleteFrameCallback
+          timing_.get()),
+      rtp_stream_sync_(&video_receiver_, &rtp_stream_receiver_),
+      jitter_buffer_experiment_(
+          field_trial::FindFullName("WebRTC-NewVideoJitterBuffer") ==
+          "Enabled") {
   LOG(LS_INFO) << "VideoReceiveStream: " << config_.ToString();
 
   RTC_DCHECK(process_thread_);
@@ -230,6 +243,12 @@ VideoReceiveStream::VideoReceiveStream(
 
   video_receiver_.SetRenderDelay(config.render_delay_ms);
 
+  if (jitter_buffer_experiment_) {
+    jitter_estimator_.reset(new VCMJitterEstimator(clock_));
+    frame_buffer_.reset(new video_coding::FrameBuffer(
+        clock_, jitter_estimator_.get(), timing_.get()));
+  }
+
   process_thread_->RegisterModule(&video_receiver_);
   process_thread_->RegisterModule(&rtp_stream_sync_);
 }
@@ -268,6 +287,15 @@ bool VideoReceiveStream::OnRecoveredPacket(const uint8_t* packet,
 void VideoReceiveStream::Start() {
   if (decode_thread_.IsRunning())
     return;
+  if (jitter_buffer_experiment_) {
+    frame_buffer_->Start();
+    call_stats_->RegisterStatsObserver(&rtp_stream_receiver_);
+
+    if (rtp_stream_receiver_.IsRetransmissionsEnabled() &&
+        rtp_stream_receiver_.IsFecEnabled()) {
+      frame_buffer_->SetProtectionMode(kProtectionNackFEC);
+    }
+  }
   transport_adapter_.Enable();
   rtc::VideoSinkInterface<VideoFrame>* renderer = nullptr;
   if (config_.renderer) {
@@ -310,6 +338,12 @@ void VideoReceiveStream::Stop() {
   // stop immediately, instead of waiting for a timeout. Needs to be called
   // before joining the decoder thread thread.
   video_receiver_.TriggerDecoderShutdown();
+
+  if (jitter_buffer_experiment_) {
+    frame_buffer_->Stop();
+    call_stats_->DeregisterStatsObserver(&rtp_stream_receiver_);
+  }
+
   if (decode_thread_.IsRunning()) {
     decode_thread_.Stop();
     // Deregister external decoders so they are no longer running during
@@ -319,6 +353,7 @@ void VideoReceiveStream::Stop() {
     for (const Decoder& decoder : config_.decoders)
       video_receiver_.RegisterExternalDecoder(nullptr, decoder.payload_type);
   }
+
   call_stats_->DeregisterStatsObserver(video_stream_decoder_.get());
   video_stream_decoder_.reset();
   incoming_video_stream_.reset();
@@ -365,6 +400,13 @@ void VideoReceiveStream::OnFrame(const VideoFrame& video_frame) {
   stats_proxy_.OnRenderedFrame(video_frame);
 }
 
+void VideoReceiveStream::OnCompleteFrame(
+    std::unique_ptr<video_coding::FrameObject> frame) {
+  int last_continuous_pid = frame_buffer_->InsertFrame(std::move(frame));
+  if (last_continuous_pid != -1)
+    rtp_stream_receiver_.FrameContinuous(last_continuous_pid);
+}
+
 // TODO(asapersson): Consider moving callback from video_encoder.h or
 // creating a different callback.
 EncodedImageCallback::Result VideoReceiveStream::OnEncodedImage(
@@ -397,7 +439,26 @@ bool VideoReceiveStream::DecodeThreadFunction(void* ptr) {
 
 void VideoReceiveStream::Decode() {
   static const int kMaxDecodeWaitTimeMs = 50;
+  if (jitter_buffer_experiment_) {
+    static const int kMaxWaitForFrameMs = 3000;
+    std::unique_ptr<video_coding::FrameObject> frame;
+    video_coding::FrameBuffer::ReturnReason res =
+        frame_buffer_->NextFrame(kMaxWaitForFrameMs, &frame);
+
+    if (res == video_coding::FrameBuffer::ReturnReason::kStopped)
+      return;
+
+    if (frame) {
+      if (video_receiver_.Decode(frame.get()) == VCM_OK)
+        rtp_stream_receiver_.FrameDecoded(frame->picture_id);
+    } else {
+      LOG(LS_WARNING) << "No decodable frame in " << kMaxWaitForFrameMs
+                      << " ms, requesting keyframe.";
+      RequestKeyFrame();
+    }
+  } else {
     video_receiver_.Decode(kMaxDecodeWaitTimeMs);
+  }
 }
 
 void VideoReceiveStream::SendNack(
@@ -17,6 +17,7 @@
 #include "webrtc/common_video/include/incoming_video_stream.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/modules/rtp_rtcp/include/flexfec_receiver.h"
+#include "webrtc/modules/video_coding/frame_buffer2.h"
 #include "webrtc/modules/video_coding/video_coding_impl.h"
 #include "webrtc/system_wrappers/include/clock.h"
 #include "webrtc/video/receive_statistics_proxy.h"
@@ -35,6 +36,8 @@ class ProcessThread;
 class RTPFragmentationHeader;
 class VoiceEngine;
 class VieRemb;
+class VCMTiming;
+class VCMJitterEstimator;
 
 namespace internal {
 
@@ -42,7 +45,8 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream,
                            public rtc::VideoSinkInterface<VideoFrame>,
                            public EncodedImageCallback,
                            public NackSender,
-                           public KeyFrameRequestSender {
+                           public KeyFrameRequestSender,
+                           public video_coding::OnCompleteFrameCallback {
  public:
   VideoReceiveStream(int num_cpu_cores,
                      CongestionController* congestion_controller,
@@ -70,6 +74,10 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream,
   // Overrides rtc::VideoSinkInterface<VideoFrame>.
   void OnFrame(const VideoFrame& video_frame) override;
 
+  // Implements video_coding::OnCompleteFrameCallback.
+  void OnCompleteFrame(
+      std::unique_ptr<video_coding::FrameObject> frame) override;
+
   // Overrides EncodedImageCallback.
   EncodedImageCallback::Result OnEncodedImage(
       const EncodedImage& encoded_image,
@@ -112,12 +120,18 @@ class VideoReceiveStream : public webrtc::VideoReceiveStream,
   vcm::VideoReceiver video_receiver_;
   std::unique_ptr<rtc::VideoSinkInterface<VideoFrame>> incoming_video_stream_;
   ReceiveStatisticsProxy stats_proxy_;
+  std::unique_ptr<VCMTiming> timing_;  // Jitter buffer experiment.
   RtpStreamReceiver rtp_stream_receiver_;
   std::unique_ptr<VideoStreamDecoder> video_stream_decoder_;
   RtpStreamsSynchronizer rtp_stream_sync_;
 
   rtc::CriticalSection ivf_writer_lock_;
   std::unique_ptr<IvfFileWriter> ivf_writer_ GUARDED_BY(ivf_writer_lock_);
+
+  // Members for the new jitter buffer experiment.
+  const bool jitter_buffer_experiment_;
+  std::unique_ptr<VCMJitterEstimator> jitter_estimator_;
+  std::unique_ptr<video_coding::FrameBuffer> frame_buffer_;
 };
 }  // namespace internal
 }  // namespace webrtc