Improve AV-sync when initial delay is set and NetEq has long buffer.
Review URL: https://webrtc-codereview.appspot.com/1324006
git-svn-id: http://webrtc.googlecode.com/svn/trunk@3883 4adac7df-926f-26a2-2b94-8c16560cd09d
@@ -21,7 +21,6 @@
#include "webrtc/modules/audio_coding/neteq/interface/webrtc_neteq_internal.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/system_wrappers/interface/trace_event.h"

@@ -49,7 +48,8 @@ ACMNetEQ::ACMNetEQ()
      callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      min_of_max_num_packets_(0),
      min_of_buffer_size_bytes_(0),
      per_packet_overhead_bytes_(0) {
      per_packet_overhead_bytes_(0),
      av_sync_(false) {
  for (int n = 0; n < MAX_NUM_SLAVE_NETEQ + 1; n++) {
    is_initialized_[n] = false;
    ptr_vadinst_[n] = NULL;
@@ -436,12 +436,59 @@ int32_t ACMNetEQ::NetworkStatistics(
  return 0;
}

int32_t ACMNetEQ::RecIn(const uint8_t* incoming_payload,
                        const int32_t length_payload,
                        const WebRtcRTPHeader& rtp_info) {
  int16_t payload_length = static_cast<int16_t>(length_payload);
// Should only be called in AV-sync mode.
int ACMNetEQ::RecIn(const WebRtcRTPHeader& rtp_info,
                    uint32_t receive_timestamp) {
  assert(av_sync_);

  // translate to NetEq struct
  // Translate to NetEq structure.
  WebRtcNetEQ_RTPInfo neteq_rtpinfo;
  neteq_rtpinfo.payloadType = rtp_info.header.payloadType;
  neteq_rtpinfo.sequenceNumber = rtp_info.header.sequenceNumber;
  neteq_rtpinfo.timeStamp = rtp_info.header.timestamp;
  neteq_rtpinfo.SSRC = rtp_info.header.ssrc;
  neteq_rtpinfo.markerBit = rtp_info.header.markerBit;

  CriticalSectionScoped lock(neteq_crit_sect_);

  // Master should be initialized.
  assert(is_initialized_[0]);

  // Push into Master.
  int status = WebRtcNetEQ_RecInSyncRTP(inst_[0], &neteq_rtpinfo,
                                        receive_timestamp);
  if (status < 0) {
    LogError("RecInSyncRTP", 0);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                 "RecIn (sync): NetEq, error in pushing in Master");
    return -1;
  }

  // If the received stream is stereo, insert a sync payload into slave.
  if (rtp_info.type.Audio.channel == 2) {
    // Slave should be initialized.
    assert(is_initialized_[1]);

    // PUSH into Slave
    status = WebRtcNetEQ_RecInSyncRTP(inst_[1], &neteq_rtpinfo,
                                      receive_timestamp);
    if (status < 0) {
      LogError("RecInRTPStruct", 1);
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "RecIn (sync): NetEq, error in pushing in Slave");
      return -1;
    }
  }
  return status;
}

int32_t ACMNetEQ::RecIn(const uint8_t* incoming_payload,
                        const int32_t length_payload,
                        const WebRtcRTPHeader& rtp_info,
                        uint32_t receive_timestamp) {
  int16_t payload_length = static_cast<int16_t>(length_payload);

  // Translate to NetEq structure.
  WebRtcNetEQ_RTPInfo neteq_rtpinfo;
  neteq_rtpinfo.payloadType = rtp_info.header.payloadType;
  neteq_rtpinfo.sequenceNumber = rtp_info.header.sequenceNumber;
@@ -450,15 +497,6 @@ int32_t ACMNetEQ::RecIn(const uint8_t* incoming_payload,
  neteq_rtpinfo.markerBit = rtp_info.header.markerBit;

  CriticalSectionScoped lock(neteq_crit_sect_);
  // Down-cast the time to (32-6)-bit since we only care about
  // the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
  // we masked 6 most significant bits of 32-bit so we don't loose resolution
  // when do the following multiplication.
  const uint32_t now_in_ms =
      static_cast<uint32_t>(
          TickTime::MillisecondTimestamp() & 0x03ffffff);
  uint32_t recv_timestamp = static_cast<uint32_t>(
      current_samp_freq_khz_ * now_in_ms);

  int status;
  // In case of stereo payload, first half of the data should be pushed into
@@ -473,10 +511,10 @@ int32_t ACMNetEQ::RecIn(const uint8_t* incoming_payload,
                 "RecIn: NetEq is not initialized.");
    return -1;
  }
  // PUSH into Master
  // Push into Master.
  status = WebRtcNetEQ_RecInRTPStruct(inst_[0], &neteq_rtpinfo,
                                      incoming_payload, payload_length,
                                      recv_timestamp);
                                      receive_timestamp);
  if (status < 0) {
    LogError("RecInRTPStruct", 0);
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
@@ -491,10 +529,10 @@ int32_t ACMNetEQ::RecIn(const uint8_t* incoming_payload,
                   "RecIn: NetEq is not initialized.");
      return -1;
    }
    // PUSH into Slave
    // Push into Slave.
    status = WebRtcNetEQ_RecInRTPStruct(inst_[1], &neteq_rtpinfo,
                                        &incoming_payload[payload_length],
                                        payload_length, recv_timestamp);
                                        payload_length, receive_timestamp);
    if (status < 0) {
      LogError("RecInRTPStruct", 1);
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
@@ -529,7 +567,6 @@ int32_t ACMNetEQ::RecOut(AudioFrame& audio_frame) {
      LogError("RecOut", 0);
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
                   "RecOut: NetEq, error in pulling out for mono case");

      // Check for errors that can be recovered from:
      // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
      int error_code = WebRtcNetEQ_GetErrorCode(inst_[0]);
@@ -1056,6 +1093,8 @@ int16_t ACMNetEQ::AddSlave(const WebRtcNetEQDecoder* used_codecs,
                   "AddSlave: AddSlave Failed, Could not Set Playout Mode.");
      return -1;
    }
    // Set AV-sync for the slave.
    WebRtcNetEQ_EnableAVSync(inst_[slave_idx], av_sync_ ? 1 : 0);
  }

  return 0;
@@ -1071,4 +1110,13 @@ uint8_t ACMNetEQ::num_slaves() {
  return num_slaves_;
}

void ACMNetEQ::EnableAVSync(bool enable) {
  CriticalSectionScoped lock(neteq_crit_sect_);
  av_sync_ = enable;
  for (int i = 0; i < num_slaves_ + 1; ++i) {
    assert(is_initialized_[i]);
    WebRtcNetEQ_EnableAVSync(inst_[i], enable ? 1 : 0);
  }
}

}  // namespace webrtc

@@ -60,13 +60,31 @@ class ACMNetEQ {
  //   - rtp_info          : RTP header for the incoming payload containing
  //                         information about payload type, sequence number,
  //                         timestamp, SSRC and marker bit.
  //   - receive_timestamp : received timestamp.
  //
  // Return value          : 0 if ok.
  //                         <0 if NetEQ returned an error.
  //
  int32_t RecIn(const uint8_t* incoming_payload,
                const int32_t length_payload,
                const WebRtcRTPHeader& rtp_info);
                const WebRtcRTPHeader& rtp_info,
                uint32_t receive_timestamp);

  //
  // RecIn()
  // Insert a sync payload into NetEq. Should only be called if |av_sync_| is
  // enabled.
  //
  // Input:
  //   - rtp_info          : RTP header for the incoming payload containing
  //                         information about payload type, sequence number,
  //                         timestamp, SSRC and marker bit.
  //   - receive_timestamp : received timestamp.
  //
  // Return value          : 0 if ok.
  //                         <0 if NetEQ returned an error.
  //
  int RecIn(const WebRtcRTPHeader& rtp_info, uint32_t receive_timestamp);

  //
  // RecOut()
@@ -278,6 +296,11 @@ class ACMNetEQ {
    overhead_bytes = per_packet_overhead_bytes_;
  }

  //
  // Set AV-sync mode.
  //
  void EnableAVSync(bool enable);

 private:
  //
  // RTPPack()
@@ -350,6 +373,9 @@ class ACMNetEQ {
  // Minimum of buffer-size among all NetEq instances.
  int min_of_buffer_size_bytes_;
  int per_packet_overhead_bytes_;

  // Keeps track of AV-sync mode. Only used to configure the slave when a
  // slave is added.
  bool av_sync_;
};

}  // namespace webrtc

@@ -68,8 +68,9 @@ void AcmNetEqTest::InsertZeroPacket(uint16_t sequence_number,
  rtp_header.header.payloadType = payload_type;
  rtp_header.header.markerBit = marker_bit;
  rtp_header.type.Audio.channel = 1;
  // Receive timestamp can be set to send timestamp in this test.
  ASSERT_EQ(0, neteq_.RecIn(reinterpret_cast<uint8_t*>(payload),
                            len_payload_bytes, rtp_header));
                            len_payload_bytes, rtp_header, timestamp));
}

void AcmNetEqTest::PullData(int expected_num_samples) {

@@ -21,6 +21,7 @@
#include "webrtc/modules/audio_coding/main/source/acm_resampler.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/system_wrappers/interface/trace_event.h"

@@ -43,6 +44,9 @@ enum {
  kMaxNumFragmentationVectors = 3
};

static const uint32_t kMaskTimestamp = 0x03ffffff;
static const int kDefaultTimestampDiff = 960;  // 20 ms @ 48 kHz.

namespace {

bool IsCodecRED(const CodecInst* codec) {
@@ -85,7 +89,7 @@ int UpMix(const AudioFrame& frame, int length_out_buff, int16_t* out_buff) {

// Return 1 if timestamp t1 is less than timestamp t2, while compensating for
// wrap-around.
static int TimestampLessThan(uint32_t t1, uint32_t t2) {
int TimestampLessThan(uint32_t t1, uint32_t t2) {
  uint32_t kHalfFullRange = static_cast<uint32_t>(0xFFFFFFFF) / 2;
  if (t1 == t2) {
    return 0;
@@ -100,6 +104,21 @@ static int TimestampLessThan(uint32_t t1, uint32_t t2) {
  }
}

//
// Return the timestamp of the current time, computed according to the
// sampling rate of the codec identified by |codec_id|.
//
uint32_t NowTimestamp(int codec_id) {
  // Down-cast the time to (32-6)-bit since we only care about
  // the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
  // We mask the 6 most significant bits of the 32-bit value so we don't lose
  // resolution when doing the following multiplication.
  int sample_rate_khz = ACMCodecDB::database_[codec_id].plfreq / 1000;
  const uint32_t now_in_ms = static_cast<uint32_t>(
      TickTime::MillisecondTimestamp() & kMaskTimestamp);
  return static_cast<uint32_t>(sample_rate_khz * now_in_ms);
}

}  // namespace

AudioCodingModuleImpl::AudioCodingModuleImpl(const int32_t id)
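A quick check on the arithmetic in NowTimestamp() above: the mask keeps the 26 least significant bits of the millisecond clock (about 18.6 hours), so multiplying by a sample rate expressed in kHz cannot overflow 32 bits. A minimal standalone sketch of that bound, assuming sample rates of at most 48 kHz (illustrative only, not part of this change):

#include <cassert>
#include <cstdint>

// Mirrors the computation in NowTimestamp(): wall-clock milliseconds are
// mapped to timestamp units of the receive codec.
uint32_t TimestampFromMs(int64_t now_ms, uint32_t sample_rate_khz) {
  // Keep the 26 least significant bits: 2^26 ms is roughly 18.6 hours.
  const uint32_t masked_ms = static_cast<uint32_t>(now_ms) & 0x03ffffff;
  // Worst case: 48 * (2^26 - 1) = 3,221,225,424, which still fits in a
  // uint32_t (max 4,294,967,295), so the product below cannot overflow.
  assert(sample_rate_khz <= 48);
  return sample_rate_khz * masked_ms;
}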
@@ -147,7 +166,12 @@ AudioCodingModuleImpl::AudioCodingModuleImpl(const int32_t id)
      first_payload_received_(false),
      last_incoming_send_timestamp_(0),
      track_neteq_buffer_(false),
      playout_ts_(0) {
      playout_ts_(0),
      av_sync_(false),
      last_timestamp_diff_(kDefaultTimestampDiff),
      last_sequence_number_(0),
      last_ssrc_(0),
      last_packet_was_sync_(false) {

  // Nullify send codec memory, set payload type and set codec name to
  // invalid values.
@@ -1574,8 +1598,8 @@ int AudioCodingModuleImpl::SetVADSafe(bool enable_dtx,
  // If a send codec is registered, set VAD/DTX for the codec.
  if (HaveValidEncoder("SetVAD")) {
    int16_t status = codecs_[current_send_codec_idx_]->SetVAD(enable_dtx,
                                                               enable_vad,
                                                               mode);
                                                               enable_vad,
                                                               mode);
    if (status == 1) {
      // VAD was enabled.
      vad_enabled_ = true;
@@ -1981,6 +2005,29 @@ int32_t AudioCodingModuleImpl::IncomingPacket(
  // and "received frequency."
  CriticalSectionScoped lock(acm_crit_sect_);

  // Check whether there are packets missing between the last injected packet
  // and the latest received packet. If so, and we are in AV-sync mode, fill
  // the gap with sync packets. This should not be done for the first payload.
  if (av_sync_ && first_payload_received_ &&
      rtp_info.header.sequenceNumber > last_sequence_number_ + 1) {
    // If the last packet pushed was a sync packet, account for all missing
    // packets. Otherwise, leave some room for PLC.
    if (last_packet_was_sync_) {
      while (rtp_info.header.sequenceNumber > last_sequence_number_ + 2) {
        PushSyncPacketSafe();
      }
    } else {
      // Leave room for two packets, so NetEq can perform PLC.
      if (rtp_info.header.sequenceNumber > last_sequence_number_ + 3) {
        last_sequence_number_ += 2;
        last_incoming_send_timestamp_ += last_timestamp_diff_ * 2;
        last_receive_timestamp_ += 2 * last_timestamp_diff_;
        while (rtp_info.header.sequenceNumber > last_sequence_number_ + 1)
          PushSyncPacketSafe();
      }
    }
  }

  uint8_t my_payload_type;

  // Check if this is an RED payload.
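To make the gap-filling rule above concrete, here is a small assumed helper (illustrative only, not part of this change; it ignores sequence-number wrap-around) that computes how many sync packets the block above ends up pushing:

#include <algorithm>

// Number of sync packets injected when packet |received_seq| arrives and the
// last packet inserted into NetEq had sequence number |last_seq|.
int NumSyncPacketsToPush(int received_seq, int last_seq, bool last_was_sync) {
  if (last_was_sync) {
    // Fill the gap, but leave one missing packet for the decoder to conceal.
    return std::max(0, received_seq - last_seq - 2);
  }
  // Skip two missing packets (left for PLC) and fill the rest; nothing is
  // injected unless at least three packets are missing.
  return std::max(0, received_seq - last_seq - 3);
}

// Example: last_seq = 100, received_seq = 106 (packets 101..105 were lost).
//   last_was_sync == true  -> 4 sync packets are pushed (101..104).
//   last_was_sync == false -> 101 and 102 are left for PLC; 103..105 pushed.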
@@ -2010,32 +2057,18 @@ int32_t AudioCodingModuleImpl::IncomingPacket(
    }
    // Codec is changed, there might be a jump in timestamp, therefore,
    // we have to reset some variables that track NetEq buffer.
    if (track_neteq_buffer_) {
    if (track_neteq_buffer_ || av_sync_) {
      last_incoming_send_timestamp_ = rtp_info.header.timestamp;
    }
  }
  last_recv_audio_codec_pltype_ = my_payload_type;
  }

  if (track_neteq_buffer_) {
    const int in_sample_rate_khz =
        (ACMCodecDB::database_[current_receive_codec_idx_].plfreq / 1000);
    if (first_payload_received_) {
      if (rtp_info.header.timestamp > last_incoming_send_timestamp_) {
        accumulated_audio_ms_ += (rtp_info.header.timestamp -
            last_incoming_send_timestamp_) / in_sample_rate_khz;
      }
    } else {
      first_payload_received_ = true;
    }
    num_packets_accumulated_++;
    last_incoming_send_timestamp_ = rtp_info.header.timestamp;
    playout_ts_ = static_cast<uint32_t>(
        rtp_info.header.timestamp - static_cast<uint32_t>(
            initial_delay_ms_ * in_sample_rate_khz));
  }
  // Current timestamp based on the receiver sampling frequency.
  last_receive_timestamp_ = NowTimestamp(current_receive_codec_idx_);
  }

  int per_neteq_payload_length = payload_length;
  // Split the payload for stereo packets, so that first half of payload
  // vector holds left channel, and second half holds right channel.
  if (expected_channels_ == 2) {
@@ -2047,24 +2080,46 @@ int32_t AudioCodingModuleImpl::IncomingPacket(
      memcpy(payload, incoming_payload, payload_length);
      codecs_[current_receive_codec_idx_]->SplitStereoPacket(payload, &length);
      rtp_header.type.Audio.channel = 2;
      if (track_neteq_buffer_)
        num_bytes_accumulated_ += length / 2;  // Per NetEq instance: half is
                                               // inserted into master and
                                               // half into slave.
      per_neteq_payload_length = length / 2;
      // Insert packet into NetEQ.
      return neteq_.RecIn(payload, length, rtp_header);
      if (neteq_.RecIn(payload, length, rtp_header,
                       last_receive_timestamp_) < 0)
        return -1;
    } else {
      // If we receive a CNG packet while expecting stereo, we ignore the packet
      // and continue. CNG is not supported for stereo.
      // If we receive a CNG packet while expecting stereo, we ignore the
      // packet and continue. CNG is not supported for stereo.
      return 0;
    }
  } else {
    {
      CriticalSectionScoped lock(acm_crit_sect_);
      if (track_neteq_buffer_)
        num_bytes_accumulated_ += payload_length;
    }
    return neteq_.RecIn(incoming_payload, payload_length, rtp_header);
    if (neteq_.RecIn(incoming_payload, payload_length, rtp_header,
                     last_receive_timestamp_) < 0)
      return -1;
  }

  {
    CriticalSectionScoped lock(acm_crit_sect_);

    // Updating the buffering uses |last_incoming_send_timestamp_|, so it
    // should be done before the next block.
    if (track_neteq_buffer_)
      UpdateBufferingSafe(rtp_header, per_neteq_payload_length);

    if (av_sync_) {
      if (rtp_info.header.sequenceNumber == last_sequence_number_ + 1) {
        last_timestamp_diff_ = rtp_info.header.timestamp -
            last_incoming_send_timestamp_;
      }
      last_sequence_number_ = rtp_info.header.sequenceNumber;
      last_ssrc_ = rtp_info.header.ssrc;
      last_packet_was_sync_ = false;
    }

    if (av_sync_ || track_neteq_buffer_) {
      last_incoming_send_timestamp_ = rtp_info.header.timestamp;
      first_payload_received_ = true;
    }
  }
  return 0;
}

int AudioCodingModuleImpl::UpdateUponReceivingCodec(int index) {
@@ -2257,9 +2312,9 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
  audio_frame->speech_type_ = audio_frame_.speech_type_;

  stereo_mode = (audio_frame_.num_channels_ > 1);

  // For stereo playout:
  // Master and Slave samples are interleaved starting with Master.

  const uint16_t receive_freq =
      static_cast<uint16_t>(audio_frame_.sample_rate_hz_);
  bool tone_detected = false;
@@ -2270,6 +2325,23 @@ int32_t AudioCodingModuleImpl::PlayoutData10Ms(
  {
    CriticalSectionScoped lock(acm_crit_sect_);

    // If we are in AV-sync mode and the number of packets is below a
    // threshold, or the next packet is late, inject a sync packet.
    if (av_sync_ && NowTimestamp(current_receive_codec_idx_) > 5 *
        last_timestamp_diff_ + last_receive_timestamp_) {
      if (!last_packet_was_sync_) {
        // If the last packet inserted was a regular packet, skip two
        // packets to give room for PLC.
        last_incoming_send_timestamp_ += 2 * last_timestamp_diff_;
        last_sequence_number_ += 2;
        last_receive_timestamp_ += 2 * last_timestamp_diff_;
      }

      // One sync packet.
      if (PushSyncPacketSafe() < 0)
        return -1;
    }

    if ((receive_freq != desired_freq_hz) && (desired_freq_hz != -1)) {
      TRACE_EVENT_ASYNC_END2("webrtc", "ACM::PlayoutData10Ms", 0,
                             "stereo", stereo_mode, "resample", true);
@@ -2449,7 +2521,11 @@ int32_t AudioCodingModuleImpl::RegisterVADCallback(
  return 0;
}

// TODO(turajs): Remove this API if it is not used.
// TODO(tlegrand): Modify this function to work for stereo, and add tests.
// TODO(turajs): Receive timestamp in this method is incremented by frame-size
// and does not reflect the true receive frame-size. Therefore, subsequent
// jitter computations are not accurate.
int32_t AudioCodingModuleImpl::IncomingPayload(
    const uint8_t* incoming_payload, const int32_t payload_length,
    const uint8_t payload_type, const uint32_t timestamp) {
@@ -2512,8 +2588,10 @@ int32_t AudioCodingModuleImpl::IncomingPayload(
  // and "received frequency."
  last_recv_audio_codec_pltype_ = payload_type;

  last_receive_timestamp_ += recv_pl_frame_size_smpls_;
  // Insert in NetEQ.
  if (neteq_.RecIn(incoming_payload, payload_length, *dummy_rtp_header_) < 0) {
  if (neteq_.RecIn(incoming_payload, payload_length, *dummy_rtp_header_,
                   last_receive_timestamp_) < 0) {
    return -1;
  }

@@ -2836,6 +2914,7 @@ void AudioCodingModuleImpl::ResetFragmentation(int vector_size) {
      static_cast<uint16_t>(vector_size);
}

// TODO(turajs): Add second parameter to enable/disable AV-sync.
int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
  if (delay_ms < 0 || delay_ms > 10000) {
    return -1;
@@ -2854,13 +2933,19 @@ int AudioCodingModuleImpl::SetInitialPlayoutDelay(int delay_ms) {
  }
  initial_delay_ms_ = delay_ms;
  track_neteq_buffer_ = true;
  av_sync_ = true;
  neteq_.EnableAVSync(av_sync_);
  return neteq_.SetExtraDelay(delay_ms);
}

bool AudioCodingModuleImpl::GetSilence(int desired_sample_rate_hz,
                                       AudioFrame* frame) {
  CriticalSectionScoped lock(acm_crit_sect_);
  if (initial_delay_ms_ == 0 || accumulated_audio_ms_ >= initial_delay_ms_) {
  if (initial_delay_ms_ == 0 || !track_neteq_buffer_) {
    return false;
  }

  if (accumulated_audio_ms_ >= initial_delay_ms_) {
    track_neteq_buffer_ = false;
    return false;
  }
@@ -2906,4 +2991,50 @@ bool AudioCodingModuleImpl::GetSilence(int desired_sample_rate_hz,
  return true;
}

// Must be called within the scope of ACM critical section.
int AudioCodingModuleImpl::PushSyncPacketSafe() {
  assert(av_sync_);
  last_sequence_number_++;
  last_incoming_send_timestamp_ += last_timestamp_diff_;
  last_receive_timestamp_ += last_timestamp_diff_;

  WebRtcRTPHeader rtp_info;
  rtp_info.header.payloadType = last_recv_audio_codec_pltype_;
  rtp_info.header.ssrc = last_ssrc_;
  rtp_info.header.markerBit = false;
  rtp_info.header.sequenceNumber = last_sequence_number_;
  rtp_info.header.timestamp = last_incoming_send_timestamp_;
  rtp_info.type.Audio.channel = stereo_receive_ ? 2 : 1;
  last_packet_was_sync_ = true;
  int payload_len_bytes = neteq_.RecIn(rtp_info, last_receive_timestamp_);

  if (payload_len_bytes < 0)
    return -1;

  // This is to account for sync packets inserted during the buffering phase.
  if (track_neteq_buffer_)
    UpdateBufferingSafe(rtp_info, payload_len_bytes);

  return 0;
}

// Must be called within the scope of ACM critical section.
void AudioCodingModuleImpl::UpdateBufferingSafe(const WebRtcRTPHeader& rtp_info,
                                                int payload_len_bytes) {
  const int in_sample_rate_khz =
      (ACMCodecDB::database_[current_receive_codec_idx_].plfreq / 1000);
  if (first_payload_received_ &&
      rtp_info.header.timestamp > last_incoming_send_timestamp_) {
    accumulated_audio_ms_ += (rtp_info.header.timestamp -
        last_incoming_send_timestamp_) / in_sample_rate_khz;
  }

  num_packets_accumulated_++;
  num_bytes_accumulated_ += payload_len_bytes;

  playout_ts_ = static_cast<uint32_t>(
      rtp_info.header.timestamp - static_cast<uint32_t>(
          initial_delay_ms_ * in_sample_rate_khz));
}

}  // namespace webrtc
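A short worked example of the buffering bookkeeping in UpdateBufferingSafe(), assuming a hypothetical 48 kHz codec with 20 ms packets and an initial delay of 2000 ms: each packet advances the send timestamp by 960, so accumulated_audio_ms_ grows by 960 / 48 = 20 ms per packet; GetSilence() keeps reporting silence until accumulated_audio_ms_ reaches initial_delay_ms_, which here takes 100 packets, and playout_ts_ is held 2000 * 48 = 96000 timestamp units behind the newest packet's timestamp.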

@@ -312,6 +312,18 @@ class AudioCodingModuleImpl : public AudioCodingModule {

  bool GetSilence(int desired_sample_rate_hz, AudioFrame* frame);

  // Push a synchronization packet into NetEq. Such packets result in a frame
  // of zeros (not decoded by the corresponding decoder). The size of the frame
  // is the same as the last decoded frame. NetEq has a special payload for
  // this. Call within the scope of the ACM critical section.
  int PushSyncPacketSafe();

  // Update the parameters required in the initial phase of buffering, when
  // an initial playout delay is requested. Call within the scope of the ACM
  // critical section.
  void UpdateBufferingSafe(const WebRtcRTPHeader& rtp_info,
                           int payload_len_bytes);

  AudioPacketizationCallback* packetization_callback_;
  int32_t id_;
  uint32_t last_timestamp_;
@@ -395,6 +407,17 @@ class AudioCodingModuleImpl : public AudioCodingModule {
  uint32_t last_incoming_send_timestamp_;
  bool track_neteq_buffer_;
  uint32_t playout_ts_;

  // True when AV-sync is enabled. In AV-sync mode, sync packets are pushed
  // during long packet losses.
  bool av_sync_;

  // Latest send timestamp difference of two consecutive packets.
  uint32_t last_timestamp_diff_;
  uint16_t last_sequence_number_;
  uint32_t last_ssrc_;
  bool last_packet_was_sync_;
  int64_t last_receive_timestamp_;
};

}  // namespace webrtc