Remove use of RTPFragmentationHeader from RTPSenderAudio
The RTPFragmentationHeader was used when sending audio using RED for loss protection. This feature has been deprecated and gradually removed. This CL removes the remnants of that support from the RTP send path.

Bug: webrtc:6471
Change-Id: Ia1249047b09c16f79498827f74c2ce07aa38b8f7
Reviewed-on: https://webrtc-review.googlesource.com/16427
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Henrik Lundin <henrik.lundin@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#20473}
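For background on what the removed path did: with RED (RFC 2198) and no redundant blocks, the payload is a one-byte block header (F bit = 0, then the 7-bit payload type of the primary encoding) followed by the encoded audio, which is what the deleted fragmentation branch assembled from fragmentationPlType[0], fragmentationOffset[0] and fragmentationLength[0]. A minimal standalone sketch of that packing (the helper name is made up; this is not WebRTC API):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Pack a single primary block as a RED payload (RFC 2198). With no
// redundant blocks the RED header is one byte: F = 0, then the 7-bit
// payload type of the block that follows.
std::vector<uint8_t> PackPrimaryRedBlock(uint8_t block_payload_type,
                                         const uint8_t* data, size_t size) {
  std::vector<uint8_t> red(1 + size);
  red[0] = block_payload_type & 0x7F;  // F bit cleared: final block.
  std::memcpy(red.data() + 1, data, size);
  return red;
}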
@@ -417,9 +417,11 @@ bool RTPSender::SendOutgoingData(FrameType frame_type,
   if (audio_configured_) {
     TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", rtp_timestamp, "Send", "type",
                             FrameTypeToString(frame_type));
-
+    // The only known way to produce of RTPFragmentationHeader for audio is
+    // to use the AudioCodingModule directly.
+    RTC_DCHECK(fragmentation == nullptr);
     result = audio_->SendAudio(frame_type, payload_type, rtp_timestamp,
-                               payload_data, payload_size, fragmentation);
+                               payload_data, payload_size);
   } else {
     TRACE_EVENT_ASYNC_STEP1("webrtc", "Video", capture_time_ms,
                             "Send", "type", FrameTypeToString(frame_type));
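The RTC_DCHECK added above turns the now-unused parameter into an enforced contract before the parameter is deleted in the hunks below: in debug builds (or with DCHECK_ALWAYS_ON) any caller still passing fragmentation info fails loudly. A generic illustration of the two-step pattern, using a plain assert rather than the RTC_* macros:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct FragmentationInfo;  // Stand-in for RTPFragmentationHeader.

// Step 1: keep the parameter but require it to be null, so stray callers
// trip the check in debug builds. Step 2 (the next hunks) deletes it.
bool SendAudio(const uint8_t* payload_data, size_t payload_size,
               const FragmentationInfo* fragmentation) {
  assert(fragmentation == nullptr);
  (void)fragmentation;  // Silence unused-parameter warnings in release builds.
  return payload_data != nullptr && payload_size > 0;
}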
@@ -119,8 +119,7 @@ bool RTPSenderAudio::SendAudio(FrameType frame_type,
                                int8_t payload_type,
                                uint32_t rtp_timestamp,
                                const uint8_t* payload_data,
-                               size_t payload_size,
-                               const RTPFragmentationHeader* fragmentation) {
+                               size_t payload_size) {
   // From RFC 4733:
   // A source has wide latitude as to how often it sends event updates. A
   // natural interval is the spacing between non-event audio packets. [...]
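The RFC 4733 comment in the context above concerns telephone events (DTMF), which this send path also paces. For reference, the telephone-event payload is four bytes; a sketch of its layout (the helper is hypothetical, not WebRTC code):

#include <array>
#include <cstdint>

// Build the 4-byte RFC 4733 telephone-event payload:
//   byte 0: event code (0-11 cover the DTMF digits, *, #)
//   byte 1: E bit (end of event), R bit (reserved), 6-bit volume in -dBm0
//   bytes 2-3: duration in RTP timestamp units, big-endian
std::array<uint8_t, 4> BuildTelephoneEventPayload(uint8_t event, bool end,
                                                  uint8_t volume_dbm0,
                                                  uint16_t duration) {
  return {event,
          static_cast<uint8_t>((end ? 0x80 : 0x00) | (volume_dbm0 & 0x3F)),
          static_cast<uint8_t>(duration >> 8),
          static_cast<uint8_t>(duration & 0xFF)};
}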
@@ -223,21 +222,10 @@ bool RTPSenderAudio::SendAudio(FrameType frame_type,
   packet->SetExtension<AudioLevel>(frame_type == kAudioFrameSpeech,
                                    audio_level_dbov);
 
-  if (fragmentation && fragmentation->fragmentationVectorSize > 0) {
-    // Use the fragment info if we have one.
-    uint8_t* payload =
-        packet->AllocatePayload(1 + fragmentation->fragmentationLength[0]);
-    if (!payload)  // Too large payload buffer.
-      return false;
-    payload[0] = fragmentation->fragmentationPlType[0];
-    memcpy(payload + 1, payload_data + fragmentation->fragmentationOffset[0],
-           fragmentation->fragmentationLength[0]);
-  } else {
-    uint8_t* payload = packet->AllocatePayload(payload_size);
-    if (!payload)  // Too large payload buffer.
-      return false;
-    memcpy(payload, payload_data, payload_size);
-  }
+  uint8_t* payload = packet->AllocatePayload(payload_size);
+  if (!payload)  // Too large payload buffer.
+    return false;
+  memcpy(payload, payload_data, payload_size);
 
   if (!rtp_sender_->AssignSequenceNumber(packet.get()))
     return false;
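With fragmentation always null, the removed if-branch was dead code, so only the else-branch survives: allocate the payload in the packet and copy the encoded audio verbatim, failing if it does not fit. A condensed sketch of that surviving flow against a hypothetical packet type (not the real RtpPacketToSend API):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical packet type; AllocatePayload() returns nullptr when the
// payload would not fit, matching the "Too large payload buffer" checks.
class Packet {
 public:
  explicit Packet(size_t capacity) : capacity_(capacity) {}
  uint8_t* AllocatePayload(size_t size) {
    if (size > capacity_) return nullptr;  // Too large payload buffer.
    payload_.resize(size);
    return payload_.data();
  }
 private:
  size_t capacity_;
  std::vector<uint8_t> payload_;
};

// The fragmentation-free copy path in one place.
bool CopyPayload(Packet& packet, const uint8_t* payload_data,
                 size_t payload_size) {
  uint8_t* payload = packet.AllocatePayload(payload_size);
  if (!payload)
    return false;
  std::memcpy(payload, payload_data, payload_size);
  return true;
}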
@@ -39,8 +39,7 @@ class RTPSenderAudio {
                 int8_t payload_type,
                 uint32_t capture_timestamp,
                 const uint8_t* payload_data,
-                size_t payload_size,
-                const RTPFragmentationHeader* fragmentation);
+                size_t payload_size);
 
   // Store the audio level in dBov for
   // header-extension-for-audio-level-indication.