Moving src/webrtc into src/.

In order to eliminate the WebRTC Subtree mirror in Chromium, 
WebRTC is moving the content of the src/webrtc directory up
to the src/ directory.

NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
TBR=tommi@webrtc.org

Bug: chromium:611808
Change-Id: Iac59c5b51b950f174119565bac87955a7994bc38
Reviewed-on: https://webrtc-review.googlesource.com/1560
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Henrik Kjellander <kjellander@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#19845}
Author: Mirko Bonadei, 2017-09-15 06:15:48 +02:00 (committed by Commit Bot)
Parent: 6674846b4a
Commit: bb547203bf
4576 changed files with 1092 additions and 1196 deletions

@@ -0,0 +1,3 @@
include_rules = [
"+webrtc/logging/rtc_event_log",
]

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
#include <string>
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/rtc_base/md5digest.h"
#include "webrtc/rtc_base/stringencode.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
class AudioChecksum : public AudioSink {
public:
AudioChecksum() : finished_(false) {}
bool WriteArray(const int16_t* audio, size_t num_samples) override {
if (finished_)
return false;
#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
#error "Big-endian gives a different checksum"
#endif
checksum_.Update(audio, num_samples * sizeof(*audio));
return true;
}
// Finalizes the computations, and returns the checksum.
std::string Finish() {
if (!finished_) {
finished_ = true;
checksum_.Finish(checksum_result_, rtc::Md5Digest::kSize);
}
return rtc::hex_encode(checksum_result_, rtc::Md5Digest::kSize);
}
private:
rtc::Md5Digest checksum_;
char checksum_result_[rtc::Md5Digest::kSize];
bool finished_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioChecksum);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_CHECKSUM_H_
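
Usage sketch (illustration only, not part of this commit): computing the checksum of a stream of audio blocks with the AudioChecksum sink above (apparently audio_checksum.h, judging by its include guard, which is also where the include path below is inferred from).

#include <cstdint>
#include <iostream>
#include <vector>

#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"

int main() {
  webrtc::test::AudioChecksum checksum;
  std::vector<int16_t> block(480, 0);  // 10 ms of 48 kHz mono audio.
  for (int i = 0; i < 100; ++i) {
    checksum.WriteArray(block.data(), block.size());
  }
  // Finish() finalizes the MD5 computation; any later WriteArray() call
  // returns false.
  std::cout << checksum.Finish() << std::endl;
  return 0;
}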

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
namespace webrtc {
namespace test {
bool AudioLoop::Init(const std::string file_name,
size_t max_loop_length_samples,
size_t block_length_samples) {
FILE* fp = fopen(file_name.c_str(), "rb");
if (!fp) return false;
audio_array_.reset(new int16_t[max_loop_length_samples +
block_length_samples]);
size_t samples_read = fread(audio_array_.get(), sizeof(int16_t),
max_loop_length_samples, fp);
fclose(fp);
// Block length must be shorter than the loop length.
if (block_length_samples > samples_read) return false;
// Add an extra block length of samples to the end of the array, starting
// over again from the beginning of the array. This is done to simplify
// the reading process when reading over the end of the loop.
memcpy(&audio_array_[samples_read], audio_array_.get(),
block_length_samples * sizeof(int16_t));
loop_length_samples_ = samples_read;
block_length_samples_ = block_length_samples;
next_index_ = 0;
return true;
}
rtc::ArrayView<const int16_t> AudioLoop::GetNextBlock() {
// Check that the AudioLoop is initialized.
if (block_length_samples_ == 0)
return rtc::ArrayView<const int16_t>();
const int16_t* output_ptr = &audio_array_[next_index_];
next_index_ = (next_index_ + block_length_samples_) % loop_length_samples_;
return rtc::ArrayView<const int16_t>(output_ptr, block_length_samples_);
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
#include <memory>
#include <string>
#include "webrtc/api/array_view.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
// Class serving as an infinite source of audio, realized by looping an audio
// clip.
class AudioLoop {
public:
AudioLoop()
: next_index_(0),
loop_length_samples_(0),
block_length_samples_(0) {
}
virtual ~AudioLoop() {}
// Initializes the AudioLoop by reading from |file_name|. The loop will be no
// longer than |max_loop_length_samples|, if the length of the file is
// greater. Otherwise, the loop length is the same as the file length.
// The audio will be delivered in blocks of |block_length_samples|.
// Returns false if the initialization failed, otherwise true.
bool Init(const std::string file_name, size_t max_loop_length_samples,
size_t block_length_samples);
// Returns a (pointer,size) pair for the next block of audio. The size is
// equal to the |block_length_samples| Init() argument.
rtc::ArrayView<const int16_t> GetNextBlock();
private:
size_t next_index_;
size_t loop_length_samples_;
size_t block_length_samples_;
std::unique_ptr<int16_t[]> audio_array_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioLoop);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_LOOP_H_
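
Usage sketch (illustration only, not part of this commit): looping a raw PCM file and pulling fixed-size blocks with the AudioLoop class above. The file name is a placeholder, and the include path is inferred from the include guard.

#include <cstdint>
#include <iostream>

#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"

int main() {
  constexpr int kSampleRateHz = 32000;
  webrtc::test::AudioLoop loop;
  // Loop at most 10 s of the file; deliver audio in 60 ms blocks.
  if (!loop.Init("testfile32kHz.pcm", 10 * kSampleRateHz,
                 60 * kSampleRateHz / 1000)) {
    std::cerr << "Could not open input file." << std::endl;
    return 1;
  }
  int64_t energy = 0;
  for (int i = 0; i < 1000; ++i) {
    // GetNextBlock() wraps around at the loop end, so it never runs dry.
    for (int16_t sample : loop.GetNextBlock()) {
      energy += sample * sample;
    }
  }
  std::cout << "Accumulated energy: " << energy << std::endl;
  return 0;
}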

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
namespace webrtc {
namespace test {
bool AudioSinkFork::WriteArray(const int16_t* audio, size_t num_samples) {
return left_sink_->WriteArray(audio, num_samples) &&
right_sink_->WriteArray(audio, num_samples);
}
bool VoidAudioSink::WriteArray(const int16_t* audio, size_t num_samples) {
return true;
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
// Interface class for an object receiving raw output audio from test
// applications.
class AudioSink {
public:
AudioSink() {}
virtual ~AudioSink() {}
// Writes |num_samples| from |audio| to the AudioSink. Returns true if
// successful, otherwise false.
virtual bool WriteArray(const int16_t* audio, size_t num_samples) = 0;
// Writes |audio_frame| to the AudioSink. Returns true if successful,
// otherwise false.
bool WriteAudioFrame(const AudioFrame& audio_frame) {
return WriteArray(
audio_frame.data(),
audio_frame.samples_per_channel_ * audio_frame.num_channels_);
}
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioSink);
};
// Forks the output audio to two AudioSink objects.
class AudioSinkFork : public AudioSink {
public:
AudioSinkFork(AudioSink* left, AudioSink* right)
: left_sink_(left), right_sink_(right) {}
bool WriteArray(const int16_t* audio, size_t num_samples) override;
private:
AudioSink* left_sink_;
AudioSink* right_sink_;
RTC_DISALLOW_COPY_AND_ASSIGN(AudioSinkFork);
};
// An AudioSink implementation that does nothing.
class VoidAudioSink : public AudioSink {
public:
VoidAudioSink() = default;
bool WriteArray(const int16_t* audio, size_t num_samples) override;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(VoidAudioSink);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_AUDIO_SINK_H_
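
Usage sketch (illustration only, not part of this commit): forking the same audio to two sinks from this change, an AudioChecksum and a VoidAudioSink. The fork does not take ownership, so the sinks must outlive it; include paths are inferred from the include guards.

#include <cstdint>
#include <iostream>
#include <vector>

#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"

int main() {
  webrtc::test::AudioChecksum checksum;
  webrtc::test::VoidAudioSink discard;
  webrtc::test::AudioSinkFork fork(&checksum, &discard);
  std::vector<int16_t> block(160, 1000);  // 10 ms at 16 kHz, constant value.
  // WriteArray() forwards the block to both sinks and ANDs their results.
  fork.WriteArray(block.data(), block.size());
  std::cout << checksum.Finish() << std::endl;
  return 0;
}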

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
#include <algorithm>
#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
ConstantPcmPacketSource::ConstantPcmPacketSource(size_t payload_len_samples,
int16_t sample_value,
int sample_rate_hz,
int payload_type)
: payload_len_samples_(payload_len_samples),
packet_len_bytes_(2 * payload_len_samples_ + kHeaderLenBytes),
samples_per_ms_(sample_rate_hz / 1000),
next_arrival_time_ms_(0.0),
payload_type_(payload_type),
seq_number_(0),
timestamp_(0),
payload_ssrc_(0xABCD1234) {
size_t encoded_len = WebRtcPcm16b_Encode(&sample_value, 1, encoded_sample_);
RTC_CHECK_EQ(2U, encoded_len);
}
std::unique_ptr<Packet> ConstantPcmPacketSource::NextPacket() {
RTC_CHECK_GT(packet_len_bytes_, kHeaderLenBytes);
uint8_t* packet_memory = new uint8_t[packet_len_bytes_];
// Fill the payload part of the packet memory with the pre-encoded value.
for (unsigned i = 0; i < 2 * payload_len_samples_; ++i)
packet_memory[kHeaderLenBytes + i] = encoded_sample_[i % 2];
WriteHeader(packet_memory);
// |packet| assumes ownership of |packet_memory|.
std::unique_ptr<Packet> packet(
new Packet(packet_memory, packet_len_bytes_, next_arrival_time_ms_));
next_arrival_time_ms_ += payload_len_samples_ / samples_per_ms_;
return packet;
}
void ConstantPcmPacketSource::WriteHeader(uint8_t* packet_memory) {
packet_memory[0] = 0x80;
packet_memory[1] = static_cast<uint8_t>(payload_type_);
packet_memory[2] = seq_number_ >> 8;
packet_memory[3] = seq_number_ & 0xFF;
packet_memory[4] = timestamp_ >> 24;
packet_memory[5] = (timestamp_ >> 16) & 0xFF;
packet_memory[6] = (timestamp_ >> 8) & 0xFF;
packet_memory[7] = timestamp_ & 0xFF;
packet_memory[8] = payload_ssrc_ >> 24;
packet_memory[9] = (payload_ssrc_ >> 16) & 0xFF;
packet_memory[10] = (payload_ssrc_ >> 8) & 0xFF;
packet_memory[11] = payload_ssrc_ & 0xFF;
++seq_number_;
timestamp_ += static_cast<uint32_t>(payload_len_samples_);
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,55 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
#include <stdio.h>
#include <string>
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
#include "webrtc/rtc_base/constructormagic.h"
namespace webrtc {
namespace test {
// This class implements a packet source that delivers PCM16b encoded packets
// with a constant sample value. The payload length, constant sample value,
// sample rate, and payload type are all set in the constructor.
class ConstantPcmPacketSource : public PacketSource {
public:
ConstantPcmPacketSource(size_t payload_len_samples,
int16_t sample_value,
int sample_rate_hz,
int payload_type);
std::unique_ptr<Packet> NextPacket() override;
private:
void WriteHeader(uint8_t* packet_memory);
const size_t kHeaderLenBytes = 12;
const size_t payload_len_samples_;
const size_t packet_len_bytes_;
uint8_t encoded_sample_[2];
const int samples_per_ms_;
double next_arrival_time_ms_;
const int payload_type_;
uint16_t seq_number_;
uint32_t timestamp_;
const uint32_t payload_ssrc_;
RTC_DISALLOW_COPY_AND_ASSIGN(ConstantPcmPacketSource);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_CONSTANT_PCM_PACKET_SOURCE_H_
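
Usage sketch (illustration only, not part of this commit): pulling a few packets from a ConstantPcmPacketSource and printing their RTP header fields and arrival times. The constructor arguments (480 samples per packet, sample value 100, 16 kHz, payload type 94) are arbitrary example values.

#include <iostream>
#include <memory>

#include "webrtc/modules/audio_coding/neteq/tools/constant_pcm_packet_source.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"

int main() {
  // 480 samples (30 ms at 16 kHz) per packet, constant sample value 100.
  webrtc::test::ConstantPcmPacketSource source(480, 100, 16000, 94);
  for (int i = 0; i < 5; ++i) {
    std::unique_ptr<webrtc::test::Packet> packet = source.NextPacket();
    std::cout << "seq=" << packet->header().sequenceNumber
              << " ts=" << packet->header().timestamp
              << " arrival=" << packet->time_ms() << " ms" << std::endl;
  }
  return 0;
}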

@@ -0,0 +1,88 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.h"
#include <utility>
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/safe_conversions.h"
namespace webrtc {
namespace test {
EncodeNetEqInput::EncodeNetEqInput(std::unique_ptr<Generator> generator,
std::unique_ptr<AudioEncoder> encoder,
int64_t input_duration_ms)
: generator_(std::move(generator)),
encoder_(std::move(encoder)),
input_duration_ms_(input_duration_ms) {
CreatePacket();
}
rtc::Optional<int64_t> EncodeNetEqInput::NextPacketTime() const {
RTC_DCHECK(packet_data_);
return rtc::Optional<int64_t>(static_cast<int64_t>(packet_data_->time_ms));
}
rtc::Optional<int64_t> EncodeNetEqInput::NextOutputEventTime() const {
return rtc::Optional<int64_t>(next_output_event_ms_);
}
std::unique_ptr<NetEqInput::PacketData> EncodeNetEqInput::PopPacket() {
RTC_DCHECK(packet_data_);
// Grab the packet to return...
std::unique_ptr<PacketData> packet_to_return = std::move(packet_data_);
// ... and line up the next packet for future use.
CreatePacket();
return packet_to_return;
}
void EncodeNetEqInput::AdvanceOutputEvent() {
next_output_event_ms_ += kOutputPeriodMs;
}
rtc::Optional<RTPHeader> EncodeNetEqInput::NextHeader() const {
RTC_DCHECK(packet_data_);
return rtc::Optional<RTPHeader>(packet_data_->header);
}
void EncodeNetEqInput::CreatePacket() {
// Create a new PacketData object.
RTC_DCHECK(!packet_data_);
packet_data_.reset(new NetEqInput::PacketData);
RTC_DCHECK_EQ(packet_data_->payload.size(), 0);
// Loop until we get a packet.
AudioEncoder::EncodedInfo info;
RTC_DCHECK(!info.send_even_if_empty);
int num_blocks = 0;
while (packet_data_->payload.size() == 0 && !info.send_even_if_empty) {
const size_t num_samples = rtc::CheckedDivExact(
static_cast<int>(encoder_->SampleRateHz() * kOutputPeriodMs), 1000);
info = encoder_->Encode(rtp_timestamp_, generator_->Generate(num_samples),
&packet_data_->payload);
rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
num_samples * encoder_->RtpTimestampRateHz() /
encoder_->SampleRateHz());
++num_blocks;
}
packet_data_->header.timestamp = info.encoded_timestamp;
packet_data_->header.payloadType = info.payload_type;
packet_data_->header.sequenceNumber = sequence_number_++;
packet_data_->time_ms = next_packet_time_ms_;
next_packet_time_ms_ += num_blocks * kOutputPeriodMs;
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
#include <memory>
#include "webrtc/api/audio_codecs/audio_encoder.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"
#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
namespace test {
// This class provides a NetEqInput that takes audio from a generator object and
// encodes it using a given audio encoder.
class EncodeNetEqInput : public NetEqInput {
public:
// Generator class, to be provided to the EncodeNetEqInput constructor.
class Generator {
public:
virtual ~Generator() = default;
// Returns the next num_samples values from the signal generator.
virtual rtc::ArrayView<const int16_t> Generate(size_t num_samples) = 0;
};
// The source will end after the given input duration.
EncodeNetEqInput(std::unique_ptr<Generator> generator,
std::unique_ptr<AudioEncoder> encoder,
int64_t input_duration_ms);
rtc::Optional<int64_t> NextPacketTime() const override;
rtc::Optional<int64_t> NextOutputEventTime() const override;
std::unique_ptr<PacketData> PopPacket() override;
void AdvanceOutputEvent() override;
bool ended() const override {
return next_output_event_ms_ <= input_duration_ms_;
}
rtc::Optional<RTPHeader> NextHeader() const override;
private:
static constexpr int64_t kOutputPeriodMs = 10;
void CreatePacket();
std::unique_ptr<Generator> generator_;
std::unique_ptr<AudioEncoder> encoder_;
std::unique_ptr<PacketData> packet_data_;
uint32_t rtp_timestamp_ = 0;
int16_t sequence_number_ = 0;
int64_t next_packet_time_ms_ = 0;
int64_t next_output_event_ms_ = 0;
const int64_t input_duration_ms_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_ENCODE_NETEQ_INPUT_H_
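
Illustration (not part of this commit): a minimal Generator implementation that produces silence. A real tool would typically wrap a file reader such as AudioLoop instead; the resulting generator can then be combined with any AudioEncoder and a duration via the EncodeNetEqInput constructor above.

#include <cstddef>
#include <cstdint>
#include <vector>

#include "webrtc/api/array_view.h"
#include "webrtc/modules/audio_coding/neteq/tools/encode_neteq_input.h"

class SilenceGenerator : public webrtc::test::EncodeNetEqInput::Generator {
 public:
  rtc::ArrayView<const int16_t> Generate(size_t num_samples) override {
    // Hand out a zero-filled buffer of the requested length. The buffer is a
    // member so that the returned view stays valid until the next call.
    buffer_.assign(num_samples, 0);
    return rtc::ArrayView<const int16_t>(buffer_.data(), buffer_.size());
  }

 private:
  std::vector<int16_t> buffer_;
};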

@@ -0,0 +1,103 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/safe_conversions.h"
namespace webrtc {
namespace test {
int FakeDecodeFromFile::DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) {
if (encoded_len == 0) {
// Decoder is asked to produce codec-internal comfort noise.
RTC_DCHECK(!encoded); // NetEq always sends nullptr in this case.
RTC_DCHECK(cng_mode_);
RTC_DCHECK_GT(last_decoded_length_, 0);
std::fill_n(decoded, last_decoded_length_, 0);
*speech_type = kComfortNoise;
return rtc::dchecked_cast<int>(last_decoded_length_);
}
RTC_CHECK_GE(encoded_len, 12);
uint32_t timestamp_to_decode =
ByteReader<uint32_t>::ReadLittleEndian(encoded);
uint32_t samples_to_decode =
ByteReader<uint32_t>::ReadLittleEndian(&encoded[4]);
if (samples_to_decode == 0) {
// Number of samples in packet is unknown.
if (last_decoded_length_ > 0) {
// Use length of last decoded packet, but since this is the total for all
// channels, we have to divide by 2 in the stereo case.
samples_to_decode = rtc::dchecked_cast<int>(rtc::CheckedDivExact(
last_decoded_length_, static_cast<size_t>(stereo_ ? 2uL : 1uL)));
} else {
// This is the first packet to decode, and we do not know the length of
// it. Set it to 10 ms.
samples_to_decode = rtc::CheckedDivExact(sample_rate_hz, 100);
}
}
if (next_timestamp_from_input_ &&
timestamp_to_decode != *next_timestamp_from_input_) {
// A gap in the timestamp sequence is detected. Skip the same number of
// samples from the file.
uint32_t jump = timestamp_to_decode - *next_timestamp_from_input_;
RTC_CHECK(input_->Seek(jump));
}
next_timestamp_from_input_ =
rtc::Optional<uint32_t>(timestamp_to_decode + samples_to_decode);
uint32_t original_payload_size_bytes =
ByteReader<uint32_t>::ReadLittleEndian(&encoded[8]);
if (original_payload_size_bytes == 1) {
// This is a comfort noise payload.
RTC_DCHECK_GT(last_decoded_length_, 0);
std::fill_n(decoded, last_decoded_length_, 0);
*speech_type = kComfortNoise;
cng_mode_ = true;
return rtc::dchecked_cast<int>(last_decoded_length_);
}
cng_mode_ = false;
RTC_CHECK(input_->Read(static_cast<size_t>(samples_to_decode), decoded));
if (stereo_) {
InputAudioFile::DuplicateInterleaved(decoded, samples_to_decode, 2,
decoded);
samples_to_decode *= 2;
}
*speech_type = kSpeech;
last_decoded_length_ = samples_to_decode;
return rtc::dchecked_cast<int>(last_decoded_length_);
}
void FakeDecodeFromFile::PrepareEncoded(uint32_t timestamp,
size_t samples,
size_t original_payload_size_bytes,
rtc::ArrayView<uint8_t> encoded) {
RTC_CHECK_GE(encoded.size(), 12);
ByteWriter<uint32_t>::WriteLittleEndian(&encoded[0], timestamp);
ByteWriter<uint32_t>::WriteLittleEndian(&encoded[4],
rtc::checked_cast<uint32_t>(samples));
ByteWriter<uint32_t>::WriteLittleEndian(
&encoded[8], rtc::checked_cast<uint32_t>(original_payload_size_bytes));
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
#include <memory>
#include "webrtc/api/array_view.h"
#include "webrtc/api/audio_codecs/audio_decoder.h"
#include "webrtc/api/optional.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
namespace webrtc {
namespace test {
// Provides an AudioDecoder implementation that delivers audio data from a file.
// The "encoded" input should contain information about what RTP timestamp the
// encoding represents, and how many samples the decoder should produce for that
// encoding. A helper method PrepareEncoded is provided to prepare such
// encodings. If packets are missing, as determined from the timestamps, the
// file reading will skip forward to match the loss.
class FakeDecodeFromFile : public AudioDecoder {
public:
FakeDecodeFromFile(std::unique_ptr<InputAudioFile> input,
int sample_rate_hz,
bool stereo)
: input_(std::move(input)),
sample_rate_hz_(sample_rate_hz),
stereo_(stereo) {}
~FakeDecodeFromFile() = default;
void Reset() override {}
int SampleRateHz() const override { return sample_rate_hz_; }
size_t Channels() const override { return stereo_ ? 2 : 1; }
int DecodeInternal(const uint8_t* encoded,
size_t encoded_len,
int sample_rate_hz,
int16_t* decoded,
SpeechType* speech_type) override;
// Helper method. Writes |timestamp|, |samples| and
// |original_payload_size_bytes| to |encoded| in a format that the
// FakeDecodeFromFile decoder will understand. |encoded| must be at least 12
// bytes long.
static void PrepareEncoded(uint32_t timestamp,
size_t samples,
size_t original_payload_size_bytes,
rtc::ArrayView<uint8_t> encoded);
private:
std::unique_ptr<InputAudioFile> input_;
rtc::Optional<uint32_t> next_timestamp_from_input_;
const int sample_rate_hz_;
const bool stereo_;
size_t last_decoded_length_ = 0;
bool cng_mode_ = false;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_FAKE_DECODE_FROM_FILE_H_
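
Illustration (not part of this commit): preparing the 12-byte "encoded" payload that FakeDecodeFromFile expects. The sample count and original payload size below are arbitrary example values; an original payload size of 1 would instead be interpreted as comfort noise, as seen in DecodeInternal() above.

#include <cstddef>
#include <cstdint>

#include "webrtc/api/array_view.h"
#include "webrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h"

// Fills |payload| (which must be at least 12 bytes) for a fake packet that
// covers |rtp_timestamp| and 960 samples (20 ms at 48 kHz).
void FillFakePayload(uint32_t rtp_timestamp, rtc::ArrayView<uint8_t> payload) {
  constexpr size_t kSamplesPerPacket = 960;
  constexpr size_t kOriginalPayloadBytes = 100;
  webrtc::test::FakeDecodeFromFile::PrepareEncoded(
      rtp_timestamp, kSamplesPerPacket, kOriginalPayloadBytes, payload);
}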

@@ -0,0 +1,77 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
InputAudioFile::InputAudioFile(const std::string file_name) {
fp_ = fopen(file_name.c_str(), "rb");
}
InputAudioFile::~InputAudioFile() { fclose(fp_); }
bool InputAudioFile::Read(size_t samples, int16_t* destination) {
if (!fp_) {
return false;
}
size_t samples_read = fread(destination, sizeof(int16_t), samples, fp_);
if (samples_read < samples) {
// Rewind and read the missing samples.
rewind(fp_);
size_t missing_samples = samples - samples_read;
if (fread(destination + samples_read, sizeof(int16_t), missing_samples,
fp_) < missing_samples) {
// Could not read enough even after rewinding the file.
return false;
}
}
return true;
}
bool InputAudioFile::Seek(int samples) {
if (!fp_) {
return false;
}
// Find file boundaries.
const long current_pos = ftell(fp_);
RTC_CHECK_NE(EOF, current_pos)
<< "Error returned when getting file position.";
RTC_CHECK_EQ(0, fseek(fp_, 0, SEEK_END)); // Move to end of file.
const long file_size = ftell(fp_);
RTC_CHECK_NE(EOF, file_size) << "Error returned when getting file position.";
// Find new position.
long new_pos = current_pos + sizeof(int16_t) * samples; // Samples to bytes.
RTC_CHECK_GE(new_pos, 0)
<< "Trying to move to before the beginning of the file";
new_pos = new_pos % file_size; // Wrap around the end of the file.
// Move to new position relative to the beginning of the file.
RTC_CHECK_EQ(0, fseek(fp_, new_pos, SEEK_SET));
return true;
}
void InputAudioFile::DuplicateInterleaved(const int16_t* source, size_t samples,
size_t channels,
int16_t* destination) {
// Start from the end of |source| and |destination|, and work towards the
// beginning. This is to allow in-place interleaving of the same array (i.e.,
// |source| and |destination| are the same array).
for (int i = static_cast<int>(samples - 1); i >= 0; --i) {
for (int j = static_cast<int>(channels - 1); j >= 0; --j) {
destination[i * channels + j] = source[i];
}
}
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
#include <stdio.h>
#include <string>
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
// Class for handling a looping input audio file.
class InputAudioFile {
public:
explicit InputAudioFile(const std::string file_name);
virtual ~InputAudioFile();
// Reads |samples| elements from source file to |destination|. Returns true
// if the read was successful, otherwise false. If the file end is reached,
// the file is rewound and reading continues from the beginning.
// The output |destination| must have the capacity to hold |samples| elements.
virtual bool Read(size_t samples, int16_t* destination);
// Fast-forwards (|samples| > 0) or -backwards (|samples| < 0) the file by the
// indicated number of samples. Just like Read(), Seek() starts over at the
// beginning of the file if the end is reached. However, seeking backwards
// past the beginning of the file is not possible.
virtual bool Seek(int samples);
// Creates a multi-channel signal from a mono signal. Each sample is repeated
// |channels| times to create an interleaved multi-channel signal where all
// channels are identical. The output |destination| must have the capacity to
// hold samples * channels elements. Note that |source| and |destination| can
// be the same array (i.e., point to the same address).
static void DuplicateInterleaved(const int16_t* source, size_t samples,
size_t channels, int16_t* destination);
private:
FILE* fp_;
RTC_DISALLOW_COPY_AND_ASSIGN(InputAudioFile);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_INPUT_AUDIO_FILE_H_
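
Usage sketch (illustration only, not part of this commit): reading and seeking in a looping raw PCM file with InputAudioFile. The file name is a placeholder.

#include <cstdint>
#include <iostream>
#include <vector>

#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"

int main() {
  webrtc::test::InputAudioFile file("speech_16kHz.pcm");
  std::vector<int16_t> block(320);  // 20 ms at 16 kHz.
  if (!file.Read(block.size(), block.data())) {
    std::cerr << "Read failed; does the file exist?" << std::endl;
    return 1;
  }
  // Step back 10 ms and read the overlapping region again. Reads past the end
  // of the file wrap around to the beginning automatically.
  file.Seek(-160);
  file.Read(block.size(), block.data());
  return 0;
}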

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Unit tests for the InputAudioFile test class.
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
TEST(TestInputAudioFile, DuplicateInterleaveSeparateSrcDst) {
static const size_t kSamples = 10;
static const size_t kChannels = 2;
int16_t input[kSamples];
for (size_t i = 0; i < kSamples; ++i) {
input[i] = i;
}
int16_t output[kSamples * kChannels];
InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, output);
// Verify output
int16_t* output_ptr = output;
for (size_t i = 0; i < kSamples; ++i) {
for (size_t j = 0; j < kChannels; ++j) {
EXPECT_EQ(static_cast<int16_t>(i), *output_ptr++);
}
}
}
TEST(TestInputAudioFile, DuplicateInterleaveSameSrcDst) {
static const size_t kSamples = 10;
static const size_t kChannels = 5;
int16_t input[kSamples * kChannels];
for (size_t i = 0; i < kSamples; ++i) {
input[i] = i;
}
InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, input);
// Verify output
int16_t* output_ptr = input;
for (size_t i = 0; i < kSamples; ++i) {
for (size_t j = 0; j < kChannels; ++j) {
EXPECT_EQ(static_cast<int16_t>(i), *output_ptr++);
}
}
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,257 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include <algorithm>
#include <fstream>
#include <ios>
#include <iterator>
#include <limits>
#include <utility>
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
namespace {
// Helper function for NetEqDelayAnalyzer::CreateGraphs. Returns the
// interpolated value of a function at the point x. Vector x_vec contains the
// sample points, and y_vec contains the function values at these points. The
// return value is a linear interpolation between y_vec values.
double LinearInterpolate(double x,
const std::vector<int64_t>& x_vec,
const std::vector<int64_t>& y_vec) {
// Find first element which is larger than x.
auto it = std::upper_bound(x_vec.begin(), x_vec.end(), x);
if (it == x_vec.end()) {
--it;
}
const size_t upper_ix = it - x_vec.begin();
size_t lower_ix;
if (upper_ix == 0 || x_vec[upper_ix] <= x) {
lower_ix = upper_ix;
} else {
lower_ix = upper_ix - 1;
}
double y;
if (lower_ix == upper_ix) {
y = y_vec[lower_ix];
} else {
RTC_DCHECK_NE(x_vec[lower_ix], x_vec[upper_ix]);
y = (x - x_vec[lower_ix]) * (y_vec[upper_ix] - y_vec[lower_ix]) /
(x_vec[upper_ix] - x_vec[lower_ix]) +
y_vec[lower_ix];
}
return y;
}
} // namespace
void NetEqDelayAnalyzer::AfterInsertPacket(
const test::NetEqInput::PacketData& packet,
NetEq* neteq) {
data_.insert(
std::make_pair(packet.header.timestamp, TimingData(packet.time_ms)));
ssrcs_.insert(packet.header.ssrc);
payload_types_.insert(packet.header.payloadType);
}
void NetEqDelayAnalyzer::BeforeGetAudio(NetEq* neteq) {
last_sync_buffer_ms_ = neteq->SyncBufferSizeMs();
}
void NetEqDelayAnalyzer::AfterGetAudio(int64_t time_now_ms,
const AudioFrame& audio_frame,
bool /*muted*/,
NetEq* neteq) {
get_audio_time_ms_.push_back(time_now_ms);
// Check what timestamps were decoded in the last GetAudio call.
std::vector<uint32_t> dec_ts = neteq->LastDecodedTimestamps();
// Find those timestamps in data_, insert their decoding time and sync
// delay.
for (uint32_t ts : dec_ts) {
auto it = data_.find(ts);
if (it == data_.end()) {
// This is a packet that was split out from another packet. Skip it.
continue;
}
auto& it_timing = it->second;
RTC_CHECK(!it_timing.decode_get_audio_count)
<< "Decode time already written";
it_timing.decode_get_audio_count = rtc::Optional<int64_t>(get_audio_count_);
RTC_CHECK(!it_timing.sync_delay_ms) << "Decode time already written";
it_timing.sync_delay_ms = rtc::Optional<int64_t>(last_sync_buffer_ms_);
it_timing.target_delay_ms = rtc::Optional<int>(neteq->TargetDelayMs());
it_timing.current_delay_ms =
rtc::Optional<int>(neteq->FilteredCurrentDelayMs());
}
last_sample_rate_hz_ = audio_frame.sample_rate_hz_;
++get_audio_count_;
}
void NetEqDelayAnalyzer::CreateGraphs(
std::vector<float>* send_time_s,
std::vector<float>* arrival_delay_ms,
std::vector<float>* corrected_arrival_delay_ms,
std::vector<rtc::Optional<float>>* playout_delay_ms,
std::vector<rtc::Optional<float>>* target_delay_ms) const {
if (get_audio_time_ms_.empty()) {
return;
}
// Create nominal_get_audio_time_ms, a vector starting at
// get_audio_time_ms_[0] and increasing by 10 for each element.
std::vector<int64_t> nominal_get_audio_time_ms(get_audio_time_ms_.size());
nominal_get_audio_time_ms[0] = get_audio_time_ms_[0];
std::transform(
nominal_get_audio_time_ms.begin(), nominal_get_audio_time_ms.end() - 1,
nominal_get_audio_time_ms.begin() + 1, [](int64_t& x) { return x + 10; });
RTC_DCHECK(
std::is_sorted(get_audio_time_ms_.begin(), get_audio_time_ms_.end()));
std::vector<double> rtp_timestamps_ms;
double offset = std::numeric_limits<double>::max();
TimestampUnwrapper unwrapper;
// This loop traverses data_ and populates rtp_timestamps_ms as well as
// calculates the base offset.
for (auto& d : data_) {
rtp_timestamps_ms.push_back(
unwrapper.Unwrap(d.first) /
rtc::CheckedDivExact(last_sample_rate_hz_, 1000));
offset =
std::min(offset, d.second.arrival_time_ms - rtp_timestamps_ms.back());
}
// Calculate send times in seconds for each packet. This is the (unwrapped)
// RTP timestamp in ms divided by 1000.
send_time_s->resize(rtp_timestamps_ms.size());
std::transform(rtp_timestamps_ms.begin(), rtp_timestamps_ms.end(),
send_time_s->begin(), [rtp_timestamps_ms](double x) {
return (x - rtp_timestamps_ms[0]) / 1000.f;
});
RTC_DCHECK_EQ(send_time_s->size(), rtp_timestamps_ms.size());
// This loop traverses the data again and populates the graph vectors. The
// reason to have two loops and traverse twice is that the offset cannot be
// known until the first traversal is done. Meanwhile, the final offset must
// be known already at the start of this second loop.
auto data_it = data_.cbegin();
for (size_t i = 0; i < send_time_s->size(); ++i, ++data_it) {
RTC_DCHECK(data_it != data_.end());
const double offset_send_time_ms = rtp_timestamps_ms[i] + offset;
const auto& timing = data_it->second;
corrected_arrival_delay_ms->push_back(
LinearInterpolate(timing.arrival_time_ms, get_audio_time_ms_,
nominal_get_audio_time_ms) -
offset_send_time_ms);
arrival_delay_ms->push_back(timing.arrival_time_ms - offset_send_time_ms);
if (timing.decode_get_audio_count) {
// This packet was decoded.
RTC_DCHECK(timing.sync_delay_ms);
const float playout_ms = *timing.decode_get_audio_count * 10 +
get_audio_time_ms_[0] + *timing.sync_delay_ms -
offset_send_time_ms;
playout_delay_ms->push_back(rtc::Optional<float>(playout_ms));
RTC_DCHECK(timing.target_delay_ms);
RTC_DCHECK(timing.current_delay_ms);
const float target =
playout_ms - *timing.current_delay_ms + *timing.target_delay_ms;
target_delay_ms->push_back(rtc::Optional<float>(target));
} else {
// This packet was never decoded. Mark target and playout delays as empty.
playout_delay_ms->push_back(rtc::Optional<float>());
target_delay_ms->push_back(rtc::Optional<float>());
}
}
RTC_DCHECK(data_it == data_.end());
RTC_DCHECK_EQ(send_time_s->size(), corrected_arrival_delay_ms->size());
RTC_DCHECK_EQ(send_time_s->size(), playout_delay_ms->size());
RTC_DCHECK_EQ(send_time_s->size(), target_delay_ms->size());
}
void NetEqDelayAnalyzer::CreateMatlabScript(
const std::string& script_name) const {
std::vector<float> send_time_s;
std::vector<float> arrival_delay_ms;
std::vector<float> corrected_arrival_delay_ms;
std::vector<rtc::Optional<float>> playout_delay_ms;
std::vector<rtc::Optional<float>> target_delay_ms;
CreateGraphs(&send_time_s, &arrival_delay_ms, &corrected_arrival_delay_ms,
&playout_delay_ms, &target_delay_ms);
// Create an output file stream to Matlab script file.
std::ofstream output(script_name);
// The iterator is used to batch-output comma-separated values from vectors.
std::ostream_iterator<float> output_iterator(output, ",");
output << "send_time_s = [ ";
std::copy(send_time_s.begin(), send_time_s.end(), output_iterator);
output << "];" << std::endl;
output << "arrival_delay_ms = [ ";
std::copy(arrival_delay_ms.begin(), arrival_delay_ms.end(), output_iterator);
output << "];" << std::endl;
output << "corrected_arrival_delay_ms = [ ";
std::copy(corrected_arrival_delay_ms.begin(),
corrected_arrival_delay_ms.end(), output_iterator);
output << "];" << std::endl;
output << "playout_delay_ms = [ ";
for (const auto& v : playout_delay_ms) {
if (!v) {
output << "nan, ";
} else {
output << *v << ", ";
}
}
output << "];" << std::endl;
output << "target_delay_ms = [ ";
for (const auto& v : target_delay_ms) {
if (!v) {
output << "nan, ";
} else {
output << *v << ", ";
}
}
output << "];" << std::endl;
output << "h=plot(send_time_s, arrival_delay_ms, "
<< "send_time_s, target_delay_ms, 'g.', "
<< "send_time_s, playout_delay_ms);" << std::endl;
output << "set(h(1),'color',0.75*[1 1 1]);" << std::endl;
output << "set(h(2),'markersize',6);" << std::endl;
output << "set(h(3),'linew',1.5);" << std::endl;
output << "ax1=axis;" << std::endl;
output << "axis tight" << std::endl;
output << "ax2=axis;" << std::endl;
output << "axis([ax2(1:3) ax1(4)])" << std::endl;
output << "xlabel('send time [s]');" << std::endl;
output << "ylabel('relative delay [ms]');" << std::endl;
if (!ssrcs_.empty()) {
auto ssrc_it = ssrcs_.cbegin();
output << "title('SSRC: 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
while (ssrc_it != ssrcs_.end()) {
output << ", 0x" << std::hex << static_cast<int64_t>(*ssrc_it++);
}
output << std::dec;
auto pt_it = payload_types_.cbegin();
output << "; Payload Types: " << *pt_it++;
while (pt_it != payload_types_.end()) {
output << ", " << *pt_it++;
}
output << "');" << std::endl;
}
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,71 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "webrtc/api/optional.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_test.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
class NetEqDelayAnalyzer : public test::NetEqPostInsertPacket,
public test::NetEqGetAudioCallback {
public:
void AfterInsertPacket(const test::NetEqInput::PacketData& packet,
NetEq* neteq) override;
void BeforeGetAudio(NetEq* neteq) override;
void AfterGetAudio(int64_t time_now_ms,
const AudioFrame& audio_frame,
bool muted,
NetEq* neteq) override;
void CreateGraphs(std::vector<float>* send_times_s,
std::vector<float>* arrival_delay_ms,
std::vector<float>* corrected_arrival_delay_ms,
std::vector<rtc::Optional<float>>* playout_delay_ms,
std::vector<rtc::Optional<float>>* target_delay_ms) const;
// Creates a matlab script with file name script_name. When executed in
// Matlab, the script will generate graphs with the same timing information
// as provided by CreateGraphs.
void CreateMatlabScript(const std::string& script_name) const;
private:
struct TimingData {
explicit TimingData(double at) : arrival_time_ms(at) {}
double arrival_time_ms;
rtc::Optional<int64_t> decode_get_audio_count;
rtc::Optional<int64_t> sync_delay_ms;
rtc::Optional<int> target_delay_ms;
rtc::Optional<int> current_delay_ms;
};
std::map<uint32_t, TimingData> data_;
std::vector<int64_t> get_audio_time_ms_;
size_t get_audio_count_ = 0;
size_t last_sync_buffer_ms_ = 0;
int last_sample_rate_hz_ = 0;
std::set<uint32_t> ssrcs_;
std::set<int> payload_types_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_DELAY_ANALYZER_H_
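
Illustration (not part of this commit): extracting the collected timing data after a simulation in which the analyzer was registered as both the post-insert-packet and the get-audio callback. The output file name is a placeholder.

#include <string>
#include <vector>

#include "webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"

void DumpDelays(const webrtc::test::NetEqDelayAnalyzer& analyzer) {
  std::vector<float> send_time_s;
  std::vector<float> arrival_delay_ms;
  std::vector<float> corrected_arrival_delay_ms;
  std::vector<rtc::Optional<float>> playout_delay_ms;
  std::vector<rtc::Optional<float>> target_delay_ms;
  // One entry per packet; playout and target delays are empty for packets
  // that were never decoded.
  analyzer.CreateGraphs(&send_time_s, &arrival_delay_ms,
                        &corrected_arrival_delay_ms, &playout_delay_ms,
                        &target_delay_ms);
  // Alternatively, write a Matlab script that plots the same curves.
  analyzer.CreateMatlabScript("neteq_delays.m");
}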

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"
#include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h"
#include "webrtc/rtc_base/format_macros.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
NetEqExternalDecoderTest::NetEqExternalDecoderTest(NetEqDecoder codec,
int sample_rate_hz,
AudioDecoder* decoder)
: codec_(codec),
decoder_(decoder),
sample_rate_hz_(sample_rate_hz),
channels_(decoder_->Channels()) {
NetEq::Config config;
config.sample_rate_hz = sample_rate_hz_;
neteq_.reset(NetEq::Create(config, CreateBuiltinAudioDecoderFactory()));
}
void NetEqExternalDecoderTest::Init() {
ASSERT_EQ(NetEq::kOK,
neteq_->RegisterExternalDecoder(decoder_, codec_, name_,
kPayloadType));
}
void NetEqExternalDecoderTest::InsertPacket(
RTPHeader rtp_header,
rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp) {
ASSERT_EQ(NetEq::kOK,
neteq_->InsertPacket(rtp_header, payload, receive_timestamp));
}
void NetEqExternalDecoderTest::GetOutputAudio(AudioFrame* output) {
// Get audio from regular instance.
bool muted;
EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(output, &muted));
ASSERT_FALSE(muted);
EXPECT_EQ(channels_, output->num_channels_);
EXPECT_EQ(static_cast<size_t>(kOutputLengthMs * sample_rate_hz_ / 1000),
output->samples_per_channel_);
EXPECT_EQ(sample_rate_hz_, neteq_->last_output_sample_rate_hz());
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,65 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
#include <memory>
#include <string>
#include "webrtc/api/audio_codecs/audio_decoder.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
namespace test {
// This test class provides a way to run NetEQ with an external decoder.
class NetEqExternalDecoderTest {
protected:
static const uint8_t kPayloadType = 95;
static const int kOutputLengthMs = 10;
// The external decoder |decoder| is supposed to be of type |codec|.
NetEqExternalDecoderTest(NetEqDecoder codec,
int sample_rate_hz,
AudioDecoder* decoder);
virtual ~NetEqExternalDecoderTest() { }
// In Init(), we register the external decoder.
void Init();
// Inserts a new packet with |rtp_header| and |payload| of
// |payload_size_bytes| bytes. The |receive_timestamp| is an indication
// of the time when the packet was received, and should be measured with
// the same tick rate as the RTP timestamp of the current payload.
virtual void InsertPacket(RTPHeader rtp_header,
rtc::ArrayView<const uint8_t> payload,
uint32_t receive_timestamp);
// Get 10 ms of audio data.
void GetOutputAudio(AudioFrame* output);
NetEq* neteq() { return neteq_.get(); }
private:
NetEqDecoder codec_;
std::string name_ = "dummy name";
AudioDecoder* decoder_;
int sample_rate_hz_;
size_t channels_;
std::unique_ptr<NetEq> neteq_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_EXTERNAL_DECODER_TEST_H_
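
Illustration (not part of this commit): the constructor and helpers are protected, so the intended use is to derive a test fixture. A minimal sketch, where |decoder| is an externally owned AudioDecoder matching |codec|:

#include "webrtc/modules/audio_coding/neteq/tools/neteq_external_decoder_test.h"

class MyExternalDecoderTest : public webrtc::test::NetEqExternalDecoderTest {
 public:
  MyExternalDecoderTest(webrtc::NetEqDecoder codec,
                        int sample_rate_hz,
                        webrtc::AudioDecoder* decoder)
      : NetEqExternalDecoderTest(codec, sample_rate_hz, decoder) {
    // Registers |decoder| with the internal NetEq under kPayloadType.
    Init();
  }
  // The test body then feeds packets with InsertPacket() and pulls 10 ms of
  // audio at a time with GetOutputAudio().
};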

@@ -0,0 +1,32 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"
#include <sstream>
namespace webrtc {
namespace test {
std::string NetEqInput::PacketData::ToString() const {
std::stringstream ss;
ss << "{"
<< "time_ms: " << static_cast<int64_t>(time_ms) << ", "
<< "header: {"
<< "pt: " << static_cast<int>(header.payloadType) << ", "
<< "sn: " << header.sequenceNumber << ", "
<< "ts: " << header.timestamp << ", "
<< "ssrc: " << header.ssrc << "}, "
<< "payload bytes: " << payload.size() << "}";
return ss.str();
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,83 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
#include <algorithm>
#include <memory>
#include <string>
#include "webrtc/api/optional.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
#include "webrtc/rtc_base/buffer.h"
namespace webrtc {
namespace test {
// Interface class for input to the NetEqTest class.
class NetEqInput {
public:
struct PacketData {
std::string ToString() const;
RTPHeader header;
rtc::Buffer payload;
double time_ms;
};
virtual ~NetEqInput() = default;
// Returns at what time (in ms) NetEq::InsertPacket should be called next, or
// empty if the source is out of packets.
virtual rtc::Optional<int64_t> NextPacketTime() const = 0;
// Returns at what time (in ms) NetEq::GetAudio should be called next, or
// empty if no more output events are available.
virtual rtc::Optional<int64_t> NextOutputEventTime() const = 0;
// Returns the time (in ms) for the next event from either NextPacketTime()
// or NextOutputEventTime(), or empty if both are out of events.
rtc::Optional<int64_t> NextEventTime() const {
const auto a = NextPacketTime();
const auto b = NextOutputEventTime();
// Return the minimum of non-empty |a| and |b|, or empty if both are empty.
if (a) {
return b ? rtc::Optional<int64_t>(std::min(*a, *b)) : a;
}
return b ? b : rtc::Optional<int64_t>();
}
// Returns the next packet to be inserted into NetEq. The packet following the
// returned one is pre-fetched in the NetEqInput object, such that future
// calls to NextPacketTime() or NextHeader() will return information from that
// packet.
virtual std::unique_ptr<PacketData> PopPacket() = 0;
// Move to the next output event. This will make NextOutputEventTime() return
// a new value (potentially the same if several output events share the same
// time).
virtual void AdvanceOutputEvent() = 0;
// Returns true if the source has come to an end. An implementation must
// eventually return true from this method, or the test will end up in an
// infinite loop.
virtual bool ended() const = 0;
// Returns the RTP header for the next packet, i.e., the packet that will be
// delivered next by PopPacket().
virtual rtc::Optional<RTPHeader> NextHeader() const = 0;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_INPUT_H_
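
Illustration (not part of this commit): the skeleton of an event loop driving a NetEqInput, in the spirit of what NetEqTest does. The actual NetEq calls are elided; only the input-side protocol is shown.

#include <cstdint>
#include <memory>

#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"

void DrainInput(webrtc::test::NetEqInput* input) {
  while (!input->ended()) {
    const rtc::Optional<int64_t> now_ms = input->NextEventTime();
    if (!now_ms) break;
    // Insert any packet that is due at or before the current event time.
    if (input->NextPacketTime() && *input->NextPacketTime() <= *now_ms) {
      std::unique_ptr<webrtc::test::NetEqInput::PacketData> packet =
          input->PopPacket();
      // ... call NetEq::InsertPacket(packet->header, packet->payload, ...).
    }
    // Produce output if an output event is due, then advance to the next one.
    if (input->NextOutputEventTime() &&
        *input->NextOutputEventTime() <= *now_ms) {
      // ... call NetEq::GetAudio(...).
      input->AdvanceOutputEvent();
    }
  }
}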

@@ -0,0 +1,116 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
#include <algorithm>
#include <limits>
#include "webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
NetEqPacketSourceInput::NetEqPacketSourceInput() : next_output_event_ms_(0) {}
rtc::Optional<int64_t> NetEqPacketSourceInput::NextPacketTime() const {
return packet_
? rtc::Optional<int64_t>(static_cast<int64_t>(packet_->time_ms()))
: rtc::Optional<int64_t>();
}
rtc::Optional<RTPHeader> NetEqPacketSourceInput::NextHeader() const {
return packet_ ? rtc::Optional<RTPHeader>(packet_->header())
: rtc::Optional<RTPHeader>();
}
void NetEqPacketSourceInput::LoadNextPacket() {
packet_ = source()->NextPacket();
}
std::unique_ptr<NetEqInput::PacketData> NetEqPacketSourceInput::PopPacket() {
if (!packet_) {
return std::unique_ptr<PacketData>();
}
std::unique_ptr<PacketData> packet_data(new PacketData);
packet_data->header = packet_->header();
if (packet_->payload_length_bytes() == 0 &&
packet_->virtual_payload_length_bytes() > 0) {
// This is a header-only "dummy" packet. Set the payload to all zeros, with
// length according to the virtual length.
packet_data->payload.SetSize(packet_->virtual_payload_length_bytes());
std::fill_n(packet_data->payload.data(), packet_data->payload.size(), 0);
} else {
packet_data->payload.SetData(packet_->payload(),
packet_->payload_length_bytes());
}
packet_data->time_ms = packet_->time_ms();
LoadNextPacket();
return packet_data;
}
NetEqRtpDumpInput::NetEqRtpDumpInput(const std::string& file_name,
const RtpHeaderExtensionMap& hdr_ext_map)
: source_(RtpFileSource::Create(file_name)) {
for (const auto& ext_pair : hdr_ext_map) {
source_->RegisterRtpHeaderExtension(ext_pair.second, ext_pair.first);
}
LoadNextPacket();
}
rtc::Optional<int64_t> NetEqRtpDumpInput::NextOutputEventTime() const {
return next_output_event_ms_;
}
void NetEqRtpDumpInput::AdvanceOutputEvent() {
if (next_output_event_ms_) {
*next_output_event_ms_ += kOutputPeriodMs;
}
if (!NextPacketTime()) {
next_output_event_ms_ = rtc::Optional<int64_t>();
}
}
PacketSource* NetEqRtpDumpInput::source() {
return source_.get();
}
NetEqEventLogInput::NetEqEventLogInput(const std::string& file_name,
const RtpHeaderExtensionMap& hdr_ext_map)
: source_(RtcEventLogSource::Create(file_name)) {
for (const auto& ext_pair : hdr_ext_map) {
source_->RegisterRtpHeaderExtension(ext_pair.second, ext_pair.first);
}
LoadNextPacket();
AdvanceOutputEvent();
}
rtc::Optional<int64_t> NetEqEventLogInput::NextOutputEventTime() const {
return rtc::Optional<int64_t>(next_output_event_ms_);
}
void NetEqEventLogInput::AdvanceOutputEvent() {
next_output_event_ms_ =
rtc::Optional<int64_t>(source_->NextAudioOutputEventMs());
if (*next_output_event_ms_ == std::numeric_limits<int64_t>::max()) {
next_output_event_ms_ = rtc::Optional<int64_t>();
}
}
PacketSource* NetEqEventLogInput::source() {
return source_.get();
}
} // namespace test
} // namespace webrtc

@@ -0,0 +1,84 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
#include <map>
#include <string>
#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
namespace webrtc {
namespace test {
class RtpFileSource;
class RtcEventLogSource;
// An adapter class to dress up a PacketSource object as a NetEqInput.
class NetEqPacketSourceInput : public NetEqInput {
public:
using RtpHeaderExtensionMap = std::map<int, webrtc::RTPExtensionType>;
NetEqPacketSourceInput();
rtc::Optional<int64_t> NextPacketTime() const override;
std::unique_ptr<PacketData> PopPacket() override;
rtc::Optional<RTPHeader> NextHeader() const override;
bool ended() const override { return !next_output_event_ms_; }
protected:
virtual PacketSource* source() = 0;
void LoadNextPacket();
rtc::Optional<int64_t> next_output_event_ms_;
private:
std::unique_ptr<Packet> packet_;
};
// Implementation of NetEqPacketSourceInput to be used with an RtpFileSource.
class NetEqRtpDumpInput final : public NetEqPacketSourceInput {
public:
NetEqRtpDumpInput(const std::string& file_name,
const RtpHeaderExtensionMap& hdr_ext_map);
rtc::Optional<int64_t> NextOutputEventTime() const override;
void AdvanceOutputEvent() override;
protected:
PacketSource* source() override;
private:
static constexpr int64_t kOutputPeriodMs = 10;
std::unique_ptr<RtpFileSource> source_;
};
// Implementation of NetEqPacketSourceInput to be used with an
// RtcEventLogSource.
class NetEqEventLogInput final : public NetEqPacketSourceInput {
public:
NetEqEventLogInput(const std::string& file_name,
const RtpHeaderExtensionMap& hdr_ext_map);
rtc::Optional<int64_t> NextOutputEventTime() const override;
void AdvanceOutputEvent() override;
protected:
PacketSource* source() override;
private:
std::unique_ptr<RtcEventLogSource> source_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PACKET_SOURCE_INPUT_H_
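
Illustration (not part of this commit): constructing a NetEqInput from an RTP dump file. No RTP header extensions are registered here; a real tool would fill the map with the extension IDs used in the capture.

#include <memory>
#include <string>

#include "webrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.h"

std::unique_ptr<webrtc::test::NetEqInput> MakeRtpDumpInput(
    const std::string& file_name) {
  // Empty map: no header extensions will be parsed from the dump.
  webrtc::test::NetEqPacketSourceInput::RtpHeaderExtensionMap ext_map;
  return std::unique_ptr<webrtc::test::NetEqInput>(
      new webrtc::test::NetEqRtpDumpInput(file_name, ext_map));
}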

@@ -0,0 +1,133 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_performance_test.h"
#include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/pcm16b.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/typedefs.h"
using webrtc::NetEq;
using webrtc::test::AudioLoop;
using webrtc::test::RtpGenerator;
namespace webrtc {
namespace test {
int64_t NetEqPerformanceTest::Run(int runtime_ms,
int lossrate,
double drift_factor) {
const std::string kInputFileName =
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
const int kSampRateHz = 32000;
const webrtc::NetEqDecoder kDecoderType =
webrtc::NetEqDecoder::kDecoderPCM16Bswb32kHz;
const std::string kDecoderName = "pcm16-swb32";
const int kPayloadType = 95;
// Initialize NetEq instance.
NetEq::Config config;
config.sample_rate_hz = kSampRateHz;
NetEq* neteq = NetEq::Create(config, CreateBuiltinAudioDecoderFactory());
// Register decoder in |neteq|.
if (neteq->RegisterPayloadType(kDecoderType, kDecoderName, kPayloadType) != 0)
return -1;
// Set up AudioLoop object.
AudioLoop audio_loop;
const size_t kMaxLoopLengthSamples = kSampRateHz * 10; // 10 second loop.
const size_t kInputBlockSizeSamples = 60 * kSampRateHz / 1000; // 60 ms.
if (!audio_loop.Init(kInputFileName, kMaxLoopLengthSamples,
kInputBlockSizeSamples))
return -1;
int32_t time_now_ms = 0;
// Get first input packet.
RTPHeader rtp_header;
RtpGenerator rtp_gen(kSampRateHz / 1000);
  // Start with positive drift for the first half of the simulation.
rtp_gen.set_drift_factor(drift_factor);
bool drift_flipped = false;
int32_t packet_input_time_ms =
rtp_gen.GetRtpHeader(kPayloadType, kInputBlockSizeSamples, &rtp_header);
auto input_samples = audio_loop.GetNextBlock();
if (input_samples.empty())
exit(1);
uint8_t input_payload[kInputBlockSizeSamples * sizeof(int16_t)];
size_t payload_len = WebRtcPcm16b_Encode(input_samples.data(),
input_samples.size(), input_payload);
RTC_CHECK_EQ(sizeof(input_payload), payload_len);
// Main loop.
webrtc::Clock* clock = webrtc::Clock::GetRealTimeClock();
int64_t start_time_ms = clock->TimeInMilliseconds();
AudioFrame out_frame;
while (time_now_ms < runtime_ms) {
while (packet_input_time_ms <= time_now_ms) {
      // Drop one out of every |lossrate| packets.
bool lost = false;
if (lossrate > 0) {
lost = ((rtp_header.sequenceNumber - 1) % lossrate) == 0;
}
if (!lost) {
// Insert packet.
int error =
neteq->InsertPacket(rtp_header, input_payload,
packet_input_time_ms * kSampRateHz / 1000);
if (error != NetEq::kOK)
return -1;
}
// Get next packet.
packet_input_time_ms = rtp_gen.GetRtpHeader(kPayloadType,
kInputBlockSizeSamples,
&rtp_header);
input_samples = audio_loop.GetNextBlock();
if (input_samples.empty())
return -1;
payload_len = WebRtcPcm16b_Encode(input_samples.data(),
input_samples.size(), input_payload);
assert(payload_len == kInputBlockSizeSamples * sizeof(int16_t));
}
// Get output audio, but don't do anything with it.
bool muted;
int error = neteq->GetAudio(&out_frame, &muted);
RTC_CHECK(!muted);
if (error != NetEq::kOK)
return -1;
assert(out_frame.samples_per_channel_ ==
static_cast<size_t>(kSampRateHz * 10 / 1000));
static const int kOutputBlockSizeMs = 10;
time_now_ms += kOutputBlockSizeMs;
if (time_now_ms >= runtime_ms / 2 && !drift_flipped) {
      // Apply negative drift for the second half of the simulation.
rtp_gen.set_drift_factor(-drift_factor);
drift_flipped = true;
}
}
int64_t end_time_ms = clock->TimeInMilliseconds();
delete neteq;
return end_time_ms - start_time_ms;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
class NetEqPerformanceTest {
public:
// Runs a performance test with parameters as follows:
// |runtime_ms|: the simulation time, i.e., the duration of the audio data.
// |lossrate|: drop one out of |lossrate| packets, e.g., one out of 10.
// |drift_factor|: clock drift in [0, 1].
  // Returns the wall-clock runtime of the simulation in ms, or -1 on error.
static int64_t Run(int runtime_ms, int lossrate, double drift_factor);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_PERFORMANCE_TEST_H_
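A hypothetical call site, assuming rtc_base/checks.h is included; the parameter values are chosen for illustration only:
// Simulate 10 seconds of audio, dropping one packet in ten, with 10% drift.
int64_t wall_clock_ms = NetEqPerformanceTest::Run(10000, 10, 0.1);
RTC_CHECK_GE(wall_clock_ms, 0) << "Setup or decoding failed.";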

View File

@ -0,0 +1,377 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <math.h>
#include <stdio.h>
#include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_quality_test.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_wav_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
const uint8_t kPayloadType = 95;
const int kOutputSizeMs = 10;
const int kInitSeed = 0x12345678;
const int kPacketLossTimeUnitMs = 10;
const std::string& DefaultInFilename() {
static const std::string path =
ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
return path;
}
const std::string& DefaultOutFilename() {
static const std::string path = OutputPath() + "neteq_quality_test_out.pcm";
return path;
}
// Common validator for file names.
static bool ValidateFilename(const std::string& value, bool write) {
FILE* fid = write ? fopen(value.c_str(), "wb") : fopen(value.c_str(), "rb");
if (fid == nullptr)
return false;
fclose(fid);
return true;
}
DEFINE_string(in_filename, DefaultInFilename().c_str(),
"Filename for input audio (specify sample rate with --input_sample_rate, "
"and channels with --channels).");
DEFINE_int(input_sample_rate, 16000, "Sample rate of input file in Hz.");
DEFINE_int(channels, 1, "Number of channels in input audio.");
DEFINE_string(out_filename, DefaultOutFilename().c_str(),
"Name of output audio file.");
DEFINE_int(runtime_ms, 10000, "Simulated runtime (milliseconds).");
DEFINE_int(packet_loss_rate, 10, "Packet loss rate in percent.");
DEFINE_int(random_loss_mode, 1,
"Random loss mode: 0--no loss, 1--uniform loss, 2--Gilbert Elliot loss.");
DEFINE_int(burst_length, 30,
"Burst length in milliseconds, only valid for Gilbert Elliot loss.");
DEFINE_float(drift_factor, 0.0, "Time drift factor.");
// ProbTrans00Solver() calculates the transition probability from the no-loss
// state to itself in a modified Gilbert Elliot packet loss model, such that
// the target packet loss rate |loss_rate| is achieved. A packet is not lost
// only if all |units| drawings within the duration of the packet result in
// no-loss.
static double ProbTrans00Solver(int units, double loss_rate,
double prob_trans_10) {
if (units == 1)
return prob_trans_10 / (1.0f - loss_rate) - prob_trans_10;
// 0 == prob_trans_00 ^ (units - 1) + (1 - loss_rate) / prob_trans_10 *
// prob_trans_00 - (1 - loss_rate) * (1 + 1 / prob_trans_10).
// There is a unique solution between 0.0 and 1.0, due to the monotonicity and
// an opposite sign at 0.0 and 1.0.
// For simplicity, we reformulate the equation as
// f(x) = x ^ (units - 1) + a x + b.
// Its derivative is
// f'(x) = (units - 1) x ^ (units - 2) + a.
// The derivative is strictly greater than 0 when x is between 0 and 1.
// We use Newton's method to solve the equation, iteration is
// x(k+1) = x(k) - f(x) / f'(x);
const double kPrecision = 0.001f;
const int kIterations = 100;
const double a = (1.0f - loss_rate) / prob_trans_10;
const double b = (loss_rate - 1.0f) * (1.0f + 1.0f / prob_trans_10);
double x = 0.0f; // Starting point;
double f = b;
double f_p;
int iter = 0;
while ((f >= kPrecision || f <= -kPrecision) && iter < kIterations) {
f_p = (units - 1.0f) * pow(x, units - 2) + a;
x -= f / f_p;
if (x > 1.0f) {
x = 1.0f;
} else if (x < 0.0f) {
x = 0.0f;
}
f = pow(x, units - 1) + a * x + b;
iter ++;
}
return x;
}
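A hypothetical sanity check of the solver above, using the default 30 ms burst length and a 60 ms packet (units == 6); the numbers are illustrative only:
// Target 10% loss; a burst ends with prob_trans_10 = 10/30 per 10 ms unit.
double prob_trans_00 = ProbTrans00Solver(6, 0.1, 10.0 / 30.0);
// The iteration clamps its iterate to [0, 1], so the result lies there too.
RTC_DCHECK_GE(prob_trans_00, 0.0);
RTC_DCHECK_LE(prob_trans_00, 1.0);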
NetEqQualityTest::NetEqQualityTest(int block_duration_ms,
int in_sampling_khz,
int out_sampling_khz,
NetEqDecoder decoder_type)
: decoder_type_(decoder_type),
channels_(static_cast<size_t>(FLAG_channels)),
decoded_time_ms_(0),
decodable_time_ms_(0),
drift_factor_(FLAG_drift_factor),
packet_loss_rate_(FLAG_packet_loss_rate),
block_duration_ms_(block_duration_ms),
in_sampling_khz_(in_sampling_khz),
out_sampling_khz_(out_sampling_khz),
in_size_samples_(
static_cast<size_t>(in_sampling_khz_ * block_duration_ms_)),
payload_size_bytes_(0),
max_payload_bytes_(0),
in_file_(new ResampleInputAudioFile(FLAG_in_filename,
FLAG_input_sample_rate,
in_sampling_khz * 1000)),
rtp_generator_(
new RtpGenerator(in_sampling_khz_, 0, 0, decodable_time_ms_)),
total_payload_size_bytes_(0) {
// Flag validation
RTC_CHECK(ValidateFilename(FLAG_in_filename, false))
<< "Invalid input filename.";
RTC_CHECK(FLAG_input_sample_rate == 8000 || FLAG_input_sample_rate == 16000 ||
FLAG_input_sample_rate == 32000 || FLAG_input_sample_rate == 48000)
<< "Invalid sample rate should be 8000, 16000, 32000 or 48000 Hz.";
RTC_CHECK_EQ(FLAG_channels, 1)
<< "Invalid number of channels, current support only 1.";
RTC_CHECK(ValidateFilename(FLAG_out_filename, true))
<< "Invalid output filename.";
RTC_CHECK_GT(FLAG_runtime_ms, 0)
<< "Invalid runtime, should be greater than 0.";
RTC_CHECK(FLAG_packet_loss_rate >= 0 && FLAG_packet_loss_rate <= 100)
<< "Invalid packet loss percentile, should be between 0 and 100.";
RTC_CHECK(FLAG_random_loss_mode >= 0 && FLAG_random_loss_mode <= 2)
<< "Invalid random packet loss mode, should be between 0 and 2.";
RTC_CHECK_GE(FLAG_burst_length, kPacketLossTimeUnitMs)
<< "Invalid burst length, should be greater than or equal to "
<< kPacketLossTimeUnitMs << " ms.";
RTC_CHECK_GT(FLAG_drift_factor, -0.1)
<< "Invalid drift factor, should be greater than -0.1.";
const std::string out_filename = FLAG_out_filename;
const std::string log_filename = out_filename + ".log";
log_file_.open(log_filename.c_str(), std::ofstream::out);
RTC_CHECK(log_file_.is_open());
if (out_filename.size() >= 4 &&
out_filename.substr(out_filename.size() - 4) == ".wav") {
// Open a wav file.
output_.reset(
new webrtc::test::OutputWavFile(out_filename, 1000 * out_sampling_khz));
} else {
// Open a pcm file.
output_.reset(new webrtc::test::OutputAudioFile(out_filename));
}
NetEq::Config config;
config.sample_rate_hz = out_sampling_khz_ * 1000;
neteq_.reset(
NetEq::Create(config, webrtc::CreateBuiltinAudioDecoderFactory()));
max_payload_bytes_ = in_size_samples_ * channels_ * sizeof(int16_t);
in_data_.reset(new int16_t[in_size_samples_ * channels_]);
}
NetEqQualityTest::~NetEqQualityTest() {
log_file_.close();
}
bool NoLoss::Lost() {
return false;
}
UniformLoss::UniformLoss(double loss_rate)
: loss_rate_(loss_rate) {
}
bool UniformLoss::Lost() {
int drop_this = rand();
return (drop_this < loss_rate_ * RAND_MAX);
}
GilbertElliotLoss::GilbertElliotLoss(double prob_trans_11, double prob_trans_01)
: prob_trans_11_(prob_trans_11),
prob_trans_01_(prob_trans_01),
lost_last_(false),
uniform_loss_model_(new UniformLoss(0)) {
}
GilbertElliotLoss::~GilbertElliotLoss() {}
bool GilbertElliotLoss::Lost() {
// Simulate bursty channel (Gilbert model).
// (1st order) Markov chain model with memory of the previous/last
// packet state (lost or received).
if (lost_last_) {
// Previous packet was not received.
uniform_loss_model_->set_loss_rate(prob_trans_11_);
return lost_last_ = uniform_loss_model_->Lost();
} else {
uniform_loss_model_->set_loss_rate(prob_trans_01_);
return lost_last_ = uniform_loss_model_->Lost();
}
}
void NetEqQualityTest::SetUp() {
ASSERT_EQ(0,
neteq_->RegisterPayloadType(decoder_type_, "noname", kPayloadType));
rtp_generator_->set_drift_factor(drift_factor_);
int units = block_duration_ms_ / kPacketLossTimeUnitMs;
switch (FLAG_random_loss_mode) {
case 1: {
// |unit_loss_rate| is the packet loss rate for each unit time interval
// (kPacketLossTimeUnitMs). Since a packet loss event is generated if any
// of |block_duration_ms_ / kPacketLossTimeUnitMs| unit time intervals of
// a full packet duration is drawn with a loss, |unit_loss_rate| fulfills
// (1 - unit_loss_rate) ^ (block_duration_ms_ / kPacketLossTimeUnitMs) ==
// 1 - packet_loss_rate.
double unit_loss_rate = (1.0f - pow(1.0f - 0.01f * packet_loss_rate_,
1.0f / units));
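      // Worked example (illustrative): with the default packet_loss_rate_ of
      // 10 percent and units == 3 (a 30 ms block), unit_loss_rate is
      // 1 - 0.9^(1/3), i.e. roughly 0.035.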
loss_model_.reset(new UniformLoss(unit_loss_rate));
break;
}
case 2: {
// |FLAG_burst_length| should be integer times of kPacketLossTimeUnitMs.
ASSERT_EQ(0, FLAG_burst_length % kPacketLossTimeUnitMs);
// We do not allow 100 percent packet loss in Gilbert Elliot model, which
// makes no sense.
ASSERT_GT(100, packet_loss_rate_);
// To guarantee the overall packet loss rate, transition probabilities
// need to satisfy:
// pi_0 * (1 - prob_trans_01_) ^ units +
// pi_1 * prob_trans_10_ ^ (units - 1) == 1 - loss_rate
// pi_0 = prob_trans_10 / (prob_trans_10 + prob_trans_01_)
// is the stationary state probability of no-loss
// pi_1 = prob_trans_01_ / (prob_trans_10 + prob_trans_01_)
// is the stationary state probability of loss
// After a derivation prob_trans_00 should satisfy:
// prob_trans_00 ^ (units - 1) = (loss_rate - 1) / prob_trans_10 *
// prob_trans_00 + (1 - loss_rate) * (1 + 1 / prob_trans_10).
double loss_rate = 0.01f * packet_loss_rate_;
double prob_trans_10 = 1.0f * kPacketLossTimeUnitMs / FLAG_burst_length;
double prob_trans_00 = ProbTrans00Solver(units, loss_rate, prob_trans_10);
loss_model_.reset(new GilbertElliotLoss(1.0f - prob_trans_10,
1.0f - prob_trans_00));
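      // Example (illustrative): with the default FLAG_burst_length of 30 ms,
      // prob_trans_10 is 1/3, so a loss burst lasts 3 units (30 ms) on
      // average, while prob_trans_00 is tuned by the solver so that the
      // overall loss rate stays at |loss_rate|.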
break;
}
default: {
loss_model_.reset(new NoLoss);
break;
}
}
  // Make sure that the packet loss profile is the same for all derived tests.
srand(kInitSeed);
}
std::ofstream& NetEqQualityTest::Log() {
return log_file_;
}
bool NetEqQualityTest::PacketLost() {
int cycles = block_duration_ms_ / kPacketLossTimeUnitMs;
// The loop is to make sure that codecs with different block lengths share the
// same packet loss profile.
bool lost = false;
for (int idx = 0; idx < cycles; idx ++) {
if (loss_model_->Lost()) {
// The packet will be lost if any of the drawings indicates a loss, but
// the loop has to go on to make sure that codecs with different block
// lengths keep the same pace.
lost = true;
}
}
return lost;
}
int NetEqQualityTest::Transmit() {
int packet_input_time_ms =
rtp_generator_->GetRtpHeader(kPayloadType, in_size_samples_,
&rtp_header_);
Log() << "Packet of size "
<< payload_size_bytes_
<< " bytes, for frame at "
<< packet_input_time_ms
<< " ms ";
if (payload_size_bytes_ > 0) {
if (!PacketLost()) {
int ret = neteq_->InsertPacket(
rtp_header_,
rtc::ArrayView<const uint8_t>(payload_.data(), payload_size_bytes_),
packet_input_time_ms * in_sampling_khz_);
if (ret != NetEq::kOK)
return -1;
Log() << "was sent.";
} else {
Log() << "was lost.";
}
}
Log() << std::endl;
return packet_input_time_ms;
}
int NetEqQualityTest::DecodeBlock() {
bool muted;
int ret = neteq_->GetAudio(&out_frame_, &muted);
RTC_CHECK(!muted);
if (ret != NetEq::kOK) {
return -1;
} else {
RTC_DCHECK_EQ(out_frame_.num_channels_, channels_);
RTC_DCHECK_EQ(out_frame_.samples_per_channel_,
static_cast<size_t>(kOutputSizeMs * out_sampling_khz_));
RTC_CHECK(output_->WriteArray(
out_frame_.data(),
out_frame_.samples_per_channel_ * out_frame_.num_channels_));
return static_cast<int>(out_frame_.samples_per_channel_);
}
}
void NetEqQualityTest::Simulate() {
int audio_size_samples;
while (decoded_time_ms_ < FLAG_runtime_ms) {
    // Keep roughly 10 packets in the packet buffer.
while (decodable_time_ms_ - 10 * block_duration_ms_ < decoded_time_ms_) {
ASSERT_TRUE(in_file_->Read(in_size_samples_ * channels_, &in_data_[0]));
payload_.Clear();
payload_size_bytes_ = EncodeBlock(&in_data_[0],
in_size_samples_, &payload_,
max_payload_bytes_);
total_payload_size_bytes_ += payload_size_bytes_;
decodable_time_ms_ = Transmit() + block_duration_ms_;
}
audio_size_samples = DecodeBlock();
if (audio_size_samples > 0) {
decoded_time_ms_ += audio_size_samples / out_sampling_khz_;
}
}
Log() << "Average bit rate was "
<< 8.0f * total_payload_size_bytes_ / FLAG_runtime_ms
<< " kbps"
<< std::endl;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,140 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
#include <fstream>
#include <memory>
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/rtc_base/flags.h"
#include "webrtc/test/gtest.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
class LossModel {
public:
virtual ~LossModel() {};
virtual bool Lost() = 0;
};
class NoLoss : public LossModel {
public:
bool Lost() override;
};
class UniformLoss : public LossModel {
public:
UniformLoss(double loss_rate);
bool Lost() override;
void set_loss_rate(double loss_rate) { loss_rate_ = loss_rate; }
private:
double loss_rate_;
};
class GilbertElliotLoss : public LossModel {
public:
GilbertElliotLoss(double prob_trans_11, double prob_trans_01);
~GilbertElliotLoss() override;
bool Lost() override;
private:
// Prob. of losing current packet, when previous packet is lost.
double prob_trans_11_;
// Prob. of losing current packet, when previous packet is not lost.
double prob_trans_01_;
bool lost_last_;
std::unique_ptr<UniformLoss> uniform_loss_model_;
};
class NetEqQualityTest : public ::testing::Test {
protected:
NetEqQualityTest(int block_duration_ms,
int in_sampling_khz,
int out_sampling_khz,
NetEqDecoder decoder_type);
~NetEqQualityTest() override;
void SetUp() override;
// EncodeBlock(...) does the following:
  // 1. encodes a block of audio stored in |in_data|, with a length of
  // |block_size_samples| (samples per channel),
  // 2. saves the bit stream to |payload|, which can hold at most |max_bytes|
  // bytes,
  // 3. returns the length of the payload (in bytes).
virtual int EncodeBlock(int16_t* in_data, size_t block_size_samples,
rtc::Buffer* payload, size_t max_bytes) = 0;
  // PacketLost(...) determines whether a packet sent at an indicated time gets
// lost or not.
bool PacketLost();
  // DecodeBlock() decodes a block of audio using the payload stored in
  // |payload_|, with a length of |payload_size_bytes_| (bytes). The decoded
  // audio is stored in |out_frame_| and written to |output_|.
int DecodeBlock();
// Transmit() uses |rtp_generator_| to generate a packet and passes it to
// |neteq_|.
int Transmit();
// Runs encoding / transmitting / decoding.
void Simulate();
// Write to log file. Usage Log() << ...
std::ofstream& Log();
NetEqDecoder decoder_type_;
const size_t channels_;
private:
int decoded_time_ms_;
int decodable_time_ms_;
double drift_factor_;
int packet_loss_rate_;
const int block_duration_ms_;
const int in_sampling_khz_;
const int out_sampling_khz_;
// Number of samples per channel in a frame.
const size_t in_size_samples_;
size_t payload_size_bytes_;
size_t max_payload_bytes_;
std::unique_ptr<InputAudioFile> in_file_;
std::unique_ptr<AudioSink> output_;
std::ofstream log_file_;
std::unique_ptr<RtpGenerator> rtp_generator_;
std::unique_ptr<NetEq> neteq_;
std::unique_ptr<LossModel> loss_model_;
std::unique_ptr<int16_t[]> in_data_;
rtc::Buffer payload_;
AudioFrame out_frame_;
RTPHeader rtp_header_;
size_t total_payload_size_bytes_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_QUALITY_TEST_H_
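The EncodeBlock() hook above is the only codec-specific piece a derived test has to supply. A minimal sketch under stated assumptions (hypothetical class name; assumes pcm16b.h, rtc_base/checks.h and <vector> are included; modeled on how the performance test above encodes PCM16b):
class NetEqPcm16bQualityTestSketch : public NetEqQualityTest {
 protected:
  NetEqPcm16bQualityTestSketch()
      : NetEqQualityTest(10 /* block_duration_ms */,
                         16 /* in_sampling_khz */,
                         16 /* out_sampling_khz */,
                         NetEqDecoder::kDecoderPCM16Bwb) {}

  int EncodeBlock(int16_t* in_data,
                  size_t block_size_samples,
                  rtc::Buffer* payload,
                  size_t max_bytes) override {
    // PCM16b produces two bytes per input sample.
    std::vector<uint8_t> encoded(2 * block_size_samples);
    size_t enc_len =
        WebRtcPcm16b_Encode(in_data, block_size_samples, encoded.data());
    RTC_CHECK_LE(enc_len, max_bytes);
    payload->SetData(encoded.data(), enc_len);
    return static_cast<int>(enc_len);
  }
};

TEST_F(NetEqPcm16bQualityTestSketch, Simulate) {
  Simulate();
}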

View File

@ -0,0 +1,109 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h"
#include "webrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
NetEqReplacementInput::NetEqReplacementInput(
std::unique_ptr<NetEqInput> source,
uint8_t replacement_payload_type,
const std::set<uint8_t>& comfort_noise_types,
const std::set<uint8_t>& forbidden_types)
: source_(std::move(source)),
replacement_payload_type_(replacement_payload_type),
comfort_noise_types_(comfort_noise_types),
forbidden_types_(forbidden_types) {
RTC_CHECK(source_);
packet_ = source_->PopPacket();
ReplacePacket();
RTC_CHECK(packet_);
}
rtc::Optional<int64_t> NetEqReplacementInput::NextPacketTime() const {
return packet_
? rtc::Optional<int64_t>(static_cast<int64_t>(packet_->time_ms))
: rtc::Optional<int64_t>();
}
rtc::Optional<int64_t> NetEqReplacementInput::NextOutputEventTime() const {
return source_->NextOutputEventTime();
}
std::unique_ptr<NetEqInput::PacketData> NetEqReplacementInput::PopPacket() {
std::unique_ptr<PacketData> to_return = std::move(packet_);
packet_ = source_->PopPacket();
ReplacePacket();
return to_return;
}
void NetEqReplacementInput::AdvanceOutputEvent() {
source_->AdvanceOutputEvent();
}
bool NetEqReplacementInput::ended() const {
return source_->ended();
}
rtc::Optional<RTPHeader> NetEqReplacementInput::NextHeader() const {
return source_->NextHeader();
}
void NetEqReplacementInput::ReplacePacket() {
if (!source_->NextPacketTime()) {
// End of input. Cannot do proper replacement on the very last packet, so we
// delete it instead.
packet_.reset();
return;
}
RTC_DCHECK(packet_);
RTC_CHECK_EQ(forbidden_types_.count(packet_->header.payloadType), 0)
<< "Payload type " << static_cast<int>(packet_->header.payloadType)
<< " is forbidden.";
// Check if this packet is comfort noise.
if (comfort_noise_types_.count(packet_->header.payloadType) != 0) {
// If CNG, simply insert a zero-energy one-byte payload.
uint8_t cng_payload[1] = {127}; // Max attenuation of CNG.
packet_->payload.SetData(cng_payload);
return;
}
rtc::Optional<RTPHeader> next_hdr = source_->NextHeader();
RTC_DCHECK(next_hdr);
uint8_t payload[12];
RTC_DCHECK_LE(last_frame_size_timestamps_, 120 * 48);
uint32_t input_frame_size_timestamps = last_frame_size_timestamps_;
const uint32_t timestamp_diff =
next_hdr->timestamp - packet_->header.timestamp;
if (next_hdr->sequenceNumber == packet_->header.sequenceNumber + 1 &&
timestamp_diff <= 120 * 48) {
    // Packets are in order and the timestamp diff is at most 5760 samples
    // (120 ms at 48 kHz). Accept the timestamp diff as a valid frame size.
input_frame_size_timestamps = timestamp_diff;
last_frame_size_timestamps_ = input_frame_size_timestamps;
}
RTC_DCHECK_LE(input_frame_size_timestamps, 120 * 48);
FakeDecodeFromFile::PrepareEncoded(packet_->header.timestamp,
input_frame_size_timestamps,
packet_->payload.size(), payload);
packet_->payload.SetData(payload);
packet_->header.payloadType = replacement_payload_type_;
return;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_
#include <memory>
#include <set>
#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"
namespace webrtc {
namespace test {
// This class converts the packets from a NetEqInput to fake encodings to be
// decoded by a FakeDecodeFromFile decoder.
class NetEqReplacementInput : public NetEqInput {
public:
NetEqReplacementInput(std::unique_ptr<NetEqInput> source,
uint8_t replacement_payload_type,
const std::set<uint8_t>& comfort_noise_types,
const std::set<uint8_t>& forbidden_types);
rtc::Optional<int64_t> NextPacketTime() const override;
rtc::Optional<int64_t> NextOutputEventTime() const override;
std::unique_ptr<PacketData> PopPacket() override;
void AdvanceOutputEvent() override;
bool ended() const override;
rtc::Optional<RTPHeader> NextHeader() const override;
private:
void ReplacePacket();
std::unique_ptr<NetEqInput> source_;
const uint8_t replacement_payload_type_;
const std::set<uint8_t> comfort_noise_types_;
const std::set<uint8_t> forbidden_types_;
std::unique_ptr<PacketData> packet_; // The next packet to deliver.
uint32_t last_frame_size_timestamps_ = 960; // Initial guess: 20 ms @ 48 kHz.
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_REPLACEMENT_INPUT_H_

View File

@ -0,0 +1,668 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <errno.h>
#include <inttypes.h>
#include <limits.h> // For ULONG_MAX returned by strtoul.
#include <stdio.h>
#include <stdlib.h> // For strtoul.
#include <string.h>
#include <algorithm>
#include <ios>
#include <iostream>
#include <memory>
#include <numeric>
#include <string>
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/fake_decode_from_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_delay_analyzer.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_replacement_input.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_test.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_wav_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/flags.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
namespace {
// Parses the input string for a valid SSRC (at the start of the string). If a
// valid SSRC is found, it is written to the output variable |ssrc|, and true is
// returned. Otherwise, false is returned.
bool ParseSsrc(const std::string& str, uint32_t* ssrc) {
if (str.empty())
return true;
int base = 10;
// Look for "0x" or "0X" at the start and change base to 16 if found.
if ((str.compare(0, 2, "0x") == 0) || (str.compare(0, 2, "0X") == 0))
base = 16;
errno = 0;
char* end_ptr;
unsigned long value = strtoul(str.c_str(), &end_ptr, base);
if (value == ULONG_MAX && errno == ERANGE)
return false; // Value out of range for unsigned long.
if (sizeof(unsigned long) > sizeof(uint32_t) && value > 0xFFFFFFFF)
return false; // Value out of range for uint32_t.
if (end_ptr - str.c_str() < static_cast<ptrdiff_t>(str.length()))
return false; // Part of the string was not parsed.
*ssrc = static_cast<uint32_t>(value);
return true;
}
// Flag validators.
bool ValidatePayloadType(int value) {
if (value >= 0 && value <= 127) // Value is ok.
return true;
printf("Payload type must be between 0 and 127, not %d\n",
static_cast<int>(value));
return false;
}
bool ValidateSsrcValue(const std::string& str) {
uint32_t dummy_ssrc;
if (ParseSsrc(str, &dummy_ssrc)) // Value is ok.
return true;
printf("Invalid SSRC: %s\n", str.c_str());
return false;
}
static bool ValidateExtensionId(int value) {
if (value > 0 && value <= 255) // Value is ok.
return true;
printf("Extension ID must be between 1 and 255, not %d\n",
static_cast<int>(value));
return false;
}
// Define command line flags.
DEFINE_int(pcmu, 0, "RTP payload type for PCM-u");
DEFINE_int(pcma, 8, "RTP payload type for PCM-a");
DEFINE_int(ilbc, 102, "RTP payload type for iLBC");
DEFINE_int(isac, 103, "RTP payload type for iSAC");
DEFINE_int(isac_swb, 104, "RTP payload type for iSAC-swb (32 kHz)");
DEFINE_int(opus, 111, "RTP payload type for Opus");
DEFINE_int(pcm16b, 93, "RTP payload type for PCM16b-nb (8 kHz)");
DEFINE_int(pcm16b_wb, 94, "RTP payload type for PCM16b-wb (16 kHz)");
DEFINE_int(pcm16b_swb32, 95, "RTP payload type for PCM16b-swb32 (32 kHz)");
DEFINE_int(pcm16b_swb48, 96, "RTP payload type for PCM16b-swb48 (48 kHz)");
DEFINE_int(g722, 9, "RTP payload type for G.722");
DEFINE_int(avt, 106, "RTP payload type for AVT/DTMF (8 kHz)");
DEFINE_int(avt_16, 114, "RTP payload type for AVT/DTMF (16 kHz)");
DEFINE_int(avt_32, 115, "RTP payload type for AVT/DTMF (32 kHz)");
DEFINE_int(avt_48, 116, "RTP payload type for AVT/DTMF (48 kHz)");
DEFINE_int(red, 117, "RTP payload type for redundant audio (RED)");
DEFINE_int(cn_nb, 13, "RTP payload type for comfort noise (8 kHz)");
DEFINE_int(cn_wb, 98, "RTP payload type for comfort noise (16 kHz)");
DEFINE_int(cn_swb32, 99, "RTP payload type for comfort noise (32 kHz)");
DEFINE_int(cn_swb48, 100, "RTP payload type for comfort noise (48 kHz)");
DEFINE_bool(codec_map, false, "Prints the mapping between RTP payload type and "
"codec");
DEFINE_string(replacement_audio_file, "",
"A PCM file that will be used to populate ""dummy"" RTP packets");
DEFINE_string(ssrc,
"",
"Only use packets with this SSRC (decimal or hex, the latter "
"starting with 0x)");
DEFINE_int(audio_level, 1, "Extension ID for audio level (RFC 6464)");
DEFINE_int(abs_send_time, 3, "Extension ID for absolute sender time");
DEFINE_int(transport_seq_no, 5, "Extension ID for transport sequence number");
DEFINE_bool(matlabplot,
false,
"Generates a matlab script for plotting the delay profile");
DEFINE_bool(help, false, "Prints this message");
// Maps a codec type to a printable name string.
std::string CodecName(NetEqDecoder codec) {
switch (codec) {
case NetEqDecoder::kDecoderPCMu:
return "PCM-u";
case NetEqDecoder::kDecoderPCMa:
return "PCM-a";
case NetEqDecoder::kDecoderILBC:
return "iLBC";
case NetEqDecoder::kDecoderISAC:
return "iSAC";
case NetEqDecoder::kDecoderISACswb:
return "iSAC-swb (32 kHz)";
case NetEqDecoder::kDecoderOpus:
return "Opus";
case NetEqDecoder::kDecoderPCM16B:
return "PCM16b-nb (8 kHz)";
case NetEqDecoder::kDecoderPCM16Bwb:
return "PCM16b-wb (16 kHz)";
case NetEqDecoder::kDecoderPCM16Bswb32kHz:
return "PCM16b-swb32 (32 kHz)";
case NetEqDecoder::kDecoderPCM16Bswb48kHz:
return "PCM16b-swb48 (48 kHz)";
case NetEqDecoder::kDecoderG722:
return "G.722";
case NetEqDecoder::kDecoderRED:
return "redundant audio (RED)";
case NetEqDecoder::kDecoderAVT:
return "AVT/DTMF (8 kHz)";
case NetEqDecoder::kDecoderAVT16kHz:
return "AVT/DTMF (16 kHz)";
case NetEqDecoder::kDecoderAVT32kHz:
return "AVT/DTMF (32 kHz)";
case NetEqDecoder::kDecoderAVT48kHz:
return "AVT/DTMF (48 kHz)";
case NetEqDecoder::kDecoderCNGnb:
return "comfort noise (8 kHz)";
case NetEqDecoder::kDecoderCNGwb:
return "comfort noise (16 kHz)";
case NetEqDecoder::kDecoderCNGswb32kHz:
return "comfort noise (32 kHz)";
case NetEqDecoder::kDecoderCNGswb48kHz:
return "comfort noise (48 kHz)";
default:
FATAL();
return "undefined";
}
}
void PrintCodecMappingEntry(NetEqDecoder codec, int flag) {
std::cout << CodecName(codec) << ": " << flag << std::endl;
}
void PrintCodecMapping() {
PrintCodecMappingEntry(NetEqDecoder::kDecoderPCMu, FLAG_pcmu);
PrintCodecMappingEntry(NetEqDecoder::kDecoderPCMa, FLAG_pcma);
PrintCodecMappingEntry(NetEqDecoder::kDecoderILBC, FLAG_ilbc);
PrintCodecMappingEntry(NetEqDecoder::kDecoderISAC, FLAG_isac);
PrintCodecMappingEntry(NetEqDecoder::kDecoderISACswb, FLAG_isac_swb);
PrintCodecMappingEntry(NetEqDecoder::kDecoderOpus, FLAG_opus);
PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16B, FLAG_pcm16b);
PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16Bwb, FLAG_pcm16b_wb);
PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16Bswb32kHz,
FLAG_pcm16b_swb32);
PrintCodecMappingEntry(NetEqDecoder::kDecoderPCM16Bswb48kHz,
FLAG_pcm16b_swb48);
PrintCodecMappingEntry(NetEqDecoder::kDecoderG722, FLAG_g722);
PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT, FLAG_avt);
PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT16kHz, FLAG_avt_16);
PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT32kHz, FLAG_avt_32);
PrintCodecMappingEntry(NetEqDecoder::kDecoderAVT48kHz, FLAG_avt_48);
PrintCodecMappingEntry(NetEqDecoder::kDecoderRED, FLAG_red);
PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGnb, FLAG_cn_nb);
PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGwb, FLAG_cn_wb);
PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGswb32kHz, FLAG_cn_swb32);
PrintCodecMappingEntry(NetEqDecoder::kDecoderCNGswb48kHz, FLAG_cn_swb48);
}
rtc::Optional<int> CodecSampleRate(uint8_t payload_type) {
if (payload_type == FLAG_pcmu || payload_type == FLAG_pcma ||
payload_type == FLAG_ilbc || payload_type == FLAG_pcm16b ||
payload_type == FLAG_cn_nb || payload_type == FLAG_avt)
return rtc::Optional<int>(8000);
if (payload_type == FLAG_isac || payload_type == FLAG_pcm16b_wb ||
payload_type == FLAG_g722 || payload_type == FLAG_cn_wb ||
payload_type == FLAG_avt_16)
return rtc::Optional<int>(16000);
if (payload_type == FLAG_isac_swb || payload_type == FLAG_pcm16b_swb32 ||
payload_type == FLAG_cn_swb32 || payload_type == FLAG_avt_32)
return rtc::Optional<int>(32000);
if (payload_type == FLAG_opus || payload_type == FLAG_pcm16b_swb48 ||
payload_type == FLAG_cn_swb48 || payload_type == FLAG_avt_48)
return rtc::Optional<int>(48000);
if (payload_type == FLAG_red)
return rtc::Optional<int>(0);
return rtc::Optional<int>();
}
// Class to let through only the packets with a given SSRC. Should be used as an
// outer layer on another NetEqInput object.
class FilterSsrcInput : public NetEqInput {
public:
FilterSsrcInput(std::unique_ptr<NetEqInput> source, uint32_t ssrc)
: source_(std::move(source)), ssrc_(ssrc) {
FindNextWithCorrectSsrc();
RTC_CHECK(source_->NextHeader()) << "Found no packet with SSRC = 0x"
<< std::hex << ssrc_;
}
// All methods but PopPacket() simply relay to the |source_| object.
rtc::Optional<int64_t> NextPacketTime() const override {
return source_->NextPacketTime();
}
rtc::Optional<int64_t> NextOutputEventTime() const override {
return source_->NextOutputEventTime();
}
// Returns the next packet, and throws away upcoming packets that do not match
// the desired SSRC.
std::unique_ptr<PacketData> PopPacket() override {
std::unique_ptr<PacketData> packet_to_return = source_->PopPacket();
RTC_DCHECK(!packet_to_return || packet_to_return->header.ssrc == ssrc_);
    // Pre-fetch the next packet with the correct SSRC. Hence, |source_| will
    // always have a valid packet (or be empty if no more packets are
    // available) when this method returns.
FindNextWithCorrectSsrc();
return packet_to_return;
}
void AdvanceOutputEvent() override { source_->AdvanceOutputEvent(); }
bool ended() const override { return source_->ended(); }
rtc::Optional<RTPHeader> NextHeader() const override {
return source_->NextHeader();
}
private:
void FindNextWithCorrectSsrc() {
while (source_->NextHeader() && source_->NextHeader()->ssrc != ssrc_) {
source_->PopPacket();
}
}
std::unique_ptr<NetEqInput> source_;
uint32_t ssrc_;
};
// A callback class which prints whenever the inserted packet stream changes
// the SSRC.
class SsrcSwitchDetector : public NetEqPostInsertPacket {
public:
// Takes a pointer to another callback object, which will be invoked after
// this object finishes. This does not transfer ownership, and null is a
// valid value.
explicit SsrcSwitchDetector(NetEqPostInsertPacket* other_callback)
: other_callback_(other_callback) {}
void AfterInsertPacket(const NetEqInput::PacketData& packet,
NetEq* neteq) override {
if (last_ssrc_ && packet.header.ssrc != *last_ssrc_) {
std::cout << "Changing streams from 0x" << std::hex << *last_ssrc_
<< " to 0x" << std::hex << packet.header.ssrc
<< std::dec << " (payload type "
<< static_cast<int>(packet.header.payloadType) << ")"
<< std::endl;
}
last_ssrc_ = rtc::Optional<uint32_t>(packet.header.ssrc);
if (other_callback_) {
other_callback_->AfterInsertPacket(packet, neteq);
}
}
private:
NetEqPostInsertPacket* other_callback_;
rtc::Optional<uint32_t> last_ssrc_;
};
class StatsGetter : public NetEqGetAudioCallback {
public:
// This struct is a replica of webrtc::NetEqNetworkStatistics, but with all
// values stored in double precision.
struct Stats {
double current_buffer_size_ms = 0.0;
double preferred_buffer_size_ms = 0.0;
double jitter_peaks_found = 0.0;
double packet_loss_rate = 0.0;
double expand_rate = 0.0;
double speech_expand_rate = 0.0;
double preemptive_rate = 0.0;
double accelerate_rate = 0.0;
double secondary_decoded_rate = 0.0;
double secondary_discarded_rate = 0.0;
double clockdrift_ppm = 0.0;
double added_zero_samples = 0.0;
double mean_waiting_time_ms = 0.0;
double median_waiting_time_ms = 0.0;
double min_waiting_time_ms = 0.0;
double max_waiting_time_ms = 0.0;
};
// Takes a pointer to another callback object, which will be invoked after
// this object finishes. This does not transfer ownership, and null is a
// valid value.
explicit StatsGetter(NetEqGetAudioCallback* other_callback)
: other_callback_(other_callback) {}
void BeforeGetAudio(NetEq* neteq) override {
if (other_callback_) {
other_callback_->BeforeGetAudio(neteq);
}
}
void AfterGetAudio(int64_t time_now_ms,
const AudioFrame& audio_frame,
bool muted,
NetEq* neteq) override {
if (++counter_ >= 100) {
counter_ = 0;
NetEqNetworkStatistics stats;
RTC_CHECK_EQ(neteq->NetworkStatistics(&stats), 0);
stats_.push_back(stats);
}
if (other_callback_) {
other_callback_->AfterGetAudio(time_now_ms, audio_frame, muted, neteq);
}
}
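  // Note: the |*_rate| fields in NetEqNetworkStatistics are reported in Q14
  // fixed point, which is why the computations below divide by 16384 to
  // obtain fractions in [0, 1].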
double AverageSpeechExpandRate() const {
double sum_speech_expand =
std::accumulate(stats_.begin(), stats_.end(), double{0.0},
[](double a, NetEqNetworkStatistics b) {
return a + static_cast<double>(b.speech_expand_rate);
});
return sum_speech_expand / 16384.0 / stats_.size();
}
Stats AverageStats() const {
Stats sum_stats = std::accumulate(
stats_.begin(), stats_.end(), Stats(),
[](Stats a, NetEqNetworkStatistics b) {
a.current_buffer_size_ms += b.current_buffer_size_ms;
a.preferred_buffer_size_ms += b.preferred_buffer_size_ms;
a.jitter_peaks_found += b.jitter_peaks_found;
a.packet_loss_rate += b.packet_loss_rate / 16384.0;
a.expand_rate += b.expand_rate / 16384.0;
a.speech_expand_rate += b.speech_expand_rate / 16384.0;
a.preemptive_rate += b.preemptive_rate / 16384.0;
a.accelerate_rate += b.accelerate_rate / 16384.0;
a.secondary_decoded_rate += b.secondary_decoded_rate / 16384.0;
a.secondary_discarded_rate += b.secondary_discarded_rate / 16384.0;
a.clockdrift_ppm += b.clockdrift_ppm;
a.added_zero_samples += b.added_zero_samples;
a.mean_waiting_time_ms += b.mean_waiting_time_ms;
a.median_waiting_time_ms += b.median_waiting_time_ms;
a.min_waiting_time_ms =
std::min(a.min_waiting_time_ms,
static_cast<double>(b.min_waiting_time_ms));
a.max_waiting_time_ms =
std::max(a.max_waiting_time_ms,
static_cast<double>(b.max_waiting_time_ms));
return a;
});
sum_stats.current_buffer_size_ms /= stats_.size();
sum_stats.preferred_buffer_size_ms /= stats_.size();
sum_stats.jitter_peaks_found /= stats_.size();
sum_stats.packet_loss_rate /= stats_.size();
sum_stats.expand_rate /= stats_.size();
sum_stats.speech_expand_rate /= stats_.size();
sum_stats.preemptive_rate /= stats_.size();
sum_stats.accelerate_rate /= stats_.size();
sum_stats.secondary_decoded_rate /= stats_.size();
sum_stats.secondary_discarded_rate /= stats_.size();
sum_stats.clockdrift_ppm /= stats_.size();
sum_stats.added_zero_samples /= stats_.size();
sum_stats.mean_waiting_time_ms /= stats_.size();
sum_stats.median_waiting_time_ms /= stats_.size();
return sum_stats;
}
private:
NetEqGetAudioCallback* other_callback_;
size_t counter_ = 0;
std::vector<NetEqNetworkStatistics> stats_;
};
int RunTest(int argc, char* argv[]) {
std::string program_name = argv[0];
std::string usage = "Tool for decoding an RTP dump file using NetEq.\n"
"Run " + program_name + " --help for usage.\n"
"Example usage:\n" + program_name +
" input.rtp output.{pcm, wav}\n";
if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true)) {
return 1;
}
if (FLAG_help) {
std::cout << usage;
rtc::FlagList::Print(nullptr, false);
return 0;
}
if (FLAG_codec_map) {
PrintCodecMapping();
}
if (argc != 3) {
if (FLAG_codec_map) {
// We have already printed the codec map. Just end the program.
return 0;
}
// Print usage information.
std::cout << usage;
return 0;
}
RTC_CHECK(ValidatePayloadType(FLAG_pcmu));
RTC_CHECK(ValidatePayloadType(FLAG_pcma));
RTC_CHECK(ValidatePayloadType(FLAG_ilbc));
RTC_CHECK(ValidatePayloadType(FLAG_isac));
RTC_CHECK(ValidatePayloadType(FLAG_isac_swb));
RTC_CHECK(ValidatePayloadType(FLAG_opus));
RTC_CHECK(ValidatePayloadType(FLAG_pcm16b));
RTC_CHECK(ValidatePayloadType(FLAG_pcm16b_wb));
RTC_CHECK(ValidatePayloadType(FLAG_pcm16b_swb32));
RTC_CHECK(ValidatePayloadType(FLAG_pcm16b_swb48));
RTC_CHECK(ValidatePayloadType(FLAG_g722));
RTC_CHECK(ValidatePayloadType(FLAG_avt));
RTC_CHECK(ValidatePayloadType(FLAG_avt_16));
RTC_CHECK(ValidatePayloadType(FLAG_avt_32));
RTC_CHECK(ValidatePayloadType(FLAG_avt_48));
RTC_CHECK(ValidatePayloadType(FLAG_red));
RTC_CHECK(ValidatePayloadType(FLAG_cn_nb));
RTC_CHECK(ValidatePayloadType(FLAG_cn_wb));
RTC_CHECK(ValidatePayloadType(FLAG_cn_swb32));
RTC_CHECK(ValidatePayloadType(FLAG_cn_swb48));
RTC_CHECK(ValidateSsrcValue(FLAG_ssrc));
RTC_CHECK(ValidateExtensionId(FLAG_audio_level));
RTC_CHECK(ValidateExtensionId(FLAG_abs_send_time));
RTC_CHECK(ValidateExtensionId(FLAG_transport_seq_no));
// Gather RTP header extensions in a map.
NetEqPacketSourceInput::RtpHeaderExtensionMap rtp_ext_map = {
{FLAG_audio_level, kRtpExtensionAudioLevel},
{FLAG_abs_send_time, kRtpExtensionAbsoluteSendTime},
{FLAG_transport_seq_no, kRtpExtensionTransportSequenceNumber}};
const std::string input_file_name = argv[1];
std::unique_ptr<NetEqInput> input;
if (RtpFileSource::ValidRtpDump(input_file_name) ||
RtpFileSource::ValidPcap(input_file_name)) {
input.reset(new NetEqRtpDumpInput(input_file_name, rtp_ext_map));
} else {
input.reset(new NetEqEventLogInput(input_file_name, rtp_ext_map));
}
std::cout << "Input file: " << input_file_name << std::endl;
RTC_CHECK(input) << "Cannot open input file";
RTC_CHECK(!input->ended()) << "Input file is empty";
// Check if an SSRC value was provided.
if (strlen(FLAG_ssrc) > 0) {
uint32_t ssrc;
RTC_CHECK(ParseSsrc(FLAG_ssrc, &ssrc)) << "Flag verification has failed.";
input.reset(new FilterSsrcInput(std::move(input), ssrc));
}
// Check the sample rate.
rtc::Optional<int> sample_rate_hz;
std::set<std::pair<int, uint32_t>> discarded_pt_and_ssrc;
while (input->NextHeader()) {
rtc::Optional<RTPHeader> first_rtp_header = input->NextHeader();
RTC_DCHECK(first_rtp_header);
sample_rate_hz = CodecSampleRate(first_rtp_header->payloadType);
if (sample_rate_hz) {
std::cout << "Found valid packet with payload type "
<< static_cast<int>(first_rtp_header->payloadType)
<< " and SSRC 0x" << std::hex << first_rtp_header->ssrc
<< std::dec << std::endl;
break;
}
// Discard this packet and move to the next. Keep track of discarded payload
// types and SSRCs.
discarded_pt_and_ssrc.emplace(first_rtp_header->payloadType,
first_rtp_header->ssrc);
input->PopPacket();
}
if (!discarded_pt_and_ssrc.empty()) {
std::cout << "Discarded initial packets with the following payload types "
"and SSRCs:"
<< std::endl;
for (const auto& d : discarded_pt_and_ssrc) {
std::cout << "PT " << d.first << "; SSRC 0x" << std::hex
<< static_cast<int>(d.second) << std::dec << std::endl;
}
}
if (!sample_rate_hz) {
std::cout << "Cannot find any packets with known payload types"
<< std::endl;
RTC_NOTREACHED();
}
// Open the output file now that we know the sample rate. (Rate is only needed
// for wav files.)
const std::string output_file_name = argv[2];
std::unique_ptr<AudioSink> output;
if (output_file_name.size() >= 4 &&
output_file_name.substr(output_file_name.size() - 4) == ".wav") {
// Open a wav file.
output.reset(new OutputWavFile(output_file_name, *sample_rate_hz));
} else {
// Open a pcm file.
output.reset(new OutputAudioFile(output_file_name));
}
std::cout << "Output file: " << output_file_name << std::endl;
NetEqTest::DecoderMap codecs = {
{FLAG_pcmu, std::make_pair(NetEqDecoder::kDecoderPCMu, "pcmu")},
{FLAG_pcma, std::make_pair(NetEqDecoder::kDecoderPCMa, "pcma")},
{FLAG_ilbc, std::make_pair(NetEqDecoder::kDecoderILBC, "ilbc")},
{FLAG_isac, std::make_pair(NetEqDecoder::kDecoderISAC, "isac")},
{FLAG_isac_swb,
std::make_pair(NetEqDecoder::kDecoderISACswb, "isac-swb")},
{FLAG_opus, std::make_pair(NetEqDecoder::kDecoderOpus, "opus")},
{FLAG_pcm16b, std::make_pair(NetEqDecoder::kDecoderPCM16B, "pcm16-nb")},
{FLAG_pcm16b_wb,
std::make_pair(NetEqDecoder::kDecoderPCM16Bwb, "pcm16-wb")},
{FLAG_pcm16b_swb32,
std::make_pair(NetEqDecoder::kDecoderPCM16Bswb32kHz, "pcm16-swb32")},
{FLAG_pcm16b_swb48,
std::make_pair(NetEqDecoder::kDecoderPCM16Bswb48kHz, "pcm16-swb48")},
{FLAG_g722, std::make_pair(NetEqDecoder::kDecoderG722, "g722")},
{FLAG_avt, std::make_pair(NetEqDecoder::kDecoderAVT, "avt")},
{FLAG_avt_16, std::make_pair(NetEqDecoder::kDecoderAVT16kHz, "avt-16")},
{FLAG_avt_32,
std::make_pair(NetEqDecoder::kDecoderAVT32kHz, "avt-32")},
{FLAG_avt_48,
std::make_pair(NetEqDecoder::kDecoderAVT48kHz, "avt-48")},
{FLAG_red, std::make_pair(NetEqDecoder::kDecoderRED, "red")},
{FLAG_cn_nb, std::make_pair(NetEqDecoder::kDecoderCNGnb, "cng-nb")},
{FLAG_cn_wb, std::make_pair(NetEqDecoder::kDecoderCNGwb, "cng-wb")},
{FLAG_cn_swb32,
std::make_pair(NetEqDecoder::kDecoderCNGswb32kHz, "cng-swb32")},
{FLAG_cn_swb48,
std::make_pair(NetEqDecoder::kDecoderCNGswb48kHz, "cng-swb48")}};
// Check if a replacement audio file was provided.
std::unique_ptr<AudioDecoder> replacement_decoder;
NetEqTest::ExtDecoderMap ext_codecs;
if (strlen(FLAG_replacement_audio_file) > 0) {
// Find largest unused payload type.
int replacement_pt = 127;
while (!(codecs.find(replacement_pt) == codecs.end() &&
ext_codecs.find(replacement_pt) == ext_codecs.end())) {
--replacement_pt;
RTC_CHECK_GE(replacement_pt, 0);
}
auto std_set_int32_to_uint8 = [](const std::set<int32_t>& a) {
std::set<uint8_t> b;
for (auto& x : a) {
b.insert(static_cast<uint8_t>(x));
}
return b;
};
std::set<uint8_t> cn_types = std_set_int32_to_uint8(
{FLAG_cn_nb, FLAG_cn_wb, FLAG_cn_swb32, FLAG_cn_swb48});
std::set<uint8_t> forbidden_types =
std_set_int32_to_uint8({FLAG_g722, FLAG_red, FLAG_avt,
FLAG_avt_16, FLAG_avt_32, FLAG_avt_48});
input.reset(new NetEqReplacementInput(std::move(input), replacement_pt,
cn_types, forbidden_types));
replacement_decoder.reset(new FakeDecodeFromFile(
std::unique_ptr<InputAudioFile>(
new InputAudioFile(FLAG_replacement_audio_file)),
48000, false));
NetEqTest::ExternalDecoderInfo ext_dec_info = {
replacement_decoder.get(), NetEqDecoder::kDecoderArbitrary,
"replacement codec"};
ext_codecs[replacement_pt] = ext_dec_info;
}
NetEqTest::Callbacks callbacks;
std::unique_ptr<NetEqDelayAnalyzer> delay_analyzer;
if (FLAG_matlabplot) {
delay_analyzer.reset(new NetEqDelayAnalyzer);
}
SsrcSwitchDetector ssrc_switch_detector(delay_analyzer.get());
callbacks.post_insert_packet = &ssrc_switch_detector;
StatsGetter stats_getter(delay_analyzer.get());
callbacks.get_audio_callback = &stats_getter;
NetEq::Config config;
config.sample_rate_hz = *sample_rate_hz;
NetEqTest test(config, codecs, ext_codecs, std::move(input),
std::move(output), callbacks);
int64_t test_duration_ms = test.Run();
if (FLAG_matlabplot) {
auto matlab_script_name = output_file_name;
std::replace(matlab_script_name.begin(), matlab_script_name.end(), '.',
'_');
std::cout << "Creating Matlab plot script " << matlab_script_name + ".m"
<< std::endl;
delay_analyzer->CreateMatlabScript(matlab_script_name + ".m");
}
printf("Simulation statistics:\n");
printf(" output duration: %" PRId64 " ms\n", test_duration_ms);
auto stats = stats_getter.AverageStats();
printf(" packet_loss_rate: %f %%\n", 100.0 * stats.packet_loss_rate);
printf(" expand_rate: %f %%\n", 100.0 * stats.expand_rate);
printf(" speech_expand_rate: %f %%\n", 100.0 * stats.speech_expand_rate);
printf(" preemptive_rate: %f %%\n", 100.0 * stats.preemptive_rate);
printf(" accelerate_rate: %f %%\n", 100.0 * stats.accelerate_rate);
printf(" secondary_decoded_rate: %f %%\n",
100.0 * stats.secondary_decoded_rate);
printf(" secondary_discarded_rate: %f %%\n",
100.0 * stats.secondary_discarded_rate);
printf(" clockdrift_ppm: %f ppm\n", stats.clockdrift_ppm);
printf(" mean_waiting_time_ms: %f ms\n", stats.mean_waiting_time_ms);
printf(" median_waiting_time_ms: %f ms\n", stats.median_waiting_time_ms);
printf(" min_waiting_time_ms: %f ms\n", stats.min_waiting_time_ms);
printf(" max_waiting_time_ms: %f ms\n", stats.max_waiting_time_ms);
return 0;
}
} // namespace
} // namespace test
} // namespace webrtc
int main(int argc, char* argv[]) {
  return webrtc::test::RunTest(argc, argv);
}

View File

@ -0,0 +1,136 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/neteq_test.h"
#include <iostream>
#include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h"
namespace webrtc {
namespace test {
void DefaultNetEqTestErrorCallback::OnInsertPacketError(
const NetEqInput::PacketData& packet) {
std::cerr << "InsertPacket returned an error." << std::endl;
std::cerr << "Packet data: " << packet.ToString() << std::endl;
FATAL();
}
void DefaultNetEqTestErrorCallback::OnGetAudioError() {
std::cerr << "GetAudio returned an error." << std::endl;
FATAL();
}
NetEqTest::NetEqTest(const NetEq::Config& config,
const DecoderMap& codecs,
const ExtDecoderMap& ext_codecs,
std::unique_ptr<NetEqInput> input,
std::unique_ptr<AudioSink> output,
Callbacks callbacks)
: neteq_(NetEq::Create(config, CreateBuiltinAudioDecoderFactory())),
input_(std::move(input)),
output_(std::move(output)),
callbacks_(callbacks),
sample_rate_hz_(config.sample_rate_hz) {
RTC_CHECK(!config.enable_muted_state)
<< "The code does not handle enable_muted_state";
RegisterDecoders(codecs);
RegisterExternalDecoders(ext_codecs);
}
int64_t NetEqTest::Run() {
const int64_t start_time_ms = *input_->NextEventTime();
int64_t time_now_ms = start_time_ms;
while (!input_->ended()) {
// Advance time to next event.
RTC_DCHECK(input_->NextEventTime());
time_now_ms = *input_->NextEventTime();
// Check if it is time to insert packet.
if (input_->NextPacketTime() && time_now_ms >= *input_->NextPacketTime()) {
std::unique_ptr<NetEqInput::PacketData> packet_data = input_->PopPacket();
RTC_CHECK(packet_data);
int error = neteq_->InsertPacket(
packet_data->header,
rtc::ArrayView<const uint8_t>(packet_data->payload),
static_cast<uint32_t>(packet_data->time_ms * sample_rate_hz_ / 1000));
if (error != NetEq::kOK && callbacks_.error_callback) {
callbacks_.error_callback->OnInsertPacketError(*packet_data);
}
if (callbacks_.post_insert_packet) {
callbacks_.post_insert_packet->AfterInsertPacket(*packet_data,
neteq_.get());
}
}
// Check if it is time to get output audio.
if (input_->NextOutputEventTime() &&
time_now_ms >= *input_->NextOutputEventTime()) {
if (callbacks_.get_audio_callback) {
callbacks_.get_audio_callback->BeforeGetAudio(neteq_.get());
}
AudioFrame out_frame;
bool muted;
int error = neteq_->GetAudio(&out_frame, &muted);
RTC_CHECK(!muted) << "The code does not handle enable_muted_state";
if (error != NetEq::kOK) {
if (callbacks_.error_callback) {
callbacks_.error_callback->OnGetAudioError();
}
} else {
sample_rate_hz_ = out_frame.sample_rate_hz_;
}
if (callbacks_.get_audio_callback) {
callbacks_.get_audio_callback->AfterGetAudio(time_now_ms, out_frame,
muted, neteq_.get());
}
if (output_) {
RTC_CHECK(output_->WriteArray(
out_frame.data(),
out_frame.samples_per_channel_ * out_frame.num_channels_));
}
input_->AdvanceOutputEvent();
}
}
return time_now_ms - start_time_ms;
}
NetEqNetworkStatistics NetEqTest::SimulationStats() {
NetEqNetworkStatistics stats;
RTC_CHECK_EQ(neteq_->NetworkStatistics(&stats), 0);
return stats;
}
void NetEqTest::RegisterDecoders(const DecoderMap& codecs) {
for (const auto& c : codecs) {
RTC_CHECK_EQ(
neteq_->RegisterPayloadType(c.second.first, c.second.second, c.first),
NetEq::kOK)
<< "Cannot register " << c.second.second << " to payload type "
<< c.first;
}
}
void NetEqTest::RegisterExternalDecoders(const ExtDecoderMap& codecs) {
for (const auto& c : codecs) {
RTC_CHECK_EQ(
neteq_->RegisterExternalDecoder(c.second.decoder, c.second.codec,
c.second.codec_name, c.first),
NetEq::kOK)
<< "Cannot register " << c.second.codec_name << " to payload type "
<< c.first;
}
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "webrtc/modules/audio_coding/neteq/include/neteq.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
#include "webrtc/modules/audio_coding/neteq/tools/neteq_input.h"
namespace webrtc {
namespace test {
class NetEqTestErrorCallback {
public:
virtual ~NetEqTestErrorCallback() = default;
virtual void OnInsertPacketError(const NetEqInput::PacketData& packet) {}
virtual void OnGetAudioError() {}
};
class DefaultNetEqTestErrorCallback : public NetEqTestErrorCallback {
void OnInsertPacketError(const NetEqInput::PacketData& packet) override;
void OnGetAudioError() override;
};
class NetEqPostInsertPacket {
public:
virtual ~NetEqPostInsertPacket() = default;
virtual void AfterInsertPacket(const NetEqInput::PacketData& packet,
NetEq* neteq) = 0;
};
class NetEqGetAudioCallback {
public:
virtual ~NetEqGetAudioCallback() = default;
virtual void BeforeGetAudio(NetEq* neteq) = 0;
virtual void AfterGetAudio(int64_t time_now_ms,
const AudioFrame& audio_frame,
bool muted,
NetEq* neteq) = 0;
};
// Class that provides an input--output test for NetEq. The input (both packets
// and output events) is provided by a NetEqInput object, while the output is
// directed to an AudioSink object.
class NetEqTest {
public:
using DecoderMap = std::map<int, std::pair<NetEqDecoder, std::string> >;
struct ExternalDecoderInfo {
AudioDecoder* decoder;
NetEqDecoder codec;
std::string codec_name;
};
using ExtDecoderMap = std::map<int, ExternalDecoderInfo>;
struct Callbacks {
NetEqTestErrorCallback* error_callback = nullptr;
NetEqPostInsertPacket* post_insert_packet = nullptr;
NetEqGetAudioCallback* get_audio_callback = nullptr;
};
  // Sets up the test with the given configuration, codec mappings, input, output,
// and callback objects for error reporting.
NetEqTest(const NetEq::Config& config,
const DecoderMap& codecs,
const ExtDecoderMap& ext_codecs,
std::unique_ptr<NetEqInput> input,
std::unique_ptr<AudioSink> output,
Callbacks callbacks);
~NetEqTest() = default;
// Runs the test. Returns the duration of the produced audio in ms.
int64_t Run();
// Returns the statistics from NetEq.
NetEqNetworkStatistics SimulationStats();
private:
void RegisterDecoders(const DecoderMap& codecs);
void RegisterExternalDecoders(const ExtDecoderMap& codecs);
std::unique_ptr<NetEq> neteq_;
std::unique_ptr<NetEqInput> input_;
std::unique_ptr<AudioSink> output_;
Callbacks callbacks_;
int sample_rate_hz_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_NETEQ_TEST_H_
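Not part of this change, but as a rough illustration of how the NetEqTest API above might be driven. The decoder enum value, sample rate, and output file name below are assumptions made for the sketch, not taken from this CL:

#include <memory>
#include <utility>

#include "webrtc/modules/audio_coding/neteq/tools/neteq_test.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_wav_file.h"

namespace {

// Runs a NetEq simulation over |input| and writes the audio to a wav file.
int64_t RunNetEqTestSketch(std::unique_ptr<webrtc::test::NetEqInput> input) {
  webrtc::NetEq::Config config;
  config.sample_rate_hz = 16000;

  // Map payload type 0 to PCMu; further mappings are added the same way.
  webrtc::test::NetEqTest::DecoderMap codecs;
  codecs[0] = std::make_pair(webrtc::NetEqDecoder::kDecoderPCMu, "pcmu");

  std::unique_ptr<webrtc::test::AudioSink> output(
      new webrtc::test::OutputWavFile("neteq_out.wav", config.sample_rate_hz));

  webrtc::test::NetEqTest::Callbacks callbacks;  // All callbacks left unset.
  webrtc::test::NetEqTest test(config, codecs,
                               webrtc::test::NetEqTest::ExtDecoderMap(),
                               std::move(input), std::move(output), callbacks);
  return test.Run();  // Duration of the produced audio in ms.
}

}  // namespace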

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_
#include <assert.h>
#include <stdio.h>
#include <string>
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
#include "webrtc/rtc_base/constructormagic.h"
namespace webrtc {
namespace test {
class OutputAudioFile : public AudioSink {
public:
// Creates an OutputAudioFile, opening a file named |file_name| for writing.
// The file format is 16-bit signed host-endian PCM.
explicit OutputAudioFile(const std::string& file_name) {
out_file_ = fopen(file_name.c_str(), "wb");
}
virtual ~OutputAudioFile() {
if (out_file_)
fclose(out_file_);
}
bool WriteArray(const int16_t* audio, size_t num_samples) override {
assert(out_file_);
return fwrite(audio, sizeof(*audio), num_samples, out_file_) == num_samples;
}
private:
FILE* out_file_;
RTC_DISALLOW_COPY_AND_ASSIGN(OutputAudioFile);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_AUDIO_FILE_H_

View File

@ -0,0 +1,43 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
#include <string>
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
#include "webrtc/rtc_base/constructormagic.h"
namespace webrtc {
namespace test {
class OutputWavFile : public AudioSink {
public:
// Creates an OutputWavFile, opening a file named |file_name| for writing.
// The output file is a PCM encoded wav file.
OutputWavFile(const std::string& file_name, int sample_rate_hz)
: wav_writer_(file_name, sample_rate_hz, 1) {}
bool WriteArray(const int16_t* audio, size_t num_samples) override {
wav_writer_.WriteSamples(audio, num_samples);
return true;
}
private:
WavWriter wav_writer_;
RTC_DISALLOW_COPY_AND_ASSIGN(OutputWavFile);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_OUTPUT_WAV_FILE_H_
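As an aside (not part of this change), a minimal sketch of feeding samples to the two AudioSink implementations above; the file names are placeholders:

#include <stdint.h>

#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
#include "webrtc/modules/audio_coding/neteq/tools/output_wav_file.h"

void WriteSilenceSketch() {
  const int kSampleRateHz = 16000;
  const size_t kSamplesPer10Ms = kSampleRateHz / 100;
  int16_t frame[kSamplesPer10Ms] = {0};  // 10 ms of silence at 16 kHz.

  webrtc::test::OutputAudioFile pcm_out("out.pcm");  // Raw host-endian PCM.
  webrtc::test::OutputWavFile wav_out("out.wav", kSampleRateHz);

  pcm_out.WriteArray(frame, kSamplesPer10Ms);
  wav_out.WriteArray(frame, kSamplesPer10Ms);
}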

View File

@ -0,0 +1,170 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include <string.h>
#include <memory>
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
Packet::Packet(uint8_t* packet_memory,
size_t allocated_bytes,
double time_ms,
const RtpHeaderParser& parser)
: payload_memory_(packet_memory),
payload_(NULL),
packet_length_bytes_(allocated_bytes),
payload_length_bytes_(0),
virtual_packet_length_bytes_(allocated_bytes),
virtual_payload_length_bytes_(0),
time_ms_(time_ms) {
valid_header_ = ParseHeader(parser);
}
Packet::Packet(uint8_t* packet_memory,
size_t allocated_bytes,
size_t virtual_packet_length_bytes,
double time_ms,
const RtpHeaderParser& parser)
: payload_memory_(packet_memory),
payload_(NULL),
packet_length_bytes_(allocated_bytes),
payload_length_bytes_(0),
virtual_packet_length_bytes_(virtual_packet_length_bytes),
virtual_payload_length_bytes_(0),
time_ms_(time_ms) {
valid_header_ = ParseHeader(parser);
}
Packet::Packet(uint8_t* packet_memory, size_t allocated_bytes, double time_ms)
: payload_memory_(packet_memory),
payload_(NULL),
packet_length_bytes_(allocated_bytes),
payload_length_bytes_(0),
virtual_packet_length_bytes_(allocated_bytes),
virtual_payload_length_bytes_(0),
time_ms_(time_ms) {
std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
valid_header_ = ParseHeader(*parser);
}
Packet::Packet(uint8_t* packet_memory,
size_t allocated_bytes,
size_t virtual_packet_length_bytes,
double time_ms)
: payload_memory_(packet_memory),
payload_(NULL),
packet_length_bytes_(allocated_bytes),
payload_length_bytes_(0),
virtual_packet_length_bytes_(virtual_packet_length_bytes),
virtual_payload_length_bytes_(0),
time_ms_(time_ms) {
std::unique_ptr<RtpHeaderParser> parser(RtpHeaderParser::Create());
valid_header_ = ParseHeader(*parser);
}
Packet::~Packet() = default;
bool Packet::ExtractRedHeaders(std::list<RTPHeader*>* headers) const {
//
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |1| block PT | timestamp offset | block length |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |1| ... |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |0| block PT |
// +-+-+-+-+-+-+-+-+
//
assert(payload_);
const uint8_t* payload_ptr = payload_;
const uint8_t* payload_end_ptr = payload_ptr + payload_length_bytes_;
// Find all RED headers with the extension bit set to 1. That is, all headers
// but the last one.
while ((payload_ptr < payload_end_ptr) && (*payload_ptr & 0x80)) {
RTPHeader* header = new RTPHeader;
CopyToHeader(header);
header->payloadType = payload_ptr[0] & 0x7F;
uint32_t offset = (payload_ptr[1] << 6) + ((payload_ptr[2] & 0xFC) >> 2);
header->timestamp -= offset;
headers->push_front(header);
payload_ptr += 4;
}
// Last header.
assert(payload_ptr < payload_end_ptr);
if (payload_ptr >= payload_end_ptr) {
return false; // Payload too short.
}
RTPHeader* header = new RTPHeader;
CopyToHeader(header);
header->payloadType = payload_ptr[0] & 0x7F;
headers->push_front(header);
return true;
}
void Packet::DeleteRedHeaders(std::list<RTPHeader*>* headers) {
while (!headers->empty()) {
delete headers->front();
headers->pop_front();
}
}
bool Packet::ParseHeader(const RtpHeaderParser& parser) {
bool valid_header = parser.Parse(
payload_memory_.get(), static_cast<int>(packet_length_bytes_), &header_);
// Special case for dummy packets that have padding marked in the RTP header.
// This causes the RTP header parser to report failure, but is fine in this
// context.
const bool header_only_with_padding =
(header_.headerLength == packet_length_bytes_ &&
header_.paddingLength > 0);
if (!valid_header && !header_only_with_padding) {
return false;
}
assert(header_.headerLength <= packet_length_bytes_);
payload_ = &payload_memory_[header_.headerLength];
assert(packet_length_bytes_ >= header_.headerLength);
payload_length_bytes_ = packet_length_bytes_ - header_.headerLength;
RTC_CHECK_GE(virtual_packet_length_bytes_, packet_length_bytes_);
assert(virtual_packet_length_bytes_ >= header_.headerLength);
virtual_payload_length_bytes_ =
virtual_packet_length_bytes_ - header_.headerLength;
return true;
}
void Packet::CopyToHeader(RTPHeader* destination) const {
destination->markerBit = header_.markerBit;
destination->payloadType = header_.payloadType;
destination->sequenceNumber = header_.sequenceNumber;
destination->timestamp = header_.timestamp;
destination->ssrc = header_.ssrc;
destination->numCSRCs = header_.numCSRCs;
destination->paddingLength = header_.paddingLength;
destination->headerLength = header_.headerLength;
destination->payload_type_frequency = header_.payload_type_frequency;
memcpy(&destination->arrOfCSRCs,
&header_.arrOfCSRCs,
sizeof(header_.arrOfCSRCs));
memcpy(
&destination->extension, &header_.extension, sizeof(header_.extension));
}
} // namespace test
} // namespace webrtc
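Not part of this change: a stand-alone sketch of the RFC 2198 block-header layout that ExtractRedHeaders() above decodes. The struct and function names are made up for illustration:

#include <stdint.h>

struct RedBlockHeader {
  bool last_block;            // True when the extension bit (MSB) is 0.
  uint8_t payload_type;       // 7-bit block payload type.
  uint32_t timestamp_offset;  // 14-bit offset; only present if !last_block.
  uint16_t block_length;      // 10-bit length; only present if !last_block.
};

// Decodes one RED block header starting at |p| (1 byte if it is the last
// block, 4 bytes otherwise), mirroring the bit manipulation in
// Packet::ExtractRedHeaders().
RedBlockHeader ParseRedBlockHeader(const uint8_t* p) {
  RedBlockHeader h;
  h.last_block = (p[0] & 0x80) == 0;
  h.payload_type = p[0] & 0x7F;
  if (!h.last_block) {
    h.timestamp_offset = (p[1] << 6) | ((p[2] & 0xFC) >> 2);
    h.block_length = ((p[2] & 0x03) << 8) | p[3];
  } else {
    h.timestamp_offset = 0;
    h.block_length = 0;
  }
  return h;
}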

View File

@ -0,0 +1,117 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
#include <list>
#include <memory>
#include "webrtc/common_types.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class RtpHeaderParser;
namespace test {
// Class for handling RTP packets in test applications.
class Packet {
public:
// Creates a packet, with the packet payload (including header bytes) in
// |packet_memory|. The length of |packet_memory| is |allocated_bytes|.
// The new object assumes ownership of |packet_memory| and will delete it
// when the Packet object is deleted. The |time_ms| is an extra time
// associated with this packet, typically used to denote arrival time.
// The first bytes in |packet_memory| will be parsed using |parser|.
Packet(uint8_t* packet_memory,
size_t allocated_bytes,
double time_ms,
const RtpHeaderParser& parser);
// Same as above, but with the extra argument |virtual_packet_length_bytes|.
// This is typically used when reading RTP dump files that only contain the
// RTP headers, and no payload (a.k.a. RTP dummy files or RTP light). The
// |virtual_packet_length_bytes| tells what size the packet had on the wire,
// including the now discarded payload, whereas |allocated_bytes| is the
// length of the remaining payload (typically only the RTP header).
Packet(uint8_t* packet_memory,
size_t allocated_bytes,
size_t virtual_packet_length_bytes,
double time_ms,
const RtpHeaderParser& parser);
// The following two constructors are the same as above, but without a
// parser. Note that when the object is constructed using any of these
// methods, the header will be parsed using a default RtpHeaderParser object.
// In particular, RTP header extensions won't be parsed.
Packet(uint8_t* packet_memory, size_t allocated_bytes, double time_ms);
Packet(uint8_t* packet_memory,
size_t allocated_bytes,
size_t virtual_packet_length_bytes,
double time_ms);
virtual ~Packet();
// Parses the first bytes of the RTP payload, interpreting them as RED headers
// according to RFC 2198. The headers will be inserted into |headers|. The
// caller of the method assumes ownership of the objects in the list, and
// must delete them properly.
bool ExtractRedHeaders(std::list<RTPHeader*>* headers) const;
// Deletes all RTPHeader objects in |headers|, but does not delete |headers|
// itself.
static void DeleteRedHeaders(std::list<RTPHeader*>* headers);
const uint8_t* payload() const { return payload_; }
size_t packet_length_bytes() const { return packet_length_bytes_; }
size_t payload_length_bytes() const { return payload_length_bytes_; }
size_t virtual_packet_length_bytes() const {
return virtual_packet_length_bytes_;
}
size_t virtual_payload_length_bytes() const {
return virtual_payload_length_bytes_;
}
const RTPHeader& header() const { return header_; }
void set_time_ms(double time) { time_ms_ = time; }
double time_ms() const { return time_ms_; }
bool valid_header() const { return valid_header_; }
private:
bool ParseHeader(const RtpHeaderParser& parser);
void CopyToHeader(RTPHeader* destination) const;
RTPHeader header_;
std::unique_ptr<uint8_t[]> payload_memory_;
const uint8_t* payload_; // First byte after header.
const size_t packet_length_bytes_; // Total length of packet.
size_t payload_length_bytes_; // Length of the payload, after RTP header.
// Zero for dummy RTP packets.
// Virtual lengths are used when parsing RTP header files (dummy RTP files).
const size_t virtual_packet_length_bytes_;
size_t virtual_payload_length_bytes_;
double time_ms_; // Used to denote a packet's arrival time.
bool valid_header_; // Set by the RtpHeaderParser.
RTC_DISALLOW_COPY_AND_ASSIGN(Packet);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_H_
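A minimal usage sketch for the Packet class (not part of this change); it assumes the caller owns a heap-allocated buffer holding a complete RTP packet, and the function name is hypothetical:

#include <stddef.h>

#include <memory>

#include "webrtc/modules/audio_coding/neteq/tools/packet.h"

void InspectPacketSketch(std::unique_ptr<uint8_t[]> rtp_data,
                         size_t length_bytes,
                         double arrival_time_ms) {
  // Packet takes ownership of the buffer, so release it from the unique_ptr.
  webrtc::test::Packet packet(rtp_data.release(), length_bytes,
                              arrival_time_ms);
  if (!packet.valid_header())
    return;  // The header could not be parsed.
  const webrtc::RTPHeader& header = packet.header();
  // header.payloadType, header.sequenceNumber, header.timestamp and
  // header.ssrc are now available, as are packet.payload() and
  // packet.payload_length_bytes().
  static_cast<void>(header);
}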

View File

@ -0,0 +1,30 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
namespace webrtc {
namespace test {
PacketSource::PacketSource() : use_ssrc_filter_(false), ssrc_(0) {}
PacketSource::~PacketSource() = default;
void PacketSource::FilterOutPayloadType(uint8_t payload_type) {
filter_.set(payload_type, true);
}
void PacketSource::SelectSsrc(uint32_t ssrc) {
use_ssrc_filter_ = true;
ssrc_ = ssrc;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_
#include <bitset>
#include <memory>
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
// Interface class for an object delivering RTP packets to test applications.
class PacketSource {
public:
PacketSource();
virtual ~PacketSource();
// Returns next packet. Returns nullptr if the source is depleted, or if an
// error occurred.
virtual std::unique_ptr<Packet> NextPacket() = 0;
virtual void FilterOutPayloadType(uint8_t payload_type);
virtual void SelectSsrc(uint32_t ssrc);
protected:
std::bitset<128> filter_; // Payload type is 7 bits in the RFC.
// When SSRC filtering is active, packets whose SSRC does not match |ssrc_|
// are discarded.
bool use_ssrc_filter_; // True when SSRC filtering is active.
uint32_t ssrc_; // The selected SSRC. All other SSRCs will be discarded.
private:
RTC_DISALLOW_COPY_AND_ASSIGN(PacketSource);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_PACKET_SOURCE_H_

View File

@ -0,0 +1,202 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Unit tests for the test::Packet class.
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
namespace {
const int kHeaderLengthBytes = 12;
void MakeRtpHeader(int payload_type,
int seq_number,
uint32_t timestamp,
uint32_t ssrc,
uint8_t* rtp_data) {
rtp_data[0] = 0x80;
rtp_data[1] = static_cast<uint8_t>(payload_type);
rtp_data[2] = (seq_number >> 8) & 0xFF;
rtp_data[3] = (seq_number) & 0xFF;
rtp_data[4] = timestamp >> 24;
rtp_data[5] = (timestamp >> 16) & 0xFF;
rtp_data[6] = (timestamp >> 8) & 0xFF;
rtp_data[7] = timestamp & 0xFF;
rtp_data[8] = ssrc >> 24;
rtp_data[9] = (ssrc >> 16) & 0xFF;
rtp_data[10] = (ssrc >> 8) & 0xFF;
rtp_data[11] = ssrc & 0xFF;
}
} // namespace
TEST(TestPacket, RegularPacket) {
const size_t kPacketLengthBytes = 100;
uint8_t* packet_memory = new uint8_t[kPacketLengthBytes];
const uint8_t kPayloadType = 17;
const uint16_t kSequenceNumber = 4711;
const uint32_t kTimestamp = 47114711;
const uint32_t kSsrc = 0x12345678;
MakeRtpHeader(
kPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
const double kPacketTime = 1.0;
// Hand over ownership of |packet_memory| to |packet|.
Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
ASSERT_TRUE(packet.valid_header());
EXPECT_EQ(kPayloadType, packet.header().payloadType);
EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
EXPECT_EQ(kTimestamp, packet.header().timestamp);
EXPECT_EQ(kSsrc, packet.header().ssrc);
EXPECT_EQ(0, packet.header().numCSRCs);
EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
packet.payload_length_bytes());
EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
packet.virtual_payload_length_bytes());
EXPECT_EQ(kPacketTime, packet.time_ms());
}
TEST(TestPacket, DummyPacket) {
const size_t kPacketLengthBytes = kHeaderLengthBytes; // Only RTP header.
const size_t kVirtualPacketLengthBytes = 100;
uint8_t* packet_memory = new uint8_t[kPacketLengthBytes];
const uint8_t kPayloadType = 17;
const uint16_t kSequenceNumber = 4711;
const uint32_t kTimestamp = 47114711;
const uint32_t kSsrc = 0x12345678;
MakeRtpHeader(
kPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
const double kPacketTime = 1.0;
// Hand over ownership of |packet_memory| to |packet|.
Packet packet(packet_memory,
kPacketLengthBytes,
kVirtualPacketLengthBytes,
kPacketTime);
ASSERT_TRUE(packet.valid_header());
EXPECT_EQ(kPayloadType, packet.header().payloadType);
EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
EXPECT_EQ(kTimestamp, packet.header().timestamp);
EXPECT_EQ(kSsrc, packet.header().ssrc);
EXPECT_EQ(0, packet.header().numCSRCs);
EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
packet.payload_length_bytes());
EXPECT_EQ(kVirtualPacketLengthBytes, packet.virtual_packet_length_bytes());
EXPECT_EQ(kVirtualPacketLengthBytes - kHeaderLengthBytes,
packet.virtual_payload_length_bytes());
EXPECT_EQ(kPacketTime, packet.time_ms());
}
namespace {
// Writes one RED block header starting at |rtp_data|, according to RFC 2198.
// Returns the number of bytes written (1 or 4).
//
// Format if |last_payload| is false:
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |1| block PT | timestamp offset | block length |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// Format if |last_payload| is true:
// 0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |0| Block PT |
// +-+-+-+-+-+-+-+-+
int MakeRedHeader(int payload_type,
uint32_t timestamp_offset,
int block_length,
bool last_payload,
uint8_t* rtp_data) {
rtp_data[0] = 0x80 | (payload_type & 0x7F); // Set the first bit to 1.
if (last_payload) {
rtp_data[0] &= 0x7F;  // Reset the first bit to 0 to indicate the last block.
return 1;
}
rtp_data[1] = timestamp_offset >> 6;
rtp_data[2] = (timestamp_offset & 0x3F) << 2;
rtp_data[2] |= block_length >> 8;
rtp_data[3] = block_length & 0xFF;
return 4;
}
} // namespace
TEST(TestPacket, RED) {
const size_t kPacketLengthBytes = 100;
uint8_t* packet_memory = new uint8_t[kPacketLengthBytes];
const uint8_t kRedPayloadType = 17;
const uint16_t kSequenceNumber = 4711;
const uint32_t kTimestamp = 47114711;
const uint32_t kSsrc = 0x12345678;
MakeRtpHeader(
kRedPayloadType, kSequenceNumber, kTimestamp, kSsrc, packet_memory);
// Create four RED headers.
// Payload types are the same as the block index; the timestamp offset is 100
// times the block index.
const int kRedBlocks = 4;
uint8_t* payload_ptr =
&packet_memory[kHeaderLengthBytes]; // First byte after header.
for (int i = 0; i < kRedBlocks; ++i) {
int payload_type = i;
// Offset value is not used for the last block.
uint32_t timestamp_offset = 100 * i;
int block_length = 10 * i;
bool last_block = (i == kRedBlocks - 1);
payload_ptr += MakeRedHeader(
payload_type, timestamp_offset, block_length, last_block, payload_ptr);
}
const double kPacketTime = 1.0;
// Hand over ownership of |packet_memory| to |packet|.
Packet packet(packet_memory, kPacketLengthBytes, kPacketTime);
ASSERT_TRUE(packet.valid_header());
EXPECT_EQ(kRedPayloadType, packet.header().payloadType);
EXPECT_EQ(kSequenceNumber, packet.header().sequenceNumber);
EXPECT_EQ(kTimestamp, packet.header().timestamp);
EXPECT_EQ(kSsrc, packet.header().ssrc);
EXPECT_EQ(0, packet.header().numCSRCs);
EXPECT_EQ(kPacketLengthBytes, packet.packet_length_bytes());
EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
packet.payload_length_bytes());
EXPECT_EQ(kPacketLengthBytes, packet.virtual_packet_length_bytes());
EXPECT_EQ(kPacketLengthBytes - kHeaderLengthBytes,
packet.virtual_payload_length_bytes());
EXPECT_EQ(kPacketTime, packet.time_ms());
std::list<RTPHeader*> red_headers;
EXPECT_TRUE(packet.ExtractRedHeaders(&red_headers));
EXPECT_EQ(kRedBlocks, static_cast<int>(red_headers.size()));
int block_index = 0;
for (std::list<RTPHeader*>::reverse_iterator it = red_headers.rbegin();
it != red_headers.rend();
++it) {
// Reading the list from the back, since the extraction puts the main payload
// (which is the last one on the wire) first.
RTPHeader* red_block = *it;
EXPECT_EQ(block_index, red_block->payloadType);
EXPECT_EQ(kSequenceNumber, red_block->sequenceNumber);
if (block_index == kRedBlocks - 1) {
// Last block has zero offset per definition.
EXPECT_EQ(kTimestamp, red_block->timestamp);
} else {
EXPECT_EQ(kTimestamp - 100 * block_index, red_block->timestamp);
}
EXPECT_EQ(kSsrc, red_block->ssrc);
EXPECT_EQ(0, red_block->numCSRCs);
++block_index;
}
Packet::DeleteRedHeaders(&red_headers);
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"
#include <memory>
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
bool ResampleInputAudioFile::Read(size_t samples,
int output_rate_hz,
int16_t* destination) {
const size_t samples_to_read = samples * file_rate_hz_ / output_rate_hz;
RTC_CHECK_EQ(samples_to_read * output_rate_hz, samples * file_rate_hz_)
<< "Frame size and sample rates don't add up to an integer.";
std::unique_ptr<int16_t[]> temp_destination(new int16_t[samples_to_read]);
if (!InputAudioFile::Read(samples_to_read, temp_destination.get()))
return false;
resampler_.ResetIfNeeded(file_rate_hz_, output_rate_hz, 1);
size_t output_length = 0;
RTC_CHECK_EQ(resampler_.Push(temp_destination.get(), samples_to_read,
destination, samples, output_length),
0);
RTC_CHECK_EQ(samples, output_length);
return true;
}
bool ResampleInputAudioFile::Read(size_t samples, int16_t* destination) {
RTC_CHECK_GT(output_rate_hz_, 0) << "Output rate not set.";
return Read(samples, output_rate_hz_, destination);
}
void ResampleInputAudioFile::set_output_rate_hz(int rate_hz) {
output_rate_hz_ = rate_hz;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
#include <string>
#include "webrtc/common_audio/resampler/include/resampler.h"
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
// Class for handling a looping input audio file with resampling.
class ResampleInputAudioFile : public InputAudioFile {
public:
ResampleInputAudioFile(const std::string file_name, int file_rate_hz)
: InputAudioFile(file_name),
file_rate_hz_(file_rate_hz),
output_rate_hz_(-1) {}
ResampleInputAudioFile(const std::string file_name,
int file_rate_hz,
int output_rate_hz)
: InputAudioFile(file_name),
file_rate_hz_(file_rate_hz),
output_rate_hz_(output_rate_hz) {}
bool Read(size_t samples, int output_rate_hz, int16_t* destination);
bool Read(size_t samples, int16_t* destination) override;
void set_output_rate_hz(int rate_hz);
private:
const int file_rate_hz_;
int output_rate_hz_;
Resampler resampler_;
RTC_DISALLOW_COPY_AND_ASSIGN(ResampleInputAudioFile);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RESAMPLE_INPUT_AUDIO_FILE_H_
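Not part of this change: a small usage sketch for ResampleInputAudioFile, assuming a 48 kHz source file (placeholder name). Note that Read() requires samples * file_rate_hz / output_rate_hz to be an integer, which holds for the 10 ms frames used here:

#include <stdint.h>

#include "webrtc/modules/audio_coding/neteq/tools/resample_input_audio_file.h"

void ReadResampledSketch() {
  const int kFileRateHz = 48000;
  const int kOutputRateHz = 16000;
  const size_t kOutputSamplesPer10Ms = kOutputRateHz / 100;  // 160 samples.

  webrtc::test::ResampleInputAudioFile file("speech_48k.pcm", kFileRateHz,
                                            kOutputRateHz);
  int16_t frame[kOutputSamplesPer10Ms];
  if (file.Read(kOutputSamplesPer10Ms, frame)) {
    // |frame| now holds 10 ms of audio resampled to 16 kHz.
  }
}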

View File

@ -0,0 +1,109 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h"
#include <assert.h>
#include <string.h>
#include <iostream>
#include <limits>
#include "webrtc/call/call.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
RtcEventLogSource* RtcEventLogSource::Create(const std::string& file_name) {
RtcEventLogSource* source = new RtcEventLogSource();
RTC_CHECK(source->OpenFile(file_name));
return source;
}
RtcEventLogSource::~RtcEventLogSource() {}
bool RtcEventLogSource::RegisterRtpHeaderExtension(RTPExtensionType type,
uint8_t id) {
RTC_CHECK(parser_.get());
return parser_->RegisterRtpHeaderExtension(type, id);
}
std::unique_ptr<Packet> RtcEventLogSource::NextPacket() {
for (; rtp_packet_index_ < parsed_stream_.GetNumberOfEvents();
rtp_packet_index_++) {
if (parsed_stream_.GetEventType(rtp_packet_index_) ==
ParsedRtcEventLog::RTP_EVENT) {
PacketDirection direction;
size_t header_length;
size_t packet_length;
uint64_t timestamp_us = parsed_stream_.GetTimestamp(rtp_packet_index_);
parsed_stream_.GetRtpHeader(rtp_packet_index_, &direction, nullptr,
&header_length, &packet_length);
if (direction != kIncomingPacket) {
continue;
}
uint8_t* packet_header = new uint8_t[header_length];
parsed_stream_.GetRtpHeader(rtp_packet_index_, nullptr, packet_header,
nullptr, nullptr);
std::unique_ptr<Packet> packet(
new Packet(packet_header, header_length, packet_length,
static_cast<double>(timestamp_us) / 1000, *parser_.get()));
if (!packet->valid_header()) {
std::cout << "Warning: Packet with index " << rtp_packet_index_
<< " has an invalid header and will be ignored." << std::endl;
continue;
}
if (parsed_stream_.GetMediaType(packet->header().ssrc, direction) !=
webrtc::ParsedRtcEventLog::MediaType::AUDIO) {
continue;
}
// Return the packet only if it is not filtered out by payload type or SSRC.
if (!filter_.test(packet->header().payloadType) &&
!(use_ssrc_filter_ && packet->header().ssrc != ssrc_)) {
++rtp_packet_index_;
return packet;
}
}
}
return nullptr;
}
int64_t RtcEventLogSource::NextAudioOutputEventMs() {
while (audio_output_index_ < parsed_stream_.GetNumberOfEvents()) {
if (parsed_stream_.GetEventType(audio_output_index_) ==
ParsedRtcEventLog::AUDIO_PLAYOUT_EVENT) {
uint64_t timestamp_us = parsed_stream_.GetTimestamp(audio_output_index_);
// We call GetAudioPlayout only to check that the protobuf event is
// well-formed.
parsed_stream_.GetAudioPlayout(audio_output_index_, nullptr);
audio_output_index_++;
return timestamp_us / 1000;
}
audio_output_index_++;
}
return std::numeric_limits<int64_t>::max();
}
RtcEventLogSource::RtcEventLogSource()
: PacketSource(), parser_(RtpHeaderParser::Create()) {}
bool RtcEventLogSource::OpenFile(const std::string& file_name) {
return parsed_stream_.ParseFile(file_name);
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
#include <memory>
#include <string>
#include "webrtc/logging/rtc_event_log/rtc_event_log_parser.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/rtc_base/constructormagic.h"
namespace webrtc {
class RtpHeaderParser;
namespace test {
class Packet;
class RtcEventLogSource : public PacketSource {
public:
// Creates an RtcEventLogSource reading from |file_name|. If the file cannot
// be opened, or has the wrong format, NULL will be returned.
static RtcEventLogSource* Create(const std::string& file_name);
virtual ~RtcEventLogSource();
// Registers an RTP header extension and binds it to |id|.
virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
std::unique_ptr<Packet> NextPacket() override;
// Returns the timestamp of the next audio output event, in milliseconds. The
// maximum value of int64_t is returned if there are no more audio output
// events available.
int64_t NextAudioOutputEventMs();
private:
RtcEventLogSource();
bool OpenFile(const std::string& file_name);
size_t rtp_packet_index_ = 0;
size_t audio_output_index_ = 0;
ParsedRtcEventLog parsed_stream_;
std::unique_ptr<RtpHeaderParser> parser_;
RTC_DISALLOW_COPY_AND_ASSIGN(RtcEventLogSource);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTC_EVENT_LOG_SOURCE_H_
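Not part of this change: a sketch of how the two event streams exposed by RtcEventLogSource might be consumed; "log.rtc" is a placeholder file name:

#include <stdint.h>

#include <limits>
#include <memory>

#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtc_event_log_source.h"

void DumpEventLogSketch() {
  std::unique_ptr<webrtc::test::RtcEventLogSource> source(
      webrtc::test::RtcEventLogSource::Create("log.rtc"));
  // Drain all incoming audio RTP packets from the log.
  while (std::unique_ptr<webrtc::test::Packet> packet = source->NextPacket()) {
    // packet->header() and packet->time_ms() describe the logged packet.
  }
  // Audio playout events are exposed separately, as timestamps in ms.
  int64_t playout_time_ms;
  while ((playout_time_ms = source->NextAudioOutputEventMs()) !=
         std::numeric_limits<int64_t>::max()) {
    // One playout event occurred at |playout_time_ms|.
  }
}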

View File

@ -0,0 +1,174 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include <stdio.h>
#include <memory>
#include <vector>
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include "webrtc/rtc_base/flags.h"
// Define command line flags.
DEFINE_int(red, 117, "RTP payload type for RED");
DEFINE_int(audio_level, -1, "Extension ID for audio level (RFC 6464); "
"-1 not to print audio level");
DEFINE_int(abs_send_time, -1, "Extension ID for absolute sender time; "
"-1 not to print absolute send time");
DEFINE_bool(help, false, "Print this message");
int main(int argc, char* argv[]) {
std::string program_name = argv[0];
std::string usage =
"Tool for parsing an RTP dump file to text output.\n"
"Run " +
program_name +
" --help for usage.\n"
"Example usage:\n" +
program_name + " input.rtp output.txt\n\n" +
"Output is sent to stdout if no output file is given. " +
"Note that this tool can read files with or without payloads.\n";
if (rtc::FlagList::SetFlagsFromCommandLine(&argc, argv, true) ||
FLAG_help || (argc != 2 && argc != 3)) {
printf("%s", usage.c_str());
if (FLAG_help) {
rtc::FlagList::Print(nullptr, false);
return 0;
}
return 1;
}
RTC_CHECK(FLAG_red >= 0 && FLAG_red <= 127); // Payload type
RTC_CHECK(FLAG_audio_level == -1 || // Default
(FLAG_audio_level > 0 && FLAG_audio_level <= 255)); // Extension ID
RTC_CHECK(FLAG_abs_send_time == -1 || // Default
(FLAG_abs_send_time > 0 && FLAG_abs_send_time <= 255)); // Extension ID
printf("Input file: %s\n", argv[1]);
std::unique_ptr<webrtc::test::RtpFileSource> file_source(
webrtc::test::RtpFileSource::Create(argv[1]));
assert(file_source.get());
// Set RTP extension IDs.
bool print_audio_level = false;
if (FLAG_audio_level != -1) {
print_audio_level = true;
file_source->RegisterRtpHeaderExtension(webrtc::kRtpExtensionAudioLevel,
FLAG_audio_level);
}
bool print_abs_send_time = false;
if (FLAG_abs_send_time != -1) {
print_abs_send_time = true;
file_source->RegisterRtpHeaderExtension(
webrtc::kRtpExtensionAbsoluteSendTime, FLAG_abs_send_time);
}
FILE* out_file;
if (argc == 3) {
out_file = fopen(argv[2], "wt");
if (!out_file) {
printf("Cannot open output file %s\n", argv[2]);
return -1;
}
printf("Output file: %s\n\n", argv[2]);
} else {
out_file = stdout;
}
// Print file header.
fprintf(out_file, "SeqNo TimeStamp SendTime Size PT M SSRC");
if (print_audio_level) {
fprintf(out_file, " AuLvl (V)");
}
if (print_abs_send_time) {
fprintf(out_file, " AbsSendTime");
}
fprintf(out_file, "\n");
uint32_t max_abs_send_time = 0;
int cycles = -1;
std::unique_ptr<webrtc::test::Packet> packet;
while (true) {
packet = file_source->NextPacket();
if (!packet.get()) {
// End of file reached.
break;
}
// Write packet data to file. Use virtual_packet_length_bytes so that the
// correct packet sizes are printed also for RTP header-only dumps.
fprintf(out_file,
"%5u %10u %10u %5i %5i %2i %#08X",
packet->header().sequenceNumber,
packet->header().timestamp,
static_cast<unsigned int>(packet->time_ms()),
static_cast<int>(packet->virtual_packet_length_bytes()),
packet->header().payloadType,
packet->header().markerBit,
packet->header().ssrc);
if (print_audio_level && packet->header().extension.hasAudioLevel) {
fprintf(out_file,
" %5u (%1i)",
packet->header().extension.audioLevel,
packet->header().extension.voiceActivity);
}
if (print_abs_send_time && packet->header().extension.hasAbsoluteSendTime) {
if (cycles == -1) {
// Initialize.
max_abs_send_time = packet->header().extension.absoluteSendTime;
cycles = 0;
}
// Abs sender time is 24 bit 6.18 fixed point. Shift by 8 to normalize to
// 32 bits (unsigned). Calculate the difference between this packet's
// send time and the maximum observed. Cast to signed 32-bit to get the
// desired wrap-around behavior.
if (static_cast<int32_t>(
(packet->header().extension.absoluteSendTime << 8) -
(max_abs_send_time << 8)) >= 0) {
// The difference is non-negative, meaning that this packet is newer
// than the previously observed maximum absolute send time.
if (packet->header().extension.absoluteSendTime < max_abs_send_time) {
// Wrap detected.
cycles++;
}
max_abs_send_time = packet->header().extension.absoluteSendTime;
}
// Abs sender time is 24 bit 6.18 fixed point. Divide by 2^18 to convert
// to floating point representation.
double send_time_seconds =
static_cast<double>(packet->header().extension.absoluteSendTime) /
262144 +
64.0 * cycles;
fprintf(out_file, " %11f", send_time_seconds);
}
fprintf(out_file, "\n");
if (packet->header().payloadType == FLAG_red) {
std::list<webrtc::RTPHeader*> red_headers;
packet->ExtractRedHeaders(&red_headers);
while (!red_headers.empty()) {
webrtc::RTPHeader* red = red_headers.front();
assert(red);
fprintf(out_file,
"* %5u %10u %10u %5i\n",
red->sequenceNumber,
red->timestamp,
static_cast<unsigned int>(packet->time_ms()),
red->payloadType);
red_headers.pop_front();
delete red;
}
}
}
fclose(out_file);
return 0;
}
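Not part of this change: the absolute-send-time arithmetic used above, isolated as a stand-alone sketch. The 24-bit value is 6.18 fixed point, so it wraps every 64 seconds and one unit is 1/262144 s:

#include <stdint.h>

// Converts a 24-bit 6.18 fixed-point absolute send time to seconds, given the
// number of 64-second wrap-arounds (cycles) observed so far.
double AbsSendTimeToSeconds(uint32_t abs_send_time_24bit, int cycles) {
  return static_cast<double>(abs_send_time_24bit) / 262144.0 + 64.0 * cycles;
}

// Examples:
//   AbsSendTimeToSeconds(0x000000, 0) == 0.0
//   AbsSendTimeToSeconds(0x800000, 0) == 32.0  (half of the 24-bit range)
//   AbsSendTimeToSeconds(0x000000, 1) == 64.0  (after one wrap)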

View File

@ -0,0 +1,103 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
#include <assert.h>
#include <string.h>
#ifdef WIN32
#include <winsock2.h>
#else
#include <netinet/in.h>
#endif
#include <memory>
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/test/rtp_file_reader.h"
namespace webrtc {
namespace test {
RtpFileSource* RtpFileSource::Create(const std::string& file_name) {
RtpFileSource* source = new RtpFileSource();
RTC_CHECK(source->OpenFile(file_name));
return source;
}
bool RtpFileSource::ValidRtpDump(const std::string& file_name) {
std::unique_ptr<RtpFileReader> temp_file(
RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
return !!temp_file;
}
bool RtpFileSource::ValidPcap(const std::string& file_name) {
std::unique_ptr<RtpFileReader> temp_file(
RtpFileReader::Create(RtpFileReader::kPcap, file_name));
return !!temp_file;
}
RtpFileSource::~RtpFileSource() {
}
bool RtpFileSource::RegisterRtpHeaderExtension(RTPExtensionType type,
uint8_t id) {
assert(parser_.get());
return parser_->RegisterRtpHeaderExtension(type, id);
}
std::unique_ptr<Packet> RtpFileSource::NextPacket() {
while (true) {
RtpPacket temp_packet;
if (!rtp_reader_->NextPacket(&temp_packet)) {
return NULL;
}
if (temp_packet.original_length == 0) {
// May be an RTCP packet.
// Read the next one.
continue;
}
std::unique_ptr<uint8_t[]> packet_memory(new uint8_t[temp_packet.length]);
memcpy(packet_memory.get(), temp_packet.data, temp_packet.length);
std::unique_ptr<Packet> packet(new Packet(
packet_memory.release(), temp_packet.length,
temp_packet.original_length, temp_packet.time_ms, *parser_.get()));
if (!packet->valid_header()) {
continue;
}
if (filter_.test(packet->header().payloadType) ||
(use_ssrc_filter_ && packet->header().ssrc != ssrc_)) {
// This payload type should be filtered out. Continue to the next packet.
continue;
}
return packet;
}
}
RtpFileSource::RtpFileSource()
: PacketSource(),
parser_(RtpHeaderParser::Create()) {}
bool RtpFileSource::OpenFile(const std::string& file_name) {
rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kRtpDump, file_name));
if (rtp_reader_)
return true;
rtp_reader_.reset(RtpFileReader::Create(RtpFileReader::kPcap, file_name));
if (!rtp_reader_) {
FATAL() << "Couldn't open input file as either a rtpdump or .pcap. Note "
"that .pcapng is not supported.";
}
return true;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
#include <stdio.h>
#include <memory>
#include <string>
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "webrtc/rtc_base/constructormagic.h"
namespace webrtc {
class RtpHeaderParser;
namespace test {
class RtpFileReader;
class RtpFileSource : public PacketSource {
public:
// Creates an RtpFileSource reading from |file_name|. If the file cannot be
// opened, or has the wrong format, NULL will be returned.
static RtpFileSource* Create(const std::string& file_name);
// Checks whether a file is a valid RTP dump or PCAP (Wireshark) file.
static bool ValidRtpDump(const std::string& file_name);
static bool ValidPcap(const std::string& file_name);
virtual ~RtpFileSource();
// Registers an RTP header extension and binds it to |id|.
virtual bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id);
std::unique_ptr<Packet> NextPacket() override;
private:
static const int kFirstLineLength = 40;
static const int kRtpFileHeaderSize = 4 + 4 + 4 + 2 + 2;
static const size_t kPacketHeaderSize = 8;
RtpFileSource();
bool OpenFile(const std::string& file_name);
std::unique_ptr<RtpFileReader> rtp_reader_;
std::unique_ptr<RtpHeaderParser> parser_;
RTC_DISALLOW_COPY_AND_ASSIGN(RtpFileSource);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_FILE_SOURCE_H_
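Not part of this change: a minimal usage sketch for RtpFileSource; "dump.rtp" is a placeholder for an rtpdump or pcap file:

#include <memory>

#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"

void ReadRtpDumpSketch() {
  std::unique_ptr<webrtc::test::RtpFileSource> source(
      webrtc::test::RtpFileSource::Create("dump.rtp"));
  source->FilterOutPayloadType(13);  // E.g. drop comfort noise (CN) packets.
  while (std::unique_ptr<webrtc::test::Packet> packet = source->NextPacket()) {
    // Each packet carries the parsed RTP header and, if present, the payload.
  }
}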

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
namespace webrtc {
namespace test {
uint32_t RtpGenerator::GetRtpHeader(uint8_t payload_type,
size_t payload_length_samples,
RTPHeader* rtp_header) {
assert(rtp_header);
if (!rtp_header) {
return 0;
}
rtp_header->sequenceNumber = seq_number_++;
rtp_header->timestamp = timestamp_;
timestamp_ += static_cast<uint32_t>(payload_length_samples);
rtp_header->payloadType = payload_type;
rtp_header->markerBit = false;
rtp_header->ssrc = ssrc_;
rtp_header->numCSRCs = 0;
uint32_t this_send_time = next_send_time_ms_;
assert(samples_per_ms_ > 0);
next_send_time_ms_ += ((1.0 + drift_factor_) * payload_length_samples) /
samples_per_ms_;
return this_send_time;
}
void RtpGenerator::set_drift_factor(double factor) {
if (factor > -1.0) {
drift_factor_ = factor;
}
}
uint32_t TimestampJumpRtpGenerator::GetRtpHeader(uint8_t payload_type,
size_t payload_length_samples,
RTPHeader* rtp_header) {
uint32_t ret = RtpGenerator::GetRtpHeader(
payload_type, payload_length_samples, rtp_header);
if (timestamp_ - static_cast<uint32_t>(payload_length_samples) <=
jump_from_timestamp_ &&
timestamp_ > jump_from_timestamp_) {
// We just moved across the |jump_from_timestamp_| timestamp. Do the jump.
timestamp_ = jump_to_timestamp_;
}
return ret;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,83 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
#define WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
#include "webrtc/common_types.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace test {
// Class for generating RTP headers.
class RtpGenerator {
public:
RtpGenerator(int samples_per_ms,
uint16_t start_seq_number = 0,
uint32_t start_timestamp = 0,
uint32_t start_send_time_ms = 0,
uint32_t ssrc = 0x12345678)
: seq_number_(start_seq_number),
timestamp_(start_timestamp),
next_send_time_ms_(start_send_time_ms),
ssrc_(ssrc),
samples_per_ms_(samples_per_ms),
drift_factor_(0.0) {
}
virtual ~RtpGenerator() {}
// Writes the next RTP header to |rtp_header|, which will be of type
// |payload_type|. Returns the send time for this packet (in ms). The value of
// |payload_length_samples| determines the send time for the next packet.
virtual uint32_t GetRtpHeader(uint8_t payload_type,
size_t payload_length_samples,
RTPHeader* rtp_header);
void set_drift_factor(double factor);
protected:
uint16_t seq_number_;
uint32_t timestamp_;
uint32_t next_send_time_ms_;
const uint32_t ssrc_;
const int samples_per_ms_;
double drift_factor_;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(RtpGenerator);
};
class TimestampJumpRtpGenerator : public RtpGenerator {
public:
TimestampJumpRtpGenerator(int samples_per_ms,
uint16_t start_seq_number,
uint32_t start_timestamp,
uint32_t jump_from_timestamp,
uint32_t jump_to_timestamp)
: RtpGenerator(samples_per_ms, start_seq_number, start_timestamp),
jump_from_timestamp_(jump_from_timestamp),
jump_to_timestamp_(jump_to_timestamp) {}
uint32_t GetRtpHeader(uint8_t payload_type,
size_t payload_length_samples,
RTPHeader* rtp_header) override;
private:
uint32_t jump_from_timestamp_;
uint32_t jump_to_timestamp_;
RTC_DISALLOW_COPY_AND_ASSIGN(TimestampJumpRtpGenerator);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_NETEQ_TOOLS_RTP_GENERATOR_H_
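Not part of this change: a usage sketch for RtpGenerator, producing headers for 20 ms packets of 16 kHz audio with a 10% clock drift (all numbers chosen for illustration):

#include <stdint.h>

#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"

void GenerateHeadersSketch() {
  const int kSamplesPerMs = 16;        // 16 kHz.
  const size_t kPayloadSamples = 320;  // 20 ms per packet.
  webrtc::test::RtpGenerator generator(kSamplesPerMs);
  generator.set_drift_factor(0.1);  // Each send time advances by an extra 10%.
  webrtc::RTPHeader header;
  for (int i = 0; i < 5; ++i) {
    uint32_t send_time_ms =
        generator.GetRtpHeader(/*payload_type=*/0, kPayloadSamples, &header);
    // The timestamp advances by 320 per packet; the returned send time by
    // (1 + 0.1) * 20 = 22 ms per packet.
    static_cast<void>(send_time_ms);
  }
}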

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <memory>
#include "webrtc/rtc_base/checks.h"
#include "webrtc/test/rtp_file_reader.h"
#include "webrtc/test/rtp_file_writer.h"
using webrtc::test::RtpFileReader;
using webrtc::test::RtpFileWriter;
int main(int argc, char* argv[]) {
if (argc < 3) {
printf("Concatenates multiple rtpdump files into one.\n");
printf("Usage: rtpcat in1.rtp int2.rtp [...] out.rtp\n");
exit(1);
}
std::unique_ptr<RtpFileWriter> output(
RtpFileWriter::Create(RtpFileWriter::kRtpDump, argv[argc - 1]));
RTC_CHECK(output.get() != NULL) << "Cannot open output file.";
printf("Output RTP file: %s\n", argv[argc - 1]);
for (int i = 1; i < argc - 1; i++) {
std::unique_ptr<RtpFileReader> input(
RtpFileReader::Create(RtpFileReader::kRtpDump, argv[i]));
RTC_CHECK(input.get() != NULL) << "Cannot open input file " << argv[i];
printf("Input RTP file: %s\n", argv[i]);
webrtc::test::RtpPacket packet;
while (input->NextPacket(&packet))
RTC_CHECK(output->WritePacket(&packet));
}
return 0;
}