Make AudioFrameType an enum class, and move to audio_coding_module_typedefs.h
Bug: webrtc:5876
Change-Id: I0c92f9410fcf0832bfa321229b3437134255dba6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/128085
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#27190}
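For reference, the scoped enum that the hunks below switch to would look roughly like this after the move. This is a minimal sketch, assuming the type lives in namespace webrtc in audio_coding_module_typedefs.h; the explicit numeric values are an assumption, and only the three enumerator names are confirmed by the diff.

// Sketch of AudioFrameType as a scoped enum (assumed layout; the explicit
// values are not confirmed by this diff, only the enumerator names are).
namespace webrtc {

enum class AudioFrameType {
  kEmptyFrame = 0,
  kAudioFrameSpeech = 1,
  kAudioFrameCN = 2,
};

}  // namespace webrtc

Because the enum is now scoped, the bare enumerators (kEmptyFrame, kAudioFrameSpeech, kAudioFrameCN) no longer compile on their own, so every initializer, comparison, and table entry in the hunks below gains an AudioFrameType:: prefix.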
@@ -36,7 +36,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
       : timestamp_(0),
         packet_sent_(false),
         last_packet_send_timestamp_(timestamp_),
-        last_frame_type_(kEmptyFrame) {
+        last_frame_type_(AudioFrameType::kEmptyFrame) {
     config_.decoder_factory = decoder_factory_;
   }
@@ -109,7 +109,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
                    const uint8_t* payload_data,
                    size_t payload_len_bytes,
                    const RTPFragmentationHeader* fragmentation) override {
-    if (frame_type == kEmptyFrame)
+    if (frame_type == AudioFrameType::kEmptyFrame)
       return 0;

     rtp_header_.payloadType = payload_type;
@@ -336,7 +336,7 @@ TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) {
       SetEncoder(0, codecs.at(0), cng_payload_types));  // Enough to test
                                                         // with one codec.
   ASSERT_TRUE(packet_sent_);
-  EXPECT_EQ(kAudioFrameCN, last_frame_type_);
+  EXPECT_EQ(AudioFrameType::kAudioFrameCN, last_frame_type_);

   // Has received, only, DTX. Last Audio codec is undefined.
   EXPECT_EQ(absl::nullopt, receiver_->LastDecoder());
@@ -353,7 +353,7 @@ TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) {
     // Sanity check if Actually an audio payload received, and it should be
     // of type "speech."
     ASSERT_TRUE(packet_sent_);
-    ASSERT_EQ(kAudioFrameSpeech, last_frame_type_);
+    ASSERT_EQ(AudioFrameType::kAudioFrameSpeech, last_frame_type_);
     EXPECT_EQ(info_without_cng.sample_rate_hz,
               receiver_->last_packet_sample_rate_hz());
@@ -361,7 +361,7 @@ TEST_F(AcmReceiverTestOldApi, MAYBE_LastAudioCodec) {
     // the expected codec. Encode repeatedly until a DTX is sent.
     const AudioCodecInfo info_with_cng =
         SetEncoder(payload_type, codecs.at(i), cng_payload_types);
-    while (last_frame_type_ != kAudioFrameCN) {
+    while (last_frame_type_ != AudioFrameType::kAudioFrameCN) {
       packet_sent_ = false;
       InsertOnePacketOfSilence(info_with_cng);
       ASSERT_TRUE(packet_sent_);
@@ -44,7 +44,7 @@ AcmSendTestOldApi::AcmSendTestOldApi(InputAudioFile* audio_source,
           static_cast<size_t>(source_rate_hz_ * kBlockSizeMs / 1000)),
       codec_registered_(false),
       test_duration_ms_(test_duration_ms),
-      frame_type_(kAudioFrameSpeech),
+      frame_type_(AudioFrameType::kAudioFrameSpeech),
       payload_type_(0),
       timestamp_(0),
       sequence_number_(0) {
@@ -395,11 +395,12 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   AudioFrameType frame_type;
   if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
-    frame_type = kEmptyFrame;
+    frame_type = AudioFrameType::kEmptyFrame;
     encoded_info.payload_type = previous_pltype;
   } else {
     RTC_DCHECK_GT(encode_buffer_.size(), 0);
-    frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
+    frame_type = encoded_info.speech ? AudioFrameType::kAudioFrameSpeech
+                                     : AudioFrameType::kAudioFrameCN;
   }

   {
@@ -100,7 +100,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
  public:
   PacketizationCallbackStubOldApi()
       : num_calls_(0),
-        last_frame_type_(kEmptyFrame),
+        last_frame_type_(AudioFrameType::kEmptyFrame),
         last_payload_type_(-1),
         last_timestamp_(0) {}
@@ -350,11 +350,12 @@ TEST_F(AudioCodingModuleTestOldApi, TransportCallbackIsInvokedForEachPacket) {
   for (int i = 0; i < kLoops; ++i) {
     EXPECT_EQ(i / k10MsBlocksPerPacket, packet_cb_.num_calls());
     if (packet_cb_.num_calls() > 0)
-      EXPECT_EQ(kAudioFrameSpeech, packet_cb_.last_frame_type());
+      EXPECT_EQ(AudioFrameType::kAudioFrameSpeech,
+                packet_cb_.last_frame_type());
     InsertAudioAndVerifyEncoding();
   }
   EXPECT_EQ(kLoops / k10MsBlocksPerPacket, packet_cb_.num_calls());
-  EXPECT_EQ(kAudioFrameSpeech, packet_cb_.last_frame_type());
+  EXPECT_EQ(AudioFrameType::kAudioFrameSpeech, packet_cb_.last_frame_type());
 }

 #if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
@@ -431,12 +432,19 @@ class AudioCodingModuleTestWithComfortNoiseOldApi
   const struct {
     int ix;
     AudioFrameType type;
-  } expectation[] = {
-      {2, kAudioFrameCN}, {5, kEmptyFrame}, {8, kEmptyFrame},
-      {11, kAudioFrameCN}, {14, kEmptyFrame}, {17, kEmptyFrame},
-      {20, kAudioFrameCN}, {23, kEmptyFrame}, {26, kEmptyFrame},
-      {29, kEmptyFrame}, {32, kAudioFrameCN}, {35, kEmptyFrame},
-      {38, kEmptyFrame}};
+  } expectation[] = {{2, AudioFrameType::kAudioFrameCN},
+                     {5, AudioFrameType::kEmptyFrame},
+                     {8, AudioFrameType::kEmptyFrame},
+                     {11, AudioFrameType::kAudioFrameCN},
+                     {14, AudioFrameType::kEmptyFrame},
+                     {17, AudioFrameType::kEmptyFrame},
+                     {20, AudioFrameType::kAudioFrameCN},
+                     {23, AudioFrameType::kEmptyFrame},
+                     {26, AudioFrameType::kEmptyFrame},
+                     {29, AudioFrameType::kEmptyFrame},
+                     {32, AudioFrameType::kAudioFrameCN},
+                     {35, AudioFrameType::kEmptyFrame},
+                     {38, AudioFrameType::kEmptyFrame}};
   for (int i = 0; i < kLoops; ++i) {
     int num_calls_before = packet_cb_.num_calls();
     EXPECT_EQ(i / blocks_per_packet, num_calls_before);