Unify FrameType and VideoFrameType.
Prevents some heap allocation and frame-type conversion that were only needed because the interfaces mismatched. It is also less confusing to have a single type for this.

BUG=webrtc:5042
R=magjed@webrtc.org, mflodman@webrtc.org, henrik.lundin@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org

Review URL: https://codereview.webrtc.org/1371043003

Cr-Commit-Position: refs/heads/master@{#10320}
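For context, a minimal sketch of the unified frame type this change converges on, with kEmptyFrame taking the place of the old kFrameEmpty. The authoritative definition lives in common_types.h and is not part of the hunks below, so the enumerator values here are assumptions inferred from the renames in this diff.

// Sketch only: one FrameType enum shared by the audio and video code paths.
// Values are assumed; see common_types.h for the real definition.
enum FrameType {
  kEmptyFrame = 0,        // DTX / nothing to send (was kFrameEmpty).
  kAudioFrameSpeech = 1,
  kAudioFrameCN = 2,      // Comfort noise.
  kVideoFrameKey = 3,     // Replaces the separate VideoFrameType enum.
  kVideoFrameDelta = 4,
};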
@@ -46,7 +46,7 @@ class AcmReceiverTest : public AudioPacketizationCallback,
       : timestamp_(0),
         packet_sent_(false),
         last_packet_send_timestamp_(timestamp_),
-        last_frame_type_(kFrameEmpty) {
+        last_frame_type_(kEmptyFrame) {
     AudioCoding::Config config;
     config.transport = this;
     acm_.reset(new AudioCodingImpl(config));
@@ -121,7 +121,7 @@ class AcmReceiverTest : public AudioPacketizationCallback,
                const uint8_t* payload_data,
                size_t payload_len_bytes,
                const RTPFragmentationHeader* fragmentation) override {
-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;
 
     rtp_header_.header.payloadType = payload_type;
@@ -46,7 +46,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
       : timestamp_(0),
         packet_sent_(false),
         last_packet_send_timestamp_(timestamp_),
-        last_frame_type_(kFrameEmpty) {
+        last_frame_type_(kEmptyFrame) {
     AudioCodingModule::Config config;
     acm_.reset(new AudioCodingModuleImpl(config));
     receiver_.reset(new AcmReceiver(config));
@@ -120,7 +120,7 @@ class AcmReceiverTestOldApi : public AudioPacketizationCallback,
                const uint8_t* payload_data,
                size_t payload_len_bytes,
                const RTPFragmentationHeader* fragmentation) override {
-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;
 
     rtp_header_.header.payloadType = payload_type;
@@ -171,7 +171,7 @@ int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
   ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   FrameType frame_type;
   if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
-    frame_type = kFrameEmpty;
+    frame_type = kEmptyFrame;
     encoded_info.payload_type = previous_pltype;
   } else {
     RTC_DCHECK_GT(encode_buffer_.size(), 0u);
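The Encode() hunk above is the producer side of the rename: when the speech encoder emits no payload bytes but still wants a frame reported (send_even_if_empty, e.g. for DTX), the frame is tagged kEmptyFrame and reuses the previous payload type. A minimal consumer-side sketch follows, shaped like the SendData() overrides in the test hunks; the function itself is hypothetical and only illustrates the kEmptyFrame check.

#include <cstddef>
#include <cstdint>

// Assumed subset of the unified enum.
enum FrameType { kEmptyFrame, kAudioFrameSpeech, kAudioFrameCN };

// Hypothetical transport callback: an empty (DTX) frame is never packetized
// or sent; the next non-empty frame carries the payload.
int32_t SendEncodedFrame(FrameType frame_type,
                         const uint8_t* payload_data,
                         size_t payload_len_bytes) {
  if (frame_type == kEmptyFrame)
    return 0;  // Nothing to transmit for this frame period.
  // Otherwise payload_len_bytes > 0: build an RTP packet from payload_data.
  return 0;
}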
@@ -92,7 +92,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
  public:
   PacketizationCallbackStubOldApi()
       : num_calls_(0),
-        last_frame_type_(kFrameEmpty),
+        last_frame_type_(kEmptyFrame),
         last_payload_type_(-1),
         last_timestamp_(0),
         crit_sect_(CriticalSectionWrapper::CreateCriticalSection()) {}
@@ -416,18 +416,18 @@ class AudioCodingModuleTestWithComfortNoiseOldApi
     int ix;
     FrameType type;
   } expectation[] = {{2, kAudioFrameCN},
-                     {5, kFrameEmpty},
-                     {8, kFrameEmpty},
+                     {5, kEmptyFrame},
+                     {8, kEmptyFrame},
                      {11, kAudioFrameCN},
-                     {14, kFrameEmpty},
-                     {17, kFrameEmpty},
+                     {14, kEmptyFrame},
+                     {17, kEmptyFrame},
                      {20, kAudioFrameCN},
-                     {23, kFrameEmpty},
-                     {26, kFrameEmpty},
-                     {29, kFrameEmpty},
+                     {23, kEmptyFrame},
+                     {26, kEmptyFrame},
+                     {29, kEmptyFrame},
                      {32, kAudioFrameCN},
-                     {35, kFrameEmpty},
-                     {38, kFrameEmpty}};
+                     {35, kEmptyFrame},
+                     {38, kEmptyFrame}};
   for (int i = 0; i < kLoops; ++i) {
     int num_calls_before = packet_cb_.num_calls();
     EXPECT_EQ(i / blocks_per_packet, num_calls_before);
@@ -447,7 +447,7 @@ class AudioCodingModuleTestWithComfortNoiseOldApi
 
 // Checks that the transport callback is invoked once per frame period of the
 // underlying speech encoder, even when comfort noise is produced.
-// Also checks that the frame type is kAudioFrameCN or kFrameEmpty.
+// Also checks that the frame type is kAudioFrameCN or kEmptyFrame.
 // This test and the next check the same thing, but differ in the order of
 // speech codec and CNG registration.
 TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
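Restated as a tiny self-contained check, this is the property those comments describe for frame periods inside a CNG/DTX stretch. The helper and main() below are illustrative only, not the actual test code.

#include <cassert>

// Assumed subset of the unified enum.
enum FrameType { kEmptyFrame, kAudioFrameSpeech, kAudioFrameCN };

// During comfort-noise periods the transport callback still fires once per
// frame period, but only with kAudioFrameCN or kEmptyFrame, never speech.
bool IsCngOrEmpty(FrameType type) {
  return type == kAudioFrameCN || type == kEmptyFrame;
}

int main() {
  assert(IsCngOrEmpty(kAudioFrameCN));
  assert(IsCngOrEmpty(kEmptyFrame));
  assert(!IsCngOrEmpty(kAudioFrameSpeech));
  return 0;
}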
@@ -42,7 +42,7 @@ int32_t Channel::SendData(FrameType frameType,
   } else {
     rtpInfo.type.Audio.isCNG = false;
   }
-  if (frameType == kFrameEmpty) {
+  if (frameType == kEmptyFrame) {
     // When frame is empty, we should not transmit it. The frame size of the
     // next non-empty frame will be based on the previous frame size.
     _useLastFrameSize = _lastFrameSizeSample > 0;
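The comment in that hunk carries the reasoning: an empty frame is not transmitted, and the next non-empty frame falls back to the previously observed frame size. A hedged sketch of that bookkeeping follows; _useLastFrameSize and _lastFrameSizeSample mirror the member names visible in the hunk, while the class, the timestamp math, and how the flag is consumed on the next non-empty frame are assumptions, since that part of Channel is not shown in this diff.

#include <cstdint>

// Assumed subset of the unified enum.
enum FrameType { kEmptyFrame, kAudioFrameSpeech, kAudioFrameCN };

// Hypothetical mini-version of the Channel bookkeeping around empty frames.
class FrameSizeTracker {
 public:
  // Returns the frame size (in samples) to use for this frame, or 0 if the
  // frame is empty and should not be transmitted at all.
  uint32_t OnFrame(FrameType type, uint32_t timestamp) {
    if (type == kEmptyFrame) {
      // Skip transmission; remember that the next non-empty frame should
      // reuse the previously observed frame size.
      _useLastFrameSize = _lastFrameSizeSample > 0;
      return 0;
    }
    uint32_t frame_size = _useLastFrameSize
                              ? _lastFrameSizeSample           // assumed reuse
                              : timestamp - _lastInTimestamp;  // assumed derivation
    _useLastFrameSize = false;
    _lastFrameSizeSample = frame_size;
    _lastInTimestamp = timestamp;
    return frame_size;
  }

 private:
  bool _useLastFrameSize = false;
  uint32_t _lastFrameSizeSample = 0;
  uint32_t _lastInTimestamp = 0;
};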
@@ -74,7 +74,7 @@ int32_t TestPack::SendData(FrameType frame_type, uint8_t payload_type,
   } else {
     rtp_info.type.Audio.isCNG = false;
   }
-  if (frame_type == kFrameEmpty) {
+  if (frame_type == kEmptyFrame) {
     // Skip this frame.
     return 0;
   }
@@ -58,7 +58,7 @@ int32_t TestPackStereo::SendData(const FrameType frame_type,
   rtp_info.header.sequenceNumber = seq_no_++;
   rtp_info.header.payloadType = payload_type;
   rtp_info.header.timestamp = timestamp;
-  if (frame_type == kFrameEmpty) {
+  if (frame_type == kEmptyFrame) {
     // Skip this frame
     return 0;
   }
@@ -44,7 +44,7 @@ int32_t ActivityMonitor::InFrameType(FrameType frame_type) {
 
 void ActivityMonitor::PrintStatistics() {
   printf("\n");
-  printf("kFrameEmpty %u\n", counter_[kFrameEmpty]);
+  printf("kEmptyFrame %u\n", counter_[kEmptyFrame]);
   printf("kAudioFrameSpeech %u\n", counter_[kAudioFrameSpeech]);
   printf("kAudioFrameCN %u\n", counter_[kAudioFrameCN]);
   printf("kVideoFrameKey %u\n", counter_[kVideoFrameKey]);
@@ -248,7 +248,7 @@ void TestOpusDtx::Perform() {
       32000, 1, out_filename, false, expects);
 
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
-  expects[kFrameEmpty] = 1;
+  expects[kEmptyFrame] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
       32000, 1, out_filename, true, expects);
 
@@ -256,13 +256,13 @@ void TestOpusDtx::Perform() {
   out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
   RegisterCodec(kOpusStereo);
   EXPECT_EQ(0, acm_send_->DisableOpusDtx());
-  expects[kFrameEmpty] = 0;
+  expects[kEmptyFrame] = 0;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, false, expects);
 
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
 
-  expects[kFrameEmpty] = 1;
+  expects[kEmptyFrame] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, true, expects);
 #endif
@@ -29,7 +29,7 @@ class ActivityMonitor : public ACMVADCallback {
   void ResetStatistics();
   void GetStatistics(uint32_t* stats);
  private:
-  // 0 - kFrameEmpty
+  // 0 - kEmptyFrame
   // 1 - kAudioFrameSpeech
   // 2 - kAudioFrameCN
   // 3 - kVideoFrameKey (not used by audio)
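The commented index list above describes how these test helpers keep statistics: one counter slot per FrameType value, indexed directly by the enum. A minimal sketch of that layout, assuming the increment happens in the InFrameType() callback seen in the PrintStatistics hunk earlier; the increment itself is not shown in this diff and is an assumption.

#include <cstdint>
#include <cstdio>

// Assumed subset of the unified enum; values match the "0 - kEmptyFrame,
// 1 - kAudioFrameSpeech, ..." index comments above.
enum FrameType {
  kEmptyFrame = 0,
  kAudioFrameSpeech = 1,
  kAudioFrameCN = 2,
  kVideoFrameKey = 3,
  kVideoFrameDelta = 4,
};

// One counter per frame type, indexed by the enum value.
uint32_t counter_[5] = {0};

// Assumed body of the VAD/DTX callback: tally each reported frame type.
int32_t InFrameType(FrameType frame_type) {
  ++counter_[frame_type];
  return 0;
}

void PrintStatistics() {
  std::printf("kEmptyFrame       %u\n", counter_[kEmptyFrame]);
  std::printf("kAudioFrameSpeech %u\n", counter_[kAudioFrameSpeech]);
  std::printf("kAudioFrameCN     %u\n", counter_[kAudioFrameCN]);
}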
@@ -60,7 +60,7 @@ class TestVadDtx : public ACMTest {
   // 0 : there have been no packets of type |x|,
   // 1 : there have been packets of type |x|,
   // with |x| indicates the following packet types
-  // 0 - kFrameEmpty
+  // 0 - kEmptyFrame
   // 1 - kAudioFrameSpeech
   // 2 - kAudioFrameCN
   // 3 - kVideoFrameKey (not used by audio)
@@ -288,7 +288,7 @@ VADCallback::VADCallback() {
 }
 
 void VADCallback::PrintFrameTypes() {
-  printf("kFrameEmpty......... %d\n", _numFrameTypes[kFrameEmpty]);
+  printf("kEmptyFrame......... %d\n", _numFrameTypes[kEmptyFrame]);
   printf("kAudioFrameSpeech... %d\n", _numFrameTypes[kAudioFrameSpeech]);
   printf("kAudioFrameCN....... %d\n", _numFrameTypes[kAudioFrameCN]);
   printf("kVideoFrameKey...... %d\n", _numFrameTypes[kVideoFrameKey]);