Move functionality out of AudioFrame and into AudioFrameOperations.

This CL is in preparation for moving AudioFrame into webrtc/api.
AudioFrame is a POD type used to represent 10 ms of audio. It appears
as a parameter and return value in interfaces being migrated to
webrtc/api, in particular AudioMixer.

Here, the methods operator+=, operator>>= and Mute are moved into a
new target, webrtc/audio/utility/audio_frame_operations, and
dependencies are updated to use the new versions. The old AudioFrame
methods are marked as deprecated.

The audio frame utilities in webrtc/modules/utility:audio_frame_operations
are also moved to the new location.
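
As an illustration of the change at call sites (a sketch only;
MixInto is a hypothetical name, not part of this CL):

  #include "webrtc/audio/utility/audio_frame_operations.h"

  void MixInto(webrtc::AudioFrame* frame, webrtc::AudioFrame* mixed) {
    // Before (deprecated AudioFrame operators):
    //   *frame >>= 1;
    //   *mixed += *frame;
    // After:
    webrtc::AudioFrameOperations::ApplyHalfGain(frame);
    webrtc::AudioFrameOperations::Add(*frame, mixed);
  }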

TBR=kjellander@webrtc.org
BUG=webrtc:6548
NOPRESUBMIT=True

Review-Url: https://codereview.webrtc.org/2424173003
Cr-Commit-Position: refs/heads/master@{#15413}
aleloi
2016-12-05 01:46:09 -08:00
committed by Commit bot
parent bd9bdf6140
commit 6321b49a0d
29 changed files with 261 additions and 128 deletions

.gn

@ -25,6 +25,7 @@ check_targets = [
"//webrtc/api:audio_mixer_api",
"//webrtc/api:transport_api",
"//webrtc/api:rtc_stats_api",
"//webrtc/audio/utility/*",
"//webrtc/modules/audio_coding:audio_decoder_factory_interface",
"//webrtc/modules/audio_coding:audio_format",
"//webrtc/modules/audio_coding:audio_format_conversion",


@ -37,11 +37,6 @@ rtc_static_library("audio") {
"../modules/audio_processing",
"../system_wrappers",
"../voice_engine",
"utility", # Bogus dep, needed for landing
# codereview.webrtc.org/2424173003 without breaking
# internal projects. See
# bugs.webrtc.org/6548. TODO(aleloi): remove dependency
# when codereview.webrtc.org/2424173003 has landed.
]
}
if (rtc_include_tests) {
@ -51,6 +46,7 @@ if (rtc_include_tests) {
"audio_receive_stream_unittest.cc",
"audio_send_stream_unittest.cc",
"audio_state_unittest.cc",
"utility/audio_frame_operations_unittest.cc",
]
deps = [
":audio",
@ -59,6 +55,8 @@ if (rtc_include_tests) {
"../modules/audio_device:mock_audio_device",
"../modules/audio_mixer:audio_mixer_impl",
"../test:test_common",
"../test:test_support",
"utility:audio_frame_operations",
"//testing/gmock",
"//testing/gtest",
]


@ -8,4 +8,20 @@
import("../../build/webrtc.gni")
group("utility") {
public_deps = [
":audio_frame_operations",
]
}
rtc_static_library("audio_frame_operations") {
sources = [
"audio_frame_operations.cc",
"audio_frame_operations.h",
]
deps = [
"../..:webrtc_common",
"../../base:rtc_base_approved",
"../../modules/audio_coding:audio_format_conversion",
]
}


@ -8,18 +8,65 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/audio/utility/audio_frame_operations.h"
#include <algorithm>
#include "webrtc/base/checks.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/modules/include/module_common_types.h"
namespace webrtc {
namespace {
namespace {
// 2.7ms @ 48kHz, 4ms @ 32kHz, 8ms @ 16kHz.
const size_t kMuteFadeFrames = 128;
const float kMuteFadeInc = 1.0f / kMuteFadeFrames;
} // namespace {
} // namespace
void AudioFrameOperations::Add(const AudioFrame& frame_to_add,
AudioFrame* result_frame) {
// Sanity check.
RTC_DCHECK(result_frame);
RTC_DCHECK_GT(result_frame->num_channels_, 0);
RTC_DCHECK_EQ(result_frame->num_channels_, frame_to_add.num_channels_);
bool no_previous_data = false;
if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) {
// Special case: we have no data to start with.
RTC_DCHECK_EQ(result_frame->samples_per_channel_, 0);
result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_;
no_previous_data = true;
}
if (result_frame->vad_activity_ == AudioFrame::kVadActive ||
frame_to_add.vad_activity_ == AudioFrame::kVadActive) {
result_frame->vad_activity_ = AudioFrame::kVadActive;
} else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown ||
frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) {
result_frame->vad_activity_ = AudioFrame::kVadUnknown;
}
if (result_frame->speech_type_ != frame_to_add.speech_type_)
result_frame->speech_type_ = AudioFrame::kUndefined;
if (no_previous_data) {
std::copy(frame_to_add.data_, frame_to_add.data_ +
frame_to_add.samples_per_channel_ *
result_frame->num_channels_,
result_frame->data_);
} else {
for (size_t i = 0;
i < result_frame->samples_per_channel_ * result_frame->num_channels_;
i++) {
const int32_t wrap_guard = static_cast<int32_t>(result_frame->data_[i]) +
static_cast<int32_t>(frame_to_add.data_[i]);
result_frame->data_[i] = rtc::saturated_cast<int16_t>(wrap_guard);
}
}
return;
}
void AudioFrameOperations::MonoToStereo(const int16_t* src_audio,
size_t samples_per_channel,
@ -68,7 +115,10 @@ int AudioFrameOperations::StereoToMono(AudioFrame* frame) {
}
void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
if (frame->num_channels_ != 2) return;
RTC_DCHECK(frame);
if (frame->num_channels_ != 2) {
return;
}
for (size_t i = 0; i < frame->samples_per_channel_ * 2; i += 2) {
int16_t temp_data = frame->data_[i];
@ -77,7 +127,8 @@ void AudioFrameOperations::SwapStereoChannels(AudioFrame* frame) {
}
}
void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
void AudioFrameOperations::Mute(AudioFrame* frame,
bool previous_frame_muted,
bool current_frame_muted) {
RTC_DCHECK(frame);
if (!previous_frame_muted && !current_frame_muted) {
@ -125,14 +176,30 @@ void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted,
}
}
void AudioFrameOperations::Mute(AudioFrame* frame) {
Mute(frame, true, true);
}
void AudioFrameOperations::ApplyHalfGain(AudioFrame* frame) {
RTC_DCHECK(frame);
RTC_DCHECK_GT(frame->num_channels_, 0);
if (frame->num_channels_ < 1) {
return;
}
for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
i++) {
frame->data_[i] = frame->data_[i] >> 1;
}
}
int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) {
if (frame.num_channels_ != 2) {
return -1;
}
for (size_t i = 0; i < frame.samples_per_channel_; i++) {
frame.data_[2 * i] =
static_cast<int16_t>(left * frame.data_[2 * i]);
frame.data_[2 * i] = static_cast<int16_t>(left * frame.data_[2 * i]);
frame.data_[2 * i + 1] =
static_cast<int16_t>(right * frame.data_[2 * i + 1]);
}
@ -156,5 +223,4 @@ int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) {
}
return 0;
}
} // namespace webrtc
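
A side note on the kMuteFadeFrames constant above: the fade lengths in
its comment follow from dividing 128 samples by the sample rate. A
standalone sketch of the arithmetic (hypothetical helper, not part of
this CL):

  constexpr size_t kMuteFadeFrames = 128;
  constexpr float FadeLengthMs(int sample_rate_hz) {
    return 1000.0f * kMuteFadeFrames / sample_rate_hz;
  }
  static_assert(FadeLengthMs(16000) == 8.0f, "8 ms at 16 kHz");
  static_assert(FadeLengthMs(32000) == 4.0f, "4 ms at 32 kHz");
  // At 48 kHz, 128 / 48000 s is roughly 2.67 ms ("2.7ms @ 48kHz").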


@ -0,0 +1,83 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
#define WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
#include <stddef.h>
#include "webrtc/typedefs.h"
namespace webrtc {
class AudioFrame;
// TODO(andrew): consolidate this with utility.h and audio_frame_manipulator.h.
// Change reference parameters to pointers. Consider using a namespace rather
// than a class.
class AudioFrameOperations {
public:
// Adds the samples in |frame_to_add| to those in |result_frame|,
// putting the result in |result_frame|. The fields |vad_activity_|
// and |speech_type_| of the result frame are updated. If
// |result_frame| is empty (|samples_per_channel_| == 0), the samples
// in |frame_to_add| are copied to it. The number of channels and the
// number of samples per channel must match, except when
// |result_frame| is empty.
static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame);
// Upmixes mono |src_audio| to stereo |dst_audio|. This is an out-of-place
// operation, meaning src_audio and dst_audio must point to different
// buffers. It is the caller's responsibility to ensure that |dst_audio| is
// sufficiently large.
static void MonoToStereo(const int16_t* src_audio,
size_t samples_per_channel,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks for sufficient
// buffer size and that |num_channels_| is mono.
static int MonoToStereo(AudioFrame* frame);
// Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
// operation, meaning |src_audio| and |dst_audio| may point to the same
// buffer.
static void StereoToMono(const int16_t* src_audio,
size_t samples_per_channel,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks that
// |num_channels_| is stereo.
static int StereoToMono(AudioFrame* frame);
// Swap the left and right channels of |frame|. Fails silently if |frame| is
// not stereo.
static void SwapStereoChannels(AudioFrame* frame);
// Conditionally zero out contents of |frame| for implementing audio mute:
// |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
// |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
// !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
// !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
static void Mute(AudioFrame* frame,
bool previous_frame_muted,
bool current_frame_muted);
// Zero out contents of frame.
static void Mute(AudioFrame* frame);
// Halve samples in |frame|.
static void ApplyHalfGain(AudioFrame* frame);
static int Scale(float left, float right, AudioFrame& frame);
static int ScaleWithSat(float scale, AudioFrame& frame);
};
} // namespace webrtc
#endif // WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_
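
A usage sketch for the contract documented above, mirroring the new
unit tests (frame sizes are illustrative):

  webrtc::AudioFrame frame;
  frame.samples_per_channel_ = 480;  // 10 ms at 48 kHz.
  frame.num_channels_ = 1;
  // ... fill frame.data_ with samples ...

  webrtc::AudioFrame sum;
  sum.samples_per_channel_ = 0;  // Empty, so the first Add() just copies.
  sum.num_channels_ = frame.num_channels_;
  webrtc::AudioFrameOperations::Add(frame, &sum);  // sum == frame.
  webrtc::AudioFrameOperations::Add(frame, &sum);  // Saturating sample sum.

  // Fade out at the end of |frame| as muting begins:
  webrtc::AudioFrameOperations::Mute(&frame, /*previous_frame_muted=*/false,
                                     /*current_frame_muted=*/true);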


@ -8,9 +8,9 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
@ -365,5 +365,25 @@ TEST_F(AudioFrameOperationsTest, ScaleWithSatSucceeds) {
VerifyFramesAreEqual(scaled_frame, frame_);
}
TEST_F(AudioFrameOperationsTest, AddingXToEmptyGivesX) {
// When samples_per_channel_ is 0, the frame counts as empty (all zero).
AudioFrame frame_to_add_to;
frame_to_add_to.samples_per_channel_ = 0;
frame_to_add_to.num_channels_ = frame_.num_channels_;
AudioFrameOperations::Add(frame_, &frame_to_add_to);
VerifyFramesAreEqual(frame_, frame_to_add_to);
}
TEST_F(AudioFrameOperationsTest, AddingTwoFramesProducesTheirSum) {
AudioFrame frame_to_add_to;
frame_to_add_to.samples_per_channel_ = frame_.samples_per_channel_;
frame_to_add_to.num_channels_ = frame_.num_channels_;
SetFrameData(&frame_to_add_to, 1000);
AudioFrameOperations::Add(frame_, &frame_to_add_to);
SetFrameData(&frame_, frame_.data_[0] + 1000);
VerifyFramesAreEqual(frame_, frame_to_add_to);
}
} // namespace
} // namespace webrtc


@ -483,7 +483,6 @@ if (rtc_include_tests) {
"rtp_rtcp/test/testAPI/test_api_audio.cc",
"rtp_rtcp/test/testAPI/test_api_rtcp.cc",
"rtp_rtcp/test/testAPI/test_api_video.cc",
"utility/source/audio_frame_operations_unittest.cc",
"utility/source/file_player_unittests.cc",
"utility/source/process_thread_impl_unittest.cc",
"video_coding/codecs/test/packet_manipulator_unittest.cc",


@ -39,8 +39,8 @@ rtc_static_library("audio_conference_mixer") {
}
deps = [
"../../audio/utility:audio_frame_operations",
"../../system_wrappers",
"../audio_processing",
"../utility",
]
}


@ -1,4 +1,5 @@
include_rules = [
"+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/system_wrappers",
]


@ -8,11 +8,11 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h"
#include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
@ -38,9 +38,9 @@ typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
assert(mixed_frame->num_channels_ >= frame->num_channels_);
if (use_limiter) {
// Divide by two to avoid saturation in the mixing.
// This is only meaningful if the limiter will be used.
*frame >>= 1;
// This is to avoid saturation in the mixing. It is only
// meaningful if the limiter will be used.
AudioFrameOperations::ApplyHalfGain(frame);
}
if (mixed_frame->num_channels_ > frame->num_channels_) {
// We only support mono-to-stereo.
@ -49,7 +49,7 @@ void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
AudioFrameOperations::MonoToStereo(frame);
}
*mixed_frame += *frame;
AudioFrameOperations::Add(*frame, mixed_frame);
}
// Return the max number of channels from a |list| composed of AudioFrames.
@ -303,7 +303,7 @@ void AudioConferenceMixerImpl::Process() {
if(mixedAudio->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
mixedAudio->samples_per_channel_ = _sampleSize;
mixedAudio->Mute();
AudioFrameOperations::Mute(mixedAudio);
} else {
// Only call the limiter if we have something to mix.
LimitMixedAudio(mixedAudio);
@ -922,7 +922,7 @@ bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
//
// Instead we double the frame (with addition since left-shifting a
// negative value is undefined).
*mixedAudio += *mixedAudio;
AudioFrameOperations::Add(*mixedAudio, mixedAudio);
if(error != _limiter->kNoError) {
WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
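
Regarding the "double the frame (with addition)" comment above:
left-shifting a negative value is undefined behavior in C++ (before
C++20), while widening to int32_t and adding is well defined. A
minimal sketch on a single sample (hypothetical helper, not part of
this CL):

  #include <algorithm>
  #include <cstdint>

  int16_t DoubleSample(int16_t v) {
    // v + v in int32 arithmetic instead of the undefined v << 1 for v < 0.
    const int32_t doubled = static_cast<int32_t>(v) + static_cast<int32_t>(v);
    // Saturate to the int16 range, as AudioFrameOperations::Add does.
    return static_cast<int16_t>(
        std::min<int32_t>(32767, std::max<int32_t>(-32768, doubled)));
  }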


@ -32,10 +32,10 @@ rtc_static_library("audio_mixer_impl") {
deps = [
":audio_frame_manipulator",
"../..:webrtc_common",
"../../audio/utility:audio_frame_operations",
"../../base:rtc_base_approved",
"../../modules/audio_processing",
"../../modules/utility",
"../../system_wrappers",
"../audio_processing",
]
}
@ -51,7 +51,7 @@ rtc_static_library("audio_frame_manipulator") {
]
deps = [
"../../audio/utility",
"../../base:rtc_base_approved",
"../../modules/utility",
]
}


@ -1,4 +1,5 @@
include_rules = [
"+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/call",
"+webrtc/common_audio",


@ -8,10 +8,10 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
namespace webrtc {


@ -14,9 +14,9 @@
#include <functional>
#include <utility>
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
namespace webrtc {
namespace {
@ -106,12 +106,12 @@ int32_t MixFromList(AudioFrame* mixed_audio,
// Mix |f.frame| into |mixed_audio|, with saturation protection.
// This effect is applied to |f.frame| itself prior to mixing.
if (use_limiter) {
// Divide by two to avoid saturation in the mixing.
// This is only meaningful if the limiter will be used.
*frame >>= 1;
// This is to avoid saturation in the mixing. It is only
// meaningful if the limiter will be used.
AudioFrameOperations::ApplyHalfGain(frame);
}
RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
*mixed_audio += *frame;
AudioFrameOperations::Add(*frame, mixed_audio);
}
return 0;
}
@ -250,7 +250,7 @@ void AudioMixerImpl::Mix(size_t number_of_channels,
if (audio_frame_for_mixing->samples_per_channel_ == 0) {
// Nothing was mixed, set the audio samples to silence.
audio_frame_for_mixing->samples_per_channel_ = sample_size_;
audio_frame_for_mixing->Mute();
AudioFrameOperations::Mute(audio_frame_for_mixing);
} else {
// Only call the limiter if we have something to mix.
LimitMixedAudio(audio_frame_for_mixing);
@ -357,7 +357,7 @@ bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const {
//
// Instead we double the frame (with addition since left-shifting a
// negative value is undefined).
*mixed_audio += *mixed_audio;
AudioFrameOperations::Add(*mixed_audio, mixed_audio);
if (error != limiter_->kNoError) {
LOG_F(LS_ERROR) << "Error from AudioProcessing: " << error;


@ -165,6 +165,7 @@ rtc_static_library("audio_processing") {
defines = []
deps = [
"../..:webrtc_common",
"../../audio/utility:audio_frame_operations",
"../audio_coding:isac",
]


@ -1,4 +1,5 @@
include_rules = [
"+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/common_audio",
"+webrtc/system_wrappers",


@ -10,9 +10,9 @@
#include "webrtc/modules/audio_processing/vad/standalone_vad.h"
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/typedefs.h"
namespace webrtc {


@ -18,6 +18,8 @@
#include <limits>
#include "webrtc/base/constructormagic.h"
#include "webrtc/base/deprecation.h"
#include "webrtc/base/safe_conversions.h"
#include "webrtc/common_types.h"
#include "webrtc/common_video/rotation.h"
#include "webrtc/typedefs.h"
@ -520,8 +522,6 @@ class CallStatsObserver {
*
* - Stereo data is interleaved starting with the left channel.
*
* - The +operator assume that you would never add exactly opposite frames when
* deciding the resulting state. To do this use the -operator.
*/
class AudioFrame {
public:
@ -556,26 +556,29 @@ class AudioFrame {
void CopyFrom(const AudioFrame& src);
void Mute();
AudioFrame& operator>>=(const int rhs);
AudioFrame& operator+=(const AudioFrame& rhs);
// These methods are deprecated. Use the functions in
// webrtc/audio/utility instead. These methods will exist for a
// short period of time until webrtc clients have updated. See
// webrtc:6548 for details.
RTC_DEPRECATED void Mute();
RTC_DEPRECATED AudioFrame& operator>>=(const int rhs);
RTC_DEPRECATED AudioFrame& operator+=(const AudioFrame& rhs);
int id_;
// RTP timestamp of the first sample in the AudioFrame.
uint32_t timestamp_;
uint32_t timestamp_ = 0;
// Time since the first frame in milliseconds.
// -1 represents an uninitialized value.
int64_t elapsed_time_ms_;
int64_t elapsed_time_ms_ = -1;
// NTP time of the estimated capture time in local timebase in milliseconds.
// -1 represents an uninitialized value.
int64_t ntp_time_ms_;
int64_t ntp_time_ms_ = -1;
int16_t data_[kMaxDataSizeSamples];
size_t samples_per_channel_;
int sample_rate_hz_;
size_t num_channels_;
SpeechType speech_type_;
VADActivity vad_activity_;
size_t samples_per_channel_ = 0;
int sample_rate_hz_ = 0;
size_t num_channels_ = 0;
SpeechType speech_type_ = kUndefined;
VADActivity vad_activity_ = kVadUnknown;
private:
RTC_DISALLOW_COPY_AND_ASSIGN(AudioFrame);
@ -585,7 +588,6 @@ class AudioFrame {
// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5647.
inline AudioFrame::AudioFrame()
: data_() {
Reset();
}
inline void AudioFrame::Reset() {
@ -659,18 +661,6 @@ inline AudioFrame& AudioFrame::operator>>=(const int rhs) {
return *this;
}
namespace {
inline int16_t ClampToInt16(int32_t input) {
if (input < -0x00008000) {
return -0x8000;
} else if (input > 0x00007FFF) {
return 0x7FFF;
} else {
return static_cast<int16_t>(input);
}
}
}
inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
@ -704,7 +694,7 @@ inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
for (size_t i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrap_guard =
static_cast<int32_t>(data_[i]) + static_cast<int32_t>(rhs.data_[i]);
data_[i] = ClampToInt16(wrap_guard);
data_[i] = rtc::saturated_cast<int16_t>(wrap_guard);
}
}
return *this;
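
The ClampToInt16 helper removed above is superseded by
rtc::saturated_cast<int16_t> from webrtc/base/safe_conversions.h. For
reference, a standalone sketch of the equivalent clamping (not the
actual rtc implementation):

  #include <cstdint>
  #include <limits>

  int16_t SaturateToInt16(int32_t input) {
    if (input < std::numeric_limits<int16_t>::min())
      return std::numeric_limits<int16_t>::min();
    if (input > std::numeric_limits<int16_t>::max())
      return std::numeric_limits<int16_t>::max();
    return static_cast<int16_t>(input);
  }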


@ -112,15 +112,6 @@ TEST(LatestTimestamp, Wrap) {
EXPECT_EQ(0x0000FFFFu, LatestTimestamp(0xFFFF0000, 0x0000FFFF));
}
TEST(ClampToInt16, TestCases) {
EXPECT_EQ(0x0000, ClampToInt16(0x00000000));
EXPECT_EQ(0x0001, ClampToInt16(0x00000001));
EXPECT_EQ(0x7FFF, ClampToInt16(0x00007FFF));
EXPECT_EQ(0x7FFF, ClampToInt16(0x7FFFFFFF));
EXPECT_EQ(-0x0001, ClampToInt16(-0x00000001));
EXPECT_EQ(-0x8000, ClampToInt16(-0x8000));
EXPECT_EQ(-0x8000, ClampToInt16(-0x7FFFFFFF));
}
TEST(SequenceNumberUnwrapper, Limits) {
SequenceNumberUnwrapper unwrapper;


@ -16,7 +16,6 @@ rtc_static_library("utility") {
"include/helpers_android.h",
"include/jvm_android.h",
"include/process_thread.h",
"source/audio_frame_operations.cc",
"source/coder.cc",
"source/coder.h",
"source/file_player.cc",
@ -38,10 +37,13 @@ rtc_static_library("utility") {
deps = [
"../..:webrtc_common",
"../../audio/utility:audio_frame_operations",
"../../base:rtc_task_queue",
"../../common_audio",
"../../system_wrappers",
"../audio_coding",
"../audio_coding:builtin_audio_decoder_factory",
"../audio_coding:rent_a_codec",
"../media_file",
]
}


@ -1,4 +1,7 @@
include_rules = [
# TODO(aleloi): remove when clients update. See
# bugs.webrtc.org/6548.
"+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/common_audio",
"+webrtc/common_video",


@ -10,54 +10,11 @@
#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
#define WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_
// The contents of this file have moved to
// //webrtc/audio/utility. This file is deprecated.
#include "webrtc/typedefs.h"
namespace webrtc {
class AudioFrame;
// TODO(andrew): consolidate this with utility.h and audio_frame_manipulator.h.
// Change reference parameters to pointers. Consider using a namespace rather
// than a class.
class AudioFrameOperations {
public:
// Upmixes mono |src_audio| to stereo |dst_audio|. This is an out-of-place
// operation, meaning src_audio and dst_audio must point to different
// buffers. It is the caller's responsibility to ensure that |dst_audio| is
// sufficiently large.
static void MonoToStereo(const int16_t* src_audio, size_t samples_per_channel,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks for sufficient
// buffer size and that |num_channels_| is mono.
static int MonoToStereo(AudioFrame* frame);
// Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place
// operation, meaning |src_audio| and |dst_audio| may point to the same
// buffer.
static void StereoToMono(const int16_t* src_audio, size_t samples_per_channel,
int16_t* dst_audio);
// |frame.num_channels_| will be updated. This version checks that
// |num_channels_| is stereo.
static int StereoToMono(AudioFrame* frame);
// Swap the left and right channels of |frame|. Fails silently if |frame| is
// not stereo.
static void SwapStereoChannels(AudioFrame* frame);
// Conditionally zero out contents of |frame| for implementing audio mute:
// |previous_frame_muted| && |current_frame_muted| - Zero out whole frame.
// |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start.
// !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end.
// !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched.
static void Mute(AudioFrame* frame, bool previous_frame_muted,
bool current_frame_muted);
static int Scale(float left, float right, AudioFrame& frame);
static int ScaleWithSat(float scale, AudioFrame& frame);
};
} // namespace webrtc
// TODO(aleloi): Remove this file when clients have updated their
// includes. See bugs.webrtc.org/6548.
#include "webrtc/audio/utility/audio_frame_operations.h"
#endif // #ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_


@ -90,6 +90,7 @@ rtc_static_library("voice_engine") {
"../api:audio_mixer_api",
"../api:call_api",
"../api:transport_api",
"../audio/utility:audio_frame_operations",
"../base:rtc_base_approved",
"../common_audio",
"../logging:rtc_event_log_api",


@ -1,4 +1,5 @@
include_rules = [
"+webrtc/audio/utility/audio_frame_operations.h",
"+webrtc/base",
"+webrtc/call",
"+webrtc/common_audio",


@ -13,6 +13,7 @@
#include <algorithm>
#include <utility>
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/array_view.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/criticalsection.h"
@ -32,7 +33,6 @@
#include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/modules/utility/include/process_thread.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
@ -613,7 +613,7 @@ MixerParticipant::AudioFrameInfo Channel::GetAudioFrameWithMuted(
// TODO(henrik.lundin): We should be able to do better than this. But we
// will have to go through all the cases below where the audio samples may
// be used, and handle the muted case in some way.
audioFrame->Mute();
AudioFrameOperations::Mute(audioFrame);
}
// Convert module ID to internal VoE channel ID


@ -10,9 +10,9 @@
#include "webrtc/voice_engine/output_mixer.h"
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/file_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"


@ -12,9 +12,9 @@
#include <memory>
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/format_macros.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"


@ -10,13 +10,13 @@
#include "webrtc/voice_engine/utility.h"
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/common_audio/resampler/include/push_resampler.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/modules/utility/include/audio_frame_operations.h"
#include "webrtc/voice_engine/voice_engine_defines.h"
namespace webrtc {


@ -10,6 +10,7 @@
#include "webrtc/voice_engine/voe_external_media_impl.h"
#include "webrtc/audio/utility/audio_frame_operations.h"
#include "webrtc/system_wrappers/include/trace.h"
#include "webrtc/voice_engine/channel.h"
#include "webrtc/voice_engine/include/voe_errors.h"
@ -149,7 +150,7 @@ int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz,
desired_sample_rate_hz == 0 ? -1 : desired_sample_rate_hz;
auto ret = channelPtr->GetAudioFrameWithMuted(channel, frame);
if (ret == MixerParticipant::AudioFrameInfo::kMuted) {
frame->Mute();
AudioFrameOperations::Mute(frame);
}
return ret == MixerParticipant::AudioFrameInfo::kError ? -1 : 0;
}