VoE: apply new style guide on VoE interfaces and their implementations

Changes:
1. Ran clang-format on VoE interfaces and their implementations.
2. Replaced virtual with override in derived classes.

R=henrika@webrtc.org

Review URL: https://webrtc-codereview.appspot.com/49239004

Cr-Commit-Position: refs/heads/master@{#9130}
This commit is contained in:
Jelena Marusic
2015-05-04 14:15:32 +02:00
parent 79c143312b
commit 0d266054ac
39 changed files with 3868 additions and 4348 deletions

View File

@ -44,200 +44,195 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
// VoERxVadCallback // VoERxVadCallback
class WEBRTC_DLLEXPORT VoERxVadCallback class WEBRTC_DLLEXPORT VoERxVadCallback {
{ public:
public: virtual void OnRxVad(int channel, int vadDecision) = 0;
virtual void OnRxVad(int channel, int vadDecision) = 0;
protected: protected:
virtual ~VoERxVadCallback() {} virtual ~VoERxVadCallback() {}
}; };
// VoEAudioProcessing // VoEAudioProcessing
class WEBRTC_DLLEXPORT VoEAudioProcessing class WEBRTC_DLLEXPORT VoEAudioProcessing {
{ public:
public: // Factory for the VoEAudioProcessing sub-API. Increases an internal
// Factory for the VoEAudioProcessing sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);
static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEAudioProcessing sub-API and decreases an internal // Releases the VoEAudioProcessing sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Sets Noise Suppression (NS) status and mode. // Sets Noise Suppression (NS) status and mode.
// The NS reduces noise in the microphone signal. // The NS reduces noise in the microphone signal.
virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0; virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0;
// Gets the NS status and mode. // Gets the NS status and mode.
virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0; virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0;
// Sets the Automatic Gain Control (AGC) status and mode. // Sets the Automatic Gain Control (AGC) status and mode.
// The AGC adjusts the microphone signal to an appropriate level. // The AGC adjusts the microphone signal to an appropriate level.
virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0; virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0;
// Gets the AGC status and mode. // Gets the AGC status and mode.
virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0; virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0;
// Sets the AGC configuration. // Sets the AGC configuration.
// Should only be used in situations where the working environment // Should only be used in situations where the working environment
// is well known. // is well known.
virtual int SetAgcConfig(AgcConfig config) = 0; virtual int SetAgcConfig(AgcConfig config) = 0;
// Gets the AGC configuration. // Gets the AGC configuration.
virtual int GetAgcConfig(AgcConfig& config) = 0; virtual int GetAgcConfig(AgcConfig& config) = 0;
// Sets the Echo Control (EC) status and mode. // Sets the Echo Control (EC) status and mode.
// The EC mitigates acoustic echo where a user can hear their own // The EC mitigates acoustic echo where a user can hear their own
// speech repeated back due to an acoustic coupling between the // speech repeated back due to an acoustic coupling between the
// speaker and the microphone at the remote end. // speaker and the microphone at the remote end.
virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0; virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0;
// Gets the EC status and mode. // Gets the EC status and mode.
virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0; virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;
// Enables the compensation of clock drift between the capture and render // Enables the compensation of clock drift between the capture and render
// streams by the echo canceller (i.e. only using EcMode==kEcAec). It will // streams by the echo canceller (i.e. only using EcMode==kEcAec). It will
// only be enabled if supported on the current platform; otherwise an error // only be enabled if supported on the current platform; otherwise an error
// will be returned. Check if the platform is supported by calling // will be returned. Check if the platform is supported by calling
// |DriftCompensationSupported()|. // |DriftCompensationSupported()|.
virtual int EnableDriftCompensation(bool enable) = 0; virtual int EnableDriftCompensation(bool enable) = 0;
virtual bool DriftCompensationEnabled() = 0; virtual bool DriftCompensationEnabled() = 0;
static bool DriftCompensationSupported(); static bool DriftCompensationSupported();
// Sets a delay |offset| in ms to add to the system delay reported by the // Sets a delay |offset| in ms to add to the system delay reported by the
// OS, which is used by the AEC to synchronize far- and near-end streams. // OS, which is used by the AEC to synchronize far- and near-end streams.
// In some cases a system may introduce a delay which goes unreported by the // In some cases a system may introduce a delay which goes unreported by the
// OS, but which is known to the user. This method can be used to compensate // OS, but which is known to the user. This method can be used to compensate
// for the unreported delay. // for the unreported delay.
virtual void SetDelayOffsetMs(int offset) = 0; virtual void SetDelayOffsetMs(int offset) = 0;
virtual int DelayOffsetMs() = 0; virtual int DelayOffsetMs() = 0;
// Modifies settings for the AEC designed for mobile devices (AECM). // Modifies settings for the AEC designed for mobile devices (AECM).
virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone, virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
bool enableCNG = true) = 0; bool enableCNG = true) = 0;
// Gets settings for the AECM. // Gets settings for the AECM.
virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0; virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0;
// Enables a high pass filter on the capture signal. This removes DC bias // Enables a high pass filter on the capture signal. This removes DC bias
// and low-frequency noise. Recommended to be enabled. // and low-frequency noise. Recommended to be enabled.
virtual int EnableHighPassFilter(bool enable) = 0; virtual int EnableHighPassFilter(bool enable) = 0;
virtual bool IsHighPassFilterEnabled() = 0; virtual bool IsHighPassFilterEnabled() = 0;
// Sets status and mode of the receiving-side (Rx) NS. // Sets status and mode of the receiving-side (Rx) NS.
// The Rx NS reduces noise in the received signal for the specified // The Rx NS reduces noise in the received signal for the specified
// |channel|. Intended for advanced usage only. // |channel|. Intended for advanced usage only.
virtual int SetRxNsStatus(int channel, virtual int SetRxNsStatus(int channel,
bool enable, bool enable,
NsModes mode = kNsUnchanged) = 0; NsModes mode = kNsUnchanged) = 0;
// Gets status and mode of the receiving-side NS. // Gets status and mode of the receiving-side NS.
virtual int GetRxNsStatus(int channel, virtual int GetRxNsStatus(int channel, bool& enabled, NsModes& mode) = 0;
bool& enabled,
NsModes& mode) = 0;
// Sets status and mode of the receiving-side (Rx) AGC. // Sets status and mode of the receiving-side (Rx) AGC.
// The Rx AGC adjusts the received signal to an appropriate level // The Rx AGC adjusts the received signal to an appropriate level
// for the specified |channel|. Intended for advanced usage only. // for the specified |channel|. Intended for advanced usage only.
virtual int SetRxAgcStatus(int channel, virtual int SetRxAgcStatus(int channel,
bool enable, bool enable,
AgcModes mode = kAgcUnchanged) = 0; AgcModes mode = kAgcUnchanged) = 0;
// Gets status and mode of the receiving-side AGC. // Gets status and mode of the receiving-side AGC.
virtual int GetRxAgcStatus(int channel, virtual int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode) = 0;
bool& enabled,
AgcModes& mode) = 0;
// Modifies the AGC configuration on the receiving side for the // Modifies the AGC configuration on the receiving side for the
// specified |channel|. // specified |channel|.
virtual int SetRxAgcConfig(int channel, AgcConfig config) = 0; virtual int SetRxAgcConfig(int channel, AgcConfig config) = 0;
// Gets the AGC configuration on the receiving side. // Gets the AGC configuration on the receiving side.
virtual int GetRxAgcConfig(int channel, AgcConfig& config) = 0; virtual int GetRxAgcConfig(int channel, AgcConfig& config) = 0;
// Registers a VoERxVadCallback |observer| instance and enables Rx VAD // Registers a VoERxVadCallback |observer| instance and enables Rx VAD
// notifications for the specified |channel|. // notifications for the specified |channel|.
virtual int RegisterRxVadObserver(int channel, virtual int RegisterRxVadObserver(int channel,
VoERxVadCallback &observer) = 0; VoERxVadCallback& observer) = 0;
// Deregisters the VoERxVadCallback |observer| and disables Rx VAD // Deregisters the VoERxVadCallback |observer| and disables Rx VAD
// notifications for the specified |channel|. // notifications for the specified |channel|.
virtual int DeRegisterRxVadObserver(int channel) = 0; virtual int DeRegisterRxVadObserver(int channel) = 0;
// Gets the VAD/DTX activity for the specified |channel|. // Gets the VAD/DTX activity for the specified |channel|.
// The returned value is 1 if frames of audio contains speech // The returned value is 1 if frames of audio contains speech
// and 0 if silence. The output is always 1 if VAD is disabled. // and 0 if silence. The output is always 1 if VAD is disabled.
virtual int VoiceActivityIndicator(int channel) = 0; virtual int VoiceActivityIndicator(int channel) = 0;
// Enables or disables the possibility to retrieve echo metrics and delay // Enables or disables the possibility to retrieve echo metrics and delay
// logging values during an active call. The metrics are only supported in // logging values during an active call. The metrics are only supported in
// AEC. // AEC.
virtual int SetEcMetricsStatus(bool enable) = 0; virtual int SetEcMetricsStatus(bool enable) = 0;
// Gets the current EC metric status. // Gets the current EC metric status.
virtual int GetEcMetricsStatus(bool& enabled) = 0; virtual int GetEcMetricsStatus(bool& enabled) = 0;
// Gets the instantaneous echo level metrics. // Gets the instantaneous echo level metrics.
virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0; virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0;
// Gets the EC internal |delay_median| and |delay_std| in ms between // Gets the EC internal |delay_median| and |delay_std| in ms between
// near-end and far-end. The metric |fraction_poor_delays| is the amount of // near-end and far-end. The metric |fraction_poor_delays| is the amount of
// delay values that potentially can break the EC. The values are aggregated // delay values that potentially can break the EC. The values are aggregated
// over one second and the last updated metrics are returned. // over one second and the last updated metrics are returned.
virtual int GetEcDelayMetrics(int& delay_median, int& delay_std, virtual int GetEcDelayMetrics(int& delay_median,
float& fraction_poor_delays) = 0; int& delay_std,
float& fraction_poor_delays) = 0;
// Enables recording of Audio Processing (AP) debugging information. // Enables recording of Audio Processing (AP) debugging information.
// The file can later be used for off-line analysis of the AP performance. // The file can later be used for off-line analysis of the AP performance.
virtual int StartDebugRecording(const char* fileNameUTF8) = 0; virtual int StartDebugRecording(const char* fileNameUTF8) = 0;
// Same as above but sets and uses an existing file handle. Takes ownership // Same as above but sets and uses an existing file handle. Takes ownership
// of |file_handle| and passes it on to the audio processing module. // of |file_handle| and passes it on to the audio processing module.
virtual int StartDebugRecording(FILE* file_handle) = 0; virtual int StartDebugRecording(FILE* file_handle) = 0;
// Disables recording of AP debugging information. // Disables recording of AP debugging information.
virtual int StopDebugRecording() = 0; virtual int StopDebugRecording() = 0;
// Enables or disables detection of disturbing keyboard typing. // Enables or disables detection of disturbing keyboard typing.
// An error notification will be given as a callback upon detection. // An error notification will be given as a callback upon detection.
virtual int SetTypingDetectionStatus(bool enable) = 0; virtual int SetTypingDetectionStatus(bool enable) = 0;
// Gets the current typing detection status. // Gets the current typing detection status.
virtual int GetTypingDetectionStatus(bool& enabled) = 0; virtual int GetTypingDetectionStatus(bool& enabled) = 0;
// Reports the lower of: // Reports the lower of:
// * Time in seconds since the last typing event. // * Time in seconds since the last typing event.
// * Time in seconds since the typing detection was enabled. // * Time in seconds since the typing detection was enabled.
// Returns error if typing detection is disabled. // Returns error if typing detection is disabled.
virtual int TimeSinceLastTyping(int &seconds) = 0; virtual int TimeSinceLastTyping(int& seconds) = 0;
// Optional setting of typing detection parameters // Optional setting of typing detection parameters
// Parameter with value == 0 will be ignored // Parameter with value == 0 will be ignored
// and left with default config. // and left with default config.
// TODO(niklase) Remove default argument as soon as libJingle is updated! // TODO(niklase) Remove default argument as soon as libJingle is updated!
virtual int SetTypingDetectionParameters(int timeWindow, virtual int SetTypingDetectionParameters(int timeWindow,
int costPerTyping, int costPerTyping,
int reportingThreshold, int reportingThreshold,
int penaltyDecay, int penaltyDecay,
int typeEventDelay = 0) = 0; int typeEventDelay = 0) = 0;
// Swaps the capture-side left and right audio channels when enabled. It // Swaps the capture-side left and right audio channels when enabled. It
// only has an effect when using a stereo send codec. The setting is // only has an effect when using a stereo send codec. The setting is
// persistent; it will be applied whenever a stereo send codec is enabled. // persistent; it will be applied whenever a stereo send codec is enabled.
// //
// The swap is applied only to the captured audio, and not mixed files. The // The swap is applied only to the captured audio, and not mixed files. The
// swap will appear in file recordings and when accessing audio through the // swap will appear in file recordings and when accessing audio through the
// external media interface. // external media interface.
virtual void EnableStereoChannelSwapping(bool enable) = 0; virtual void EnableStereoChannelSwapping(bool enable) = 0;
virtual bool IsStereoChannelSwappingEnabled() = 0; virtual bool IsStereoChannelSwappingEnabled() = 0;
protected: protected:
VoEAudioProcessing() {} VoEAudioProcessing() {}
virtual ~VoEAudioProcessing() {} virtual ~VoEAudioProcessing() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -46,149 +46,150 @@ class Config;
const int kVoEDefault = -1; const int kVoEDefault = -1;
// VoiceEngineObserver // VoiceEngineObserver
class WEBRTC_DLLEXPORT VoiceEngineObserver class WEBRTC_DLLEXPORT VoiceEngineObserver {
{ public:
public: // This method will be called after the occurrence of any runtime error
// This method will be called after the occurrence of any runtime error // code, or warning notification, when the observer interface has been
// code, or warning notification, when the observer interface has been // installed using VoEBase::RegisterVoiceEngineObserver().
// installed using VoEBase::RegisterVoiceEngineObserver(). virtual void CallbackOnError(int channel, int errCode) = 0;
virtual void CallbackOnError(int channel, int errCode) = 0;
protected: protected:
virtual ~VoiceEngineObserver() {} virtual ~VoiceEngineObserver() {}
}; };
// VoiceEngine // VoiceEngine
class WEBRTC_DLLEXPORT VoiceEngine class WEBRTC_DLLEXPORT VoiceEngine {
{ public:
public: // Creates a VoiceEngine object, which can then be used to acquire
// Creates a VoiceEngine object, which can then be used to acquire // sub-APIs. Returns NULL on failure.
// sub-APIs. Returns NULL on failure. static VoiceEngine* Create();
static VoiceEngine* Create(); static VoiceEngine* Create(const Config& config);
static VoiceEngine* Create(const Config& config);
// Deletes a created VoiceEngine object and releases the utilized resources. // Deletes a created VoiceEngine object and releases the utilized resources.
// Note that if there are outstanding references held via other interfaces, // Note that if there are outstanding references held via other interfaces,
// the voice engine instance will not actually be deleted until those // the voice engine instance will not actually be deleted until those
// references have been released. // references have been released.
static bool Delete(VoiceEngine*& voiceEngine); static bool Delete(VoiceEngine*& voiceEngine);
// Specifies the amount and type of trace information which will be // Specifies the amount and type of trace information which will be
// created by the VoiceEngine. // created by the VoiceEngine.
static int SetTraceFilter(unsigned int filter); static int SetTraceFilter(unsigned int filter);
// Sets the name of the trace file and enables non-encrypted trace messages. // Sets the name of the trace file and enables non-encrypted trace messages.
static int SetTraceFile(const char* fileNameUTF8, static int SetTraceFile(const char* fileNameUTF8,
bool addFileCounter = false); bool addFileCounter = false);
// Installs the TraceCallback implementation to ensure that the user // Installs the TraceCallback implementation to ensure that the user
// receives callbacks for generated trace messages. // receives callbacks for generated trace messages.
static int SetTraceCallback(TraceCallback* callback); static int SetTraceCallback(TraceCallback* callback);
#if !defined(WEBRTC_CHROMIUM_BUILD) #if !defined(WEBRTC_CHROMIUM_BUILD)
static int SetAndroidObjects(void* javaVM, void* context); static int SetAndroidObjects(void* javaVM, void* context);
#endif #endif
protected: protected:
VoiceEngine() {} VoiceEngine() {}
~VoiceEngine() {} ~VoiceEngine() {}
}; };
// VoEBase // VoEBase
class WEBRTC_DLLEXPORT VoEBase class WEBRTC_DLLEXPORT VoEBase {
{ public:
public: // Factory for the VoEBase sub-API. Increases an internal reference
// Factory for the VoEBase sub-API. Increases an internal reference // counter if successful. Returns NULL if the API is not supported or if
// counter if successful. Returns NULL if the API is not supported or if // construction fails.
// construction fails. static VoEBase* GetInterface(VoiceEngine* voiceEngine);
static VoEBase* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEBase sub-API and decreases an internal reference // Releases the VoEBase sub-API and decreases an internal reference
// counter. Returns the new reference count. This value should be zero // counter. Returns the new reference count. This value should be zero
// for all sub-APIs before the VoiceEngine object can be safely deleted. // for all sub-APIs before the VoiceEngine object can be safely deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Installs the observer class to enable runtime error control and // Installs the observer class to enable runtime error control and
// warning notifications. Returns -1 in case of an error, 0 otherwise. // warning notifications. Returns -1 in case of an error, 0 otherwise.
virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) = 0; virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) = 0;
// Removes and disables the observer class for runtime error control // Removes and disables the observer class for runtime error control
// and warning notifications. Returns 0. // and warning notifications. Returns 0.
virtual int DeRegisterVoiceEngineObserver() = 0; virtual int DeRegisterVoiceEngineObserver() = 0;
// Initializes all common parts of the VoiceEngine; e.g. all // Initializes all common parts of the VoiceEngine; e.g. all
// encoders/decoders, the sound card and core receiving components. // encoders/decoders, the sound card and core receiving components.
// This method also makes it possible to install some user-defined external // This method also makes it possible to install some user-defined external
// modules: // modules:
// - The Audio Device Module (ADM) which implements all the audio layer // - The Audio Device Module (ADM) which implements all the audio layer
// functionality in a separate (reference counted) module. // functionality in a separate (reference counted) module.
// - The AudioProcessing module handles capture-side processing. VoiceEngine // - The AudioProcessing module handles capture-side processing. VoiceEngine
// takes ownership of this object. // takes ownership of this object.
// If NULL is passed for any of these, VoiceEngine will create its own. // If NULL is passed for any of these, VoiceEngine will create its own.
// Returns -1 in case of an error, 0 otherwise. // Returns -1 in case of an error, 0 otherwise.
// TODO(ajm): Remove default NULLs. // TODO(ajm): Remove default NULLs.
virtual int Init(AudioDeviceModule* external_adm = NULL, virtual int Init(AudioDeviceModule* external_adm = NULL,
AudioProcessing* audioproc = NULL) = 0; AudioProcessing* audioproc = NULL) = 0;
// Returns NULL before Init() is called. // Returns NULL before Init() is called.
virtual AudioProcessing* audio_processing() = 0; virtual AudioProcessing* audio_processing() = 0;
// Terminates all VoiceEngine functions and releases allocated resources. // Terminates all VoiceEngine functions and releases allocated resources.
// Returns 0. // Returns 0.
virtual int Terminate() = 0; virtual int Terminate() = 0;
// Creates a new channel and allocates the required resources for it. // Creates a new channel and allocates the required resources for it.
// One can use |config| to configure the channel. Currently that is used for // One can use |config| to configure the channel. Currently that is used for
// choosing between ACM1 and ACM2, when creating Audio Coding Module. // choosing between ACM1 and ACM2, when creating Audio Coding Module.
// Returns channel ID or -1 in case of an error. // Returns channel ID or -1 in case of an error.
virtual int CreateChannel() = 0; virtual int CreateChannel() = 0;
virtual int CreateChannel(const Config& config) = 0; virtual int CreateChannel(const Config& config) = 0;
// Deletes an existing channel and releases the utilized resources. // Deletes an existing channel and releases the utilized resources.
// Returns -1 in case of an error, 0 otherwise. // Returns -1 in case of an error, 0 otherwise.
virtual int DeleteChannel(int channel) = 0; virtual int DeleteChannel(int channel) = 0;
// Prepares and initiates the VoiceEngine for reception of // Prepares and initiates the VoiceEngine for reception of
// incoming RTP/RTCP packets on the specified |channel|. // incoming RTP/RTCP packets on the specified |channel|.
virtual int StartReceive(int channel) = 0; virtual int StartReceive(int channel) = 0;
// Stops receiving incoming RTP/RTCP packets on the specified |channel|. // Stops receiving incoming RTP/RTCP packets on the specified |channel|.
virtual int StopReceive(int channel) = 0; virtual int StopReceive(int channel) = 0;
// Starts forwarding the packets to the mixer/soundcard for a // Starts forwarding the packets to the mixer/soundcard for a
// specified |channel|. // specified |channel|.
virtual int StartPlayout(int channel) = 0; virtual int StartPlayout(int channel) = 0;
// Stops forwarding the packets to the mixer/soundcard for a // Stops forwarding the packets to the mixer/soundcard for a
// specified |channel|. // specified |channel|.
virtual int StopPlayout(int channel) = 0; virtual int StopPlayout(int channel) = 0;
// Starts sending packets to an already specified IP address and // Starts sending packets to an already specified IP address and
// port number for a specified |channel|. // port number for a specified |channel|.
virtual int StartSend(int channel) = 0; virtual int StartSend(int channel) = 0;
// Stops sending packets from a specified |channel|. // Stops sending packets from a specified |channel|.
virtual int StopSend(int channel) = 0; virtual int StopSend(int channel) = 0;
// Gets the version information for VoiceEngine and its components. // Gets the version information for VoiceEngine and its components.
virtual int GetVersion(char version[1024]) = 0; virtual int GetVersion(char version[1024]) = 0;
// Gets the last VoiceEngine error code. // Gets the last VoiceEngine error code.
virtual int LastError() = 0; virtual int LastError() = 0;
// TODO(xians): Make the interface pure virtual after libjingle // TODO(xians): Make the interface pure virtual after libjingle
// implements the interface in its FakeWebRtcVoiceEngine. // implements the interface in its FakeWebRtcVoiceEngine.
virtual AudioTransport* audio_transport() { return NULL; } virtual AudioTransport* audio_transport() { return NULL; }
// To be removed. Don't use. // To be removed. Don't use.
virtual int SetOnHoldStatus(int channel, bool enable, virtual int SetOnHoldStatus(int channel,
OnHoldModes mode = kHoldSendAndPlay) { return -1; } bool enable,
virtual int GetOnHoldStatus(int channel, bool& enabled, OnHoldModes mode = kHoldSendAndPlay) {
OnHoldModes& mode) { return -1; } return -1;
}
virtual int GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode) {
return -1;
}
protected: protected:
VoEBase() {} VoEBase() {}
virtual ~VoEBase() {} virtual ~VoEBase() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -37,109 +37,116 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
class WEBRTC_DLLEXPORT VoECodec class WEBRTC_DLLEXPORT VoECodec {
{ public:
public: // Factory for the VoECodec sub-API. Increases an internal
// Factory for the VoECodec sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoECodec* GetInterface(VoiceEngine* voiceEngine);
static VoECodec* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoECodec sub-API and decreases an internal // Releases the VoECodec sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Gets the number of supported codecs. // Gets the number of supported codecs.
virtual int NumOfCodecs() = 0; virtual int NumOfCodecs() = 0;
// Get the |codec| information for a specified list |index|. // Get the |codec| information for a specified list |index|.
virtual int GetCodec(int index, CodecInst& codec) = 0; virtual int GetCodec(int index, CodecInst& codec) = 0;
// Sets the |codec| for the |channel| to be used for sending. // Sets the |codec| for the |channel| to be used for sending.
virtual int SetSendCodec(int channel, const CodecInst& codec) = 0; virtual int SetSendCodec(int channel, const CodecInst& codec) = 0;
// Gets the |codec| parameters for the sending codec on a specified // Gets the |codec| parameters for the sending codec on a specified
// |channel|. // |channel|.
virtual int GetSendCodec(int channel, CodecInst& codec) = 0; virtual int GetSendCodec(int channel, CodecInst& codec) = 0;
// Sets the bitrate on a specified |channel| to the specified value // Sets the bitrate on a specified |channel| to the specified value
// (in bits/sec). If the value is not supported by the codec, the codec will // (in bits/sec). If the value is not supported by the codec, the codec will
// choose an appropriate value. // choose an appropriate value.
// Returns -1 on failure and 0 on success. // Returns -1 on failure and 0 on success.
virtual int SetBitRate(int channel, int bitrate_bps) = 0; virtual int SetBitRate(int channel, int bitrate_bps) = 0;
// Gets the currently received |codec| for a specific |channel|. // Gets the currently received |codec| for a specific |channel|.
virtual int GetRecCodec(int channel, CodecInst& codec) = 0; virtual int GetRecCodec(int channel, CodecInst& codec) = 0;
// Sets the dynamic payload type number for a particular |codec| or // Sets the dynamic payload type number for a particular |codec| or
// disables (ignores) a codec for receiving. For instance, when receiving // disables (ignores) a codec for receiving. For instance, when receiving
// an invite from a SIP-based client, this function can be used to change // an invite from a SIP-based client, this function can be used to change
// the dynamic payload type number to match that in the INVITE SDP- // the dynamic payload type number to match that in the INVITE SDP-
// message. The utilized parameters in the |codec| structure are: // message. The utilized parameters in the |codec| structure are:
// plname, plfreq, pltype and channels. // plname, plfreq, pltype and channels.
virtual int SetRecPayloadType(int channel, const CodecInst& codec) = 0; virtual int SetRecPayloadType(int channel, const CodecInst& codec) = 0;
// Gets the actual payload type that is set for receiving a |codec| on a // Gets the actual payload type that is set for receiving a |codec| on a
// |channel|. The value it retrieves will either be the default payload // |channel|. The value it retrieves will either be the default payload
// type, or a value earlier set with SetRecPayloadType(). // type, or a value earlier set with SetRecPayloadType().
virtual int GetRecPayloadType(int channel, CodecInst& codec) = 0; virtual int GetRecPayloadType(int channel, CodecInst& codec) = 0;
// Sets the payload |type| for the sending of SID-frames with background // Sets the payload |type| for the sending of SID-frames with background
// noise estimation during silence periods detected by the VAD. // noise estimation during silence periods detected by the VAD.
virtual int SetSendCNPayloadType( virtual int SetSendCNPayloadType(
int channel, int type, PayloadFrequencies frequency = kFreq16000Hz) = 0; int channel,
int type,
PayloadFrequencies frequency = kFreq16000Hz) = 0;
// Sets the codec internal FEC (forward error correction) status for a // Sets the codec internal FEC (forward error correction) status for a
// specified |channel|. Returns 0 if success, and -1 if failed. // specified |channel|. Returns 0 if success, and -1 if failed.
// TODO(minyue): Make SetFECStatus() pure virtual when fakewebrtcvoiceengine // TODO(minyue): Make SetFECStatus() pure virtual when fakewebrtcvoiceengine
// in talk is ready. // in talk is ready.
virtual int SetFECStatus(int channel, bool enable) { return -1; } virtual int SetFECStatus(int channel, bool enable) { return -1; }
// Gets the codec internal FEC status for a specified |channel|. Returns 0 // Gets the codec internal FEC status for a specified |channel|. Returns 0
// with the status stored in |enabled| if success, and -1 if encountered // with the status stored in |enabled| if success, and -1 if encountered
// error. // error.
// TODO(minyue): Make GetFECStatus() pure virtual when fakewebrtcvoiceengine // TODO(minyue): Make GetFECStatus() pure virtual when fakewebrtcvoiceengine
// in talk is ready. // in talk is ready.
virtual int GetFECStatus(int channel, bool& enabled) { return -1; } virtual int GetFECStatus(int channel, bool& enabled) { return -1; }
// Sets the VAD/DTX (silence suppression) status and |mode| for a // Sets the VAD/DTX (silence suppression) status and |mode| for a
// specified |channel|. Disabling VAD (through |enable|) will also disable // specified |channel|. Disabling VAD (through |enable|) will also disable
// DTX; it is not necessary to explictly set |disableDTX| in this case. // DTX; it is not necessary to explictly set |disableDTX| in this case.
virtual int SetVADStatus(int channel, bool enable, virtual int SetVADStatus(int channel,
VadModes mode = kVadConventional, bool enable,
bool disableDTX = false) = 0; VadModes mode = kVadConventional,
bool disableDTX = false) = 0;
// Gets the VAD/DTX status and |mode| for a specified |channel|. // Gets the VAD/DTX status and |mode| for a specified |channel|.
virtual int GetVADStatus(int channel, bool& enabled, VadModes& mode, virtual int GetVADStatus(int channel,
bool& disabledDTX) = 0; bool& enabled,
VadModes& mode,
bool& disabledDTX) = 0;
// If send codec is Opus on a specified |channel|, sets the maximum playback // If send codec is Opus on a specified |channel|, sets the maximum playback
// rate the receiver will render: |frequency_hz| (in Hz). // rate the receiver will render: |frequency_hz| (in Hz).
// TODO(minyue): Make SetOpusMaxPlaybackRate() pure virtual when // TODO(minyue): Make SetOpusMaxPlaybackRate() pure virtual when
// fakewebrtcvoiceengine in talk is ready. // fakewebrtcvoiceengine in talk is ready.
virtual int SetOpusMaxPlaybackRate(int channel, int frequency_hz) { virtual int SetOpusMaxPlaybackRate(int channel, int frequency_hz) {
return -1; return -1;
} }
// If send codec is Opus on a specified |channel|, set its DTX. Returns 0 if // If send codec is Opus on a specified |channel|, set its DTX. Returns 0 if
// success, and -1 if failed. // success, and -1 if failed.
virtual int SetOpusDtx(int channel, bool enable_dtx) = 0; virtual int SetOpusDtx(int channel, bool enable_dtx) = 0;
// Don't use. To be removed. // Don't use. To be removed.
virtual int SetAMREncFormat(int channel, AmrMode mode) { return -1; } virtual int SetAMREncFormat(int channel, AmrMode mode) { return -1; }
virtual int SetAMRDecFormat(int channel, AmrMode mode) { return -1; } virtual int SetAMRDecFormat(int channel, AmrMode mode) { return -1; }
virtual int SetAMRWbEncFormat(int channel, AmrMode mode) { return -1; } virtual int SetAMRWbEncFormat(int channel, AmrMode mode) { return -1; }
virtual int SetAMRWbDecFormat(int channel, AmrMode mode) { return -1; } virtual int SetAMRWbDecFormat(int channel, AmrMode mode) { return -1; }
virtual int SetISACInitTargetRate(int channel, int rateBps, virtual int SetISACInitTargetRate(int channel,
bool useFixedFrameSize = false) { return -1; } int rateBps,
virtual int SetISACMaxRate(int channel, int rateBps) { return -1; } bool useFixedFrameSize = false) {
virtual int SetISACMaxPayloadSize(int channel, int sizeBytes) { return -1; } return -1;
}
virtual int SetISACMaxRate(int channel, int rateBps) { return -1; }
virtual int SetISACMaxPayloadSize(int channel, int sizeBytes) { return -1; }
protected: protected:
VoECodec() {} VoECodec() {}
virtual ~VoECodec() {} virtual ~VoECodec() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -40,57 +40,57 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
// VoEDtmf // VoEDtmf
class WEBRTC_DLLEXPORT VoEDtmf class WEBRTC_DLLEXPORT VoEDtmf {
{ public:
public: // Factory for the VoEDtmf sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoEDtmf* GetInterface(VoiceEngine* voiceEngine);
// Factory for the VoEDtmf sub-API. Increases an internal // Releases the VoEDtmf sub-API and decreases an internal
// reference counter if successful. Returns NULL if the API is not // reference counter. Returns the new reference count. This value should
// supported or if construction fails. // be zero for all sub-API:s before the VoiceEngine object can be safely
static VoEDtmf* GetInterface(VoiceEngine* voiceEngine); // deleted.
virtual int Release() = 0;
// Releases the VoEDtmf sub-API and decreases an internal // Sends telephone events either in-band or out-of-band.
// reference counter. Returns the new reference count. This value should virtual int SendTelephoneEvent(int channel,
// be zero for all sub-API:s before the VoiceEngine object can be safely int eventCode,
// deleted. bool outOfBand = true,
virtual int Release() = 0; int lengthMs = 160,
int attenuationDb = 10) = 0;
// Sends telephone events either in-band or out-of-band. // Sets the dynamic payload |type| that should be used for telephone
virtual int SendTelephoneEvent(int channel, int eventCode, // events.
bool outOfBand = true, int lengthMs = 160, virtual int SetSendTelephoneEventPayloadType(int channel,
int attenuationDb = 10) = 0; unsigned char type) = 0;
// Gets the currently set dynamic payload |type| for telephone events.
virtual int GetSendTelephoneEventPayloadType(int channel,
unsigned char& type) = 0;
// Sets the dynamic payload |type| that should be used for telephone // Toogles DTMF feedback state: when a DTMF tone is sent, the same tone
// events. // is played out on the speaker.
virtual int SetSendTelephoneEventPayloadType(int channel, virtual int SetDtmfFeedbackStatus(bool enable,
unsigned char type) = 0; bool directFeedback = false) = 0;
// Gets the DTMF feedback status.
virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) = 0;
// Gets the currently set dynamic payload |type| for telephone events. // Plays a DTMF feedback tone (only locally).
virtual int GetSendTelephoneEventPayloadType(int channel, virtual int PlayDtmfTone(int eventCode,
unsigned char& type) = 0; int lengthMs = 200,
int attenuationDb = 10) = 0;
// Toogles DTMF feedback state: when a DTMF tone is sent, the same tone // To be removed. Don't use.
// is played out on the speaker. virtual int StartPlayingDtmfTone(int eventCode, int attenuationDb = 10) {
virtual int SetDtmfFeedbackStatus(bool enable, return -1;
bool directFeedback = false) = 0; }
virtual int StopPlayingDtmfTone() { return -1; }
// Gets the DTMF feedback status. protected:
virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) = 0; VoEDtmf() {}
virtual ~VoEDtmf() {}
// Plays a DTMF feedback tone (only locally).
virtual int PlayDtmfTone(int eventCode, int lengthMs = 200,
int attenuationDb = 10) = 0;
// To be removed. Don't use.
virtual int StartPlayingDtmfTone(int eventCode,
int attenuationDb = 10) { return -1; }
virtual int StopPlayingDtmfTone() { return -1; }
protected:
VoEDtmf() {}
virtual ~VoEDtmf() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -17,73 +17,83 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
class AudioFrame; class AudioFrame;
class WEBRTC_DLLEXPORT VoEMediaProcess class WEBRTC_DLLEXPORT VoEMediaProcess {
{ public:
public: // The VoiceEngine user should override the Process() method in a
// The VoiceEngine user should override the Process() method in a // derived class. Process() will be called when audio is ready to
// derived class. Process() will be called when audio is ready to // be processed. The audio can be accessed in several different modes
// be processed. The audio can be accessed in several different modes // given by the |type| parameter. The function should modify the
// given by the |type| parameter. The function should modify the // original data and ensure that it is copied back to the |audio10ms|
// original data and ensure that it is copied back to the |audio10ms| // array. The number of samples in the frame cannot be changed.
// array. The number of samples in the frame cannot be changed. // The sampling frequency will depend upon the codec used.
// The sampling frequency will depend upon the codec used. // If |isStereo| is true, audio10ms will contain 16-bit PCM data
// If |isStereo| is true, audio10ms will contain 16-bit PCM data // samples in interleaved stereo format (L0,R0,L1,R1,...).
// samples in interleaved stereo format (L0,R0,L1,R1,...). virtual void Process(int channel,
virtual void Process(int channel, ProcessingTypes type, ProcessingTypes type,
int16_t audio10ms[], int length, int16_t audio10ms[],
int samplingFreq, bool isStereo) = 0; int length,
int samplingFreq,
bool isStereo) = 0;
protected: protected:
virtual ~VoEMediaProcess() {} virtual ~VoEMediaProcess() {}
}; };
class WEBRTC_DLLEXPORT VoEExternalMedia class WEBRTC_DLLEXPORT VoEExternalMedia {
{ public:
public: // Factory for the VoEExternalMedia sub-API. Increases an internal
// Factory for the VoEExternalMedia sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoEExternalMedia* GetInterface(VoiceEngine* voiceEngine);
static VoEExternalMedia* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEExternalMedia sub-API and decreases an internal // Releases the VoEExternalMedia sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Installs a VoEMediaProcess derived instance and activates external // Installs a VoEMediaProcess derived instance and activates external
// media for the specified |channel| and |type|. // media for the specified |channel| and |type|.
virtual int RegisterExternalMediaProcessing( virtual int RegisterExternalMediaProcessing(
int channel, ProcessingTypes type, VoEMediaProcess& processObject) = 0; int channel,
ProcessingTypes type,
VoEMediaProcess& processObject) = 0;
// Removes the VoEMediaProcess derived instance and deactivates external // Removes the VoEMediaProcess derived instance and deactivates external
// media for the specified |channel| and |type|. // media for the specified |channel| and |type|.
virtual int DeRegisterExternalMediaProcessing( virtual int DeRegisterExternalMediaProcessing(int channel,
int channel, ProcessingTypes type) = 0; ProcessingTypes type) = 0;
// Pulls an audio frame from the specified |channel| for external mixing. // Pulls an audio frame from the specified |channel| for external mixing.
// If the |desired_sample_rate_hz| is 0, the signal will be returned with // If the |desired_sample_rate_hz| is 0, the signal will be returned with
// its native frequency, otherwise it will be resampled. Valid frequencies // its native frequency, otherwise it will be resampled. Valid frequencies
// are 16, 22, 32, 44 or 48 kHz. // are 16, 22, 32, 44 or 48 kHz.
virtual int GetAudioFrame(int channel, int desired_sample_rate_hz, virtual int GetAudioFrame(int channel,
AudioFrame* frame) = 0; int desired_sample_rate_hz,
AudioFrame* frame) = 0;
// Sets the state of external mixing. Cannot be changed during playback. // Sets the state of external mixing. Cannot be changed during playback.
virtual int SetExternalMixing(int channel, bool enable) = 0; virtual int SetExternalMixing(int channel, bool enable) = 0;
// Don't use. To be removed. // Don't use. To be removed.
virtual int SetExternalRecordingStatus(bool enable) { return -1; } virtual int SetExternalRecordingStatus(bool enable) { return -1; }
virtual int SetExternalPlayoutStatus(bool enable) { return -1; } virtual int SetExternalPlayoutStatus(bool enable) { return -1; }
virtual int ExternalRecordingInsertData( virtual int ExternalRecordingInsertData(const int16_t speechData10ms[],
const int16_t speechData10ms[], int lengthSamples, int lengthSamples,
int samplingFreqHz, int current_delay_ms) { return -1; } int samplingFreqHz,
virtual int ExternalPlayoutGetData( int current_delay_ms) {
int16_t speechData10ms[], int samplingFreqHz, return -1;
int current_delay_ms, int& lengthSamples) { return -1; } }
virtual int ExternalPlayoutGetData(int16_t speechData10ms[],
int samplingFreqHz,
int current_delay_ms,
int& lengthSamples) {
return -1;
}
protected: protected:
VoEExternalMedia() {} VoEExternalMedia() {}
virtual ~VoEExternalMedia() {} virtual ~VoEExternalMedia() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -44,124 +44,142 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
class WEBRTC_DLLEXPORT VoEFile class WEBRTC_DLLEXPORT VoEFile {
{ public:
public: // Factory for the VoEFile sub-API. Increases an internal
// Factory for the VoEFile sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoEFile* GetInterface(VoiceEngine* voiceEngine);
static VoEFile* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEFile sub-API and decreases an internal // Releases the VoEFile sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Starts playing and mixing files with the local speaker signal for // Starts playing and mixing files with the local speaker signal for
// playout. // playout.
virtual int StartPlayingFileLocally( virtual int StartPlayingFileLocally(
int channel, int channel,
const char fileNameUTF8[1024], const char fileNameUTF8[1024],
bool loop = false, bool loop = false,
FileFormats format = kFileFormatPcm16kHzFile, FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0, float volumeScaling = 1.0,
int startPointMs = 0, int startPointMs = 0,
int stopPointMs = 0) = 0; int stopPointMs = 0) = 0;
// Starts playing and mixing streams with the local speaker signal for // Starts playing and mixing streams with the local speaker signal for
// playout. // playout.
virtual int StartPlayingFileLocally( virtual int StartPlayingFileLocally(
int channel, int channel,
InStream* stream, InStream* stream,
FileFormats format = kFileFormatPcm16kHzFile, FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0, float volumeScaling = 1.0,
int startPointMs = 0, int stopPointMs = 0) = 0; int startPointMs = 0,
int stopPointMs = 0) = 0;
// Stops playback of a file on a specific |channel|. // Stops playback of a file on a specific |channel|.
virtual int StopPlayingFileLocally(int channel) = 0; virtual int StopPlayingFileLocally(int channel) = 0;
// Returns the current file playing state for a specific |channel|. // Returns the current file playing state for a specific |channel|.
virtual int IsPlayingFileLocally(int channel) = 0; virtual int IsPlayingFileLocally(int channel) = 0;
// Starts reading data from a file and transmits the data either // Starts reading data from a file and transmits the data either
// mixed with or instead of the microphone signal. // mixed with or instead of the microphone signal.
virtual int StartPlayingFileAsMicrophone( virtual int StartPlayingFileAsMicrophone(
int channel, int channel,
const char fileNameUTF8[1024], const char fileNameUTF8[1024],
bool loop = false , bool loop = false,
bool mixWithMicrophone = false, bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile, FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) = 0; float volumeScaling = 1.0) = 0;
// Starts reading data from a stream and transmits the data either // Starts reading data from a stream and transmits the data either
// mixed with or instead of the microphone signal. // mixed with or instead of the microphone signal.
virtual int StartPlayingFileAsMicrophone( virtual int StartPlayingFileAsMicrophone(
int channel, int channel,
InStream* stream, InStream* stream,
bool mixWithMicrophone = false, bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile, FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) = 0; float volumeScaling = 1.0) = 0;
// Stops playing of a file as microphone signal for a specific |channel|. // Stops playing of a file as microphone signal for a specific |channel|.
virtual int StopPlayingFileAsMicrophone(int channel) = 0; virtual int StopPlayingFileAsMicrophone(int channel) = 0;
// Returns whether the |channel| is currently playing a file as microphone. // Returns whether the |channel| is currently playing a file as microphone.
virtual int IsPlayingFileAsMicrophone(int channel) = 0; virtual int IsPlayingFileAsMicrophone(int channel) = 0;
// Starts recording the mixed playout audio. // Starts recording the mixed playout audio.
virtual int StartRecordingPlayout(int channel, virtual int StartRecordingPlayout(int channel,
const char* fileNameUTF8, const char* fileNameUTF8,
CodecInst* compression = NULL, CodecInst* compression = NULL,
int maxSizeBytes = -1) = 0; int maxSizeBytes = -1) = 0;
// Stops recording the mixed playout audio. // Stops recording the mixed playout audio.
virtual int StopRecordingPlayout(int channel) = 0; virtual int StopRecordingPlayout(int channel) = 0;
virtual int StartRecordingPlayout(int channel, virtual int StartRecordingPlayout(int channel,
OutStream* stream, OutStream* stream,
CodecInst* compression = NULL) = 0; CodecInst* compression = NULL) = 0;
// Starts recording the microphone signal to a file. // Starts recording the microphone signal to a file.
virtual int StartRecordingMicrophone(const char* fileNameUTF8, virtual int StartRecordingMicrophone(const char* fileNameUTF8,
CodecInst* compression = NULL, CodecInst* compression = NULL,
int maxSizeBytes = -1) = 0; int maxSizeBytes = -1) = 0;
// Starts recording the microphone signal to a stream. // Starts recording the microphone signal to a stream.
virtual int StartRecordingMicrophone(OutStream* stream, virtual int StartRecordingMicrophone(OutStream* stream,
CodecInst* compression = NULL) = 0; CodecInst* compression = NULL) = 0;
// Stops recording the microphone signal. // Stops recording the microphone signal.
virtual int StopRecordingMicrophone() = 0; virtual int StopRecordingMicrophone() = 0;
// Don't use. To be removed. // Don't use. To be removed.
virtual int ScaleLocalFilePlayout(int channel, float scale) { return -1; } virtual int ScaleLocalFilePlayout(int channel, float scale) { return -1; }
virtual int ScaleFileAsMicrophonePlayout( virtual int ScaleFileAsMicrophonePlayout(int channel, float scale) {
int channel, float scale) { return -1; } return -1;
virtual int GetFileDuration(const char* fileNameUTF8, int& durationMs, }
FileFormats format = kFileFormatPcm16kHzFile) { return -1; } virtual int GetFileDuration(const char* fileNameUTF8,
virtual int GetPlaybackPosition(int channel, int& positionMs) { return -1; } int& durationMs,
virtual int ConvertPCMToWAV(const char* fileNameInUTF8, FileFormats format = kFileFormatPcm16kHzFile) {
const char* fileNameOutUTF8) { return -1; } return -1;
virtual int ConvertPCMToWAV(InStream* streamIn, }
OutStream* streamOut) { return -1; } virtual int GetPlaybackPosition(int channel, int& positionMs) { return -1; }
virtual int ConvertWAVToPCM(const char* fileNameInUTF8, virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
const char* fileNameOutUTF8) { return -1; } const char* fileNameOutUTF8) {
virtual int ConvertWAVToPCM(InStream* streamIn, return -1;
OutStream* streamOut) { return -1; } }
virtual int ConvertPCMToCompressed(const char* fileNameInUTF8, virtual int ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut) {
const char* fileNameOutUTF8, return -1;
CodecInst* compression) { return -1; } }
virtual int ConvertPCMToCompressed(InStream* streamIn, virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
OutStream* streamOut, const char* fileNameOutUTF8) {
CodecInst* compression) { return -1; } return -1;
virtual int ConvertCompressedToPCM(const char* fileNameInUTF8, }
const char* fileNameOutUTF8) { return -1; } virtual int ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut) {
virtual int ConvertCompressedToPCM(InStream* streamIn, return -1;
OutStream* streamOut) { return -1; } }
protected: virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
VoEFile() {} const char* fileNameOutUTF8,
virtual ~VoEFile() {} CodecInst* compression) {
return -1;
}
virtual int ConvertPCMToCompressed(InStream* streamIn,
OutStream* streamOut,
CodecInst* compression) {
return -1;
}
virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
const char* fileNameOutUTF8) {
return -1;
}
virtual int ConvertCompressedToPCM(InStream* streamIn, OutStream* streamOut) {
return -1;
}
protected:
VoEFile() {}
virtual ~VoEFile() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -38,75 +38,79 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
class WEBRTC_DLLEXPORT VoEHardware class WEBRTC_DLLEXPORT VoEHardware {
{ public:
public: // Factory for the VoEHardware sub-API. Increases an internal
// Factory for the VoEHardware sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoEHardware* GetInterface(VoiceEngine* voiceEngine);
static VoEHardware* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEHardware sub-API and decreases an internal // Releases the VoEHardware sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Gets the number of audio devices available for recording. // Gets the number of audio devices available for recording.
virtual int GetNumOfRecordingDevices(int& devices) = 0; virtual int GetNumOfRecordingDevices(int& devices) = 0;
// Gets the number of audio devices available for playout. // Gets the number of audio devices available for playout.
virtual int GetNumOfPlayoutDevices(int& devices) = 0; virtual int GetNumOfPlayoutDevices(int& devices) = 0;
// Gets the name of a specific recording device given by an |index|. // Gets the name of a specific recording device given by an |index|.
// On Windows Vista/7, it also retrieves an additional unique ID // On Windows Vista/7, it also retrieves an additional unique ID
// (GUID) for the recording device. // (GUID) for the recording device.
virtual int GetRecordingDeviceName(int index, char strNameUTF8[128], virtual int GetRecordingDeviceName(int index,
char strGuidUTF8[128]) = 0; char strNameUTF8[128],
// Gets the name of a specific playout device given by an |index|.
// On Windows Vista/7, it also retrieves an additional unique ID
// (GUID) for the playout device.
virtual int GetPlayoutDeviceName(int index, char strNameUTF8[128],
char strGuidUTF8[128]) = 0; char strGuidUTF8[128]) = 0;
// Sets the audio device used for recording. // Gets the name of a specific playout device given by an |index|.
virtual int SetRecordingDevice( // On Windows Vista/7, it also retrieves an additional unique ID
int index, StereoChannel recordingChannel = kStereoBoth) = 0; // (GUID) for the playout device.
virtual int GetPlayoutDeviceName(int index,
char strNameUTF8[128],
char strGuidUTF8[128]) = 0;
// Sets the audio device used for playout. // Sets the audio device used for recording.
virtual int SetPlayoutDevice(int index) = 0; virtual int SetRecordingDevice(
int index,
StereoChannel recordingChannel = kStereoBoth) = 0;
// Sets the type of audio device layer to use. // Sets the audio device used for playout.
virtual int SetAudioDeviceLayer(AudioLayers audioLayer) = 0; virtual int SetPlayoutDevice(int index) = 0;
// Gets the currently used (active) audio device layer. // Sets the type of audio device layer to use.
virtual int GetAudioDeviceLayer(AudioLayers& audioLayer) = 0; virtual int SetAudioDeviceLayer(AudioLayers audioLayer) = 0;
// Native sample rate controls (samples/sec) // Gets the currently used (active) audio device layer.
virtual int SetRecordingSampleRate(unsigned int samples_per_sec) = 0; virtual int GetAudioDeviceLayer(AudioLayers& audioLayer) = 0;
virtual int RecordingSampleRate(unsigned int* samples_per_sec) const = 0;
virtual int SetPlayoutSampleRate(unsigned int samples_per_sec) = 0;
virtual int PlayoutSampleRate(unsigned int* samples_per_sec) const = 0;
virtual bool BuiltInAECIsAvailable() const = 0; // Native sample rate controls (samples/sec)
virtual int EnableBuiltInAEC(bool enable) = 0; virtual int SetRecordingSampleRate(unsigned int samples_per_sec) = 0;
virtual int RecordingSampleRate(unsigned int* samples_per_sec) const = 0;
virtual int SetPlayoutSampleRate(unsigned int samples_per_sec) = 0;
virtual int PlayoutSampleRate(unsigned int* samples_per_sec) const = 0;
// To be removed. Don't use. virtual bool BuiltInAECIsAvailable() const = 0;
virtual bool BuiltInAECIsEnabled() const { return false; } virtual int EnableBuiltInAEC(bool enable) = 0;
virtual int GetRecordingDeviceStatus(bool& isAvailable) { return -1; }
virtual int GetPlayoutDeviceStatus(bool& isAvailable) { return -1; }
virtual int ResetAudioDevice() { return -1; }
virtual int AudioDeviceControl(unsigned int par1, unsigned int par2,
unsigned int par3) { return -1; }
virtual int SetLoudspeakerStatus(bool enable) { return -1; }
virtual int GetLoudspeakerStatus(bool& enabled) { return -1; }
virtual int GetCPULoad(int& loadPercent) { return -1; }
// To be removed. Don't use.
virtual bool BuiltInAECIsEnabled() const { return false; }
virtual int GetRecordingDeviceStatus(bool& isAvailable) { return -1; }
virtual int GetPlayoutDeviceStatus(bool& isAvailable) { return -1; }
virtual int ResetAudioDevice() { return -1; }
virtual int AudioDeviceControl(unsigned int par1,
unsigned int par2,
unsigned int par3) {
return -1;
}
virtual int SetLoudspeakerStatus(bool enable) { return -1; }
virtual int GetLoudspeakerStatus(bool& enabled) { return -1; }
virtual int GetCPULoad(int& loadPercent) { return -1; }
protected: protected:
VoEHardware() {} VoEHardware() {}
virtual ~VoEHardware() {} virtual ~VoEHardware() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -17,33 +17,33 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
class WEBRTC_DLLEXPORT VoENetEqStats class WEBRTC_DLLEXPORT VoENetEqStats {
{ public:
public: // Factory for the VoENetEqStats sub-API. Increases an internal
// Factory for the VoENetEqStats sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoENetEqStats* GetInterface(VoiceEngine* voiceEngine);
static VoENetEqStats* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoENetEqStats sub-API and decreases an internal // Releases the VoENetEqStats sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Get the "in-call" statistics from NetEQ. // Get the "in-call" statistics from NetEQ.
// The statistics are reset after the query. // The statistics are reset after the query.
virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0; virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0;
// Get statistics of calls to AudioCodingModule::PlayoutData10Ms(). // Get statistics of calls to AudioCodingModule::PlayoutData10Ms().
virtual int GetDecodingCallStatistics( virtual int GetDecodingCallStatistics(
int channel, AudioDecodingCallStats* stats) const = 0; int channel,
AudioDecodingCallStats* stats) const = 0;
protected: protected:
VoENetEqStats() {} VoENetEqStats() {}
virtual ~VoENetEqStats() {} virtual ~VoENetEqStats() {}
}; };
} // namespace webrtc } // namespace webrtc
#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H #endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H

View File

@ -41,52 +41,50 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
// VoENetwork // VoENetwork
class WEBRTC_DLLEXPORT VoENetwork class WEBRTC_DLLEXPORT VoENetwork {
{ public:
public: // Factory for the VoENetwork sub-API. Increases an internal
// Factory for the VoENetwork sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoENetwork* GetInterface(VoiceEngine* voiceEngine);
static VoENetwork* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoENetwork sub-API and decreases an internal // Releases the VoENetwork sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Installs and enables a user-defined external transport protocol for a // Installs and enables a user-defined external transport protocol for a
// specified |channel|. // specified |channel|.
virtual int RegisterExternalTransport( virtual int RegisterExternalTransport(int channel, Transport& transport) = 0;
int channel, Transport& transport) = 0;
// Removes and disables a user-defined external transport protocol for a // Removes and disables a user-defined external transport protocol for a
// specified |channel|. // specified |channel|.
virtual int DeRegisterExternalTransport(int channel) = 0; virtual int DeRegisterExternalTransport(int channel) = 0;
// The packets received from the network should be passed to this // The packets received from the network should be passed to this
// function when external transport is enabled. Note that the data // function when external transport is enabled. Note that the data
// including the RTP-header must also be given to the VoiceEngine. // including the RTP-header must also be given to the VoiceEngine.
virtual int ReceivedRTPPacket(int channel, virtual int ReceivedRTPPacket(int channel,
const void* data, const void* data,
size_t length) = 0; size_t length) = 0;
virtual int ReceivedRTPPacket(int channel, virtual int ReceivedRTPPacket(int channel,
const void* data, const void* data,
size_t length, size_t length,
const PacketTime& packet_time) { const PacketTime& packet_time) {
return 0; return 0;
} }
// The packets received from the network should be passed to this // The packets received from the network should be passed to this
// function when external transport is enabled. Note that the data // function when external transport is enabled. Note that the data
// including the RTCP-header must also be given to the VoiceEngine. // including the RTCP-header must also be given to the VoiceEngine.
virtual int ReceivedRTCPPacket(int channel, virtual int ReceivedRTCPPacket(int channel,
const void* data, const void* data,
size_t length) = 0; size_t length) = 0;
protected: protected:
VoENetwork() {} VoENetwork() {}
virtual ~VoENetwork() {} virtual ~VoENetwork() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -48,34 +48,32 @@ class ViENetwork;
class VoiceEngine; class VoiceEngine;
// VoERTPObserver // VoERTPObserver
class WEBRTC_DLLEXPORT VoERTPObserver class WEBRTC_DLLEXPORT VoERTPObserver {
{ public:
public: virtual void OnIncomingCSRCChanged(int channel,
virtual void OnIncomingCSRCChanged( unsigned int CSRC,
int channel, unsigned int CSRC, bool added) = 0; bool added) = 0;
virtual void OnIncomingSSRCChanged( virtual void OnIncomingSSRCChanged(int channel, unsigned int SSRC) = 0;
int channel, unsigned int SSRC) = 0;
protected: protected:
virtual ~VoERTPObserver() {} virtual ~VoERTPObserver() {}
}; };
// CallStatistics // CallStatistics
struct CallStatistics struct CallStatistics {
{ unsigned short fractionLost;
unsigned short fractionLost; unsigned int cumulativeLost;
unsigned int cumulativeLost; unsigned int extendedMax;
unsigned int extendedMax; unsigned int jitterSamples;
unsigned int jitterSamples; int64_t rttMs;
int64_t rttMs; size_t bytesSent;
size_t bytesSent; int packetsSent;
int packetsSent; size_t bytesReceived;
size_t bytesReceived; int packetsReceived;
int packetsReceived; // The capture ntp time (in local timebase) of the first played out audio
// The capture ntp time (in local timebase) of the first played out audio // frame.
// frame. int64_t capture_start_ntp_time_ms_;
int64_t capture_start_ntp_time_ms_;
}; };
// See section 6.4.1 in http://www.ietf.org/rfc/rfc3550.txt for details. // See section 6.4.1 in http://www.ietf.org/rfc/rfc3550.txt for details.
@ -89,7 +87,7 @@ struct SenderInfo {
// See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details. // See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details.
struct ReportBlock { struct ReportBlock {
uint32_t sender_SSRC; // SSRC of sender uint32_t sender_SSRC; // SSRC of sender
uint32_t source_SSRC; uint32_t source_SSRC;
uint8_t fraction_lost; uint8_t fraction_lost;
uint32_t cumulative_num_packets_lost; uint32_t cumulative_num_packets_lost;
@ -100,177 +98,191 @@ struct ReportBlock {
}; };
// VoERTP_RTCP // VoERTP_RTCP
class WEBRTC_DLLEXPORT VoERTP_RTCP class WEBRTC_DLLEXPORT VoERTP_RTCP {
{ public:
public: // Factory for the VoERTP_RTCP sub-API. Increases an internal
// reference counter if successful. Returns NULL if the API is not
// supported or if construction fails.
static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine);
// Factory for the VoERTP_RTCP sub-API. Increases an internal // Releases the VoERTP_RTCP sub-API and decreases an internal
// reference counter if successful. Returns NULL if the API is not // reference counter. Returns the new reference count. This value should
// supported or if construction fails. // be zero for all sub-API:s before the VoiceEngine object can be safely
static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine); // deleted.
virtual int Release() = 0;
// Releases the VoERTP_RTCP sub-API and decreases an internal // Sets the local RTP synchronization source identifier (SSRC) explicitly.
// reference counter. Returns the new reference count. This value should virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0;
// be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted.
virtual int Release() = 0;
// Sets the local RTP synchronization source identifier (SSRC) explicitly. // Gets the local RTP SSRC of a specified |channel|.
virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0; virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0;
// Gets the local RTP SSRC of a specified |channel|. // Gets the SSRC of the incoming RTP packets.
virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0; virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0;
// Gets the SSRC of the incoming RTP packets. // Sets the status of rtp-audio-level-indication on a specific |channel|.
virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0; virtual int SetSendAudioLevelIndicationStatus(int channel,
// Sets the status of rtp-audio-level-indication on a specific |channel|.
virtual int SetSendAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char id = 1) = 0;
// Sets the status of receiving rtp-audio-level-indication on a specific
// |channel|.
virtual int SetReceiveAudioLevelIndicationStatus(int channel,
bool enable,
unsigned char id = 1) {
// TODO(wu): Remove default implementation once talk is updated.
return 0;
}
// Sets the status of sending absolute sender time on a specific |channel|.
virtual int SetSendAbsoluteSenderTimeStatus(int channel,
bool enable, bool enable,
unsigned char id) = 0; unsigned char id = 1) = 0;
// Sets status of receiving absolute sender time on a specific |channel|. // Sets the status of receiving rtp-audio-level-indication on a specific
virtual int SetReceiveAbsoluteSenderTimeStatus(int channel, // |channel|.
virtual int SetReceiveAudioLevelIndicationStatus(int channel,
bool enable, bool enable,
unsigned char id) = 0; unsigned char id = 1) {
// TODO(wu): Remove default implementation once talk is updated.
return 0;
}
// Sets the RTCP status on a specific |channel|. // Sets the status of sending absolute sender time on a specific |channel|.
virtual int SetRTCPStatus(int channel, bool enable) = 0; virtual int SetSendAbsoluteSenderTimeStatus(int channel,
bool enable,
unsigned char id) = 0;
// Gets the RTCP status on a specific |channel|. // Sets status of receiving absolute sender time on a specific |channel|.
virtual int GetRTCPStatus(int channel, bool& enabled) = 0; virtual int SetReceiveAbsoluteSenderTimeStatus(int channel,
bool enable,
unsigned char id) = 0;
// Sets the canonical name (CNAME) parameter for RTCP reports on a // Sets the RTCP status on a specific |channel|.
// specific |channel|. virtual int SetRTCPStatus(int channel, bool enable) = 0;
virtual int SetRTCP_CNAME(int channel, const char cName[256]) = 0;
// TODO(holmer): Remove this API once it has been removed from // Gets the RTCP status on a specific |channel|.
// fakewebrtcvoiceengine.h. virtual int GetRTCPStatus(int channel, bool& enabled) = 0;
virtual int GetRTCP_CNAME(int channel, char cName[256]) {
return -1;
}
// Gets the canonical name (CNAME) parameter for incoming RTCP reports // Sets the canonical name (CNAME) parameter for RTCP reports on a
// on a specific channel. // specific |channel|.
virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]) = 0; virtual int SetRTCP_CNAME(int channel, const char cName[256]) = 0;
// Gets RTCP data from incoming RTCP Sender Reports. // TODO(holmer): Remove this API once it has been removed from
virtual int GetRemoteRTCPData( // fakewebrtcvoiceengine.h.
int channel, unsigned int& NTPHigh, unsigned int& NTPLow, virtual int GetRTCP_CNAME(int channel, char cName[256]) { return -1; }
unsigned int& timestamp, unsigned int& playoutTimestamp,
unsigned int* jitter = NULL, unsigned short* fractionLost = NULL) = 0;
// Gets RTP statistics for a specific |channel|. // Gets the canonical name (CNAME) parameter for incoming RTCP reports
virtual int GetRTPStatistics( // on a specific channel.
int channel, unsigned int& averageJitterMs, unsigned int& maxJitterMs, virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]) = 0;
unsigned int& discardedPackets) = 0;
// Gets RTCP statistics for a specific |channel|. // Gets RTCP data from incoming RTCP Sender Reports.
virtual int GetRTCPStatistics(int channel, CallStatistics& stats) = 0; virtual int GetRemoteRTCPData(int channel,
unsigned int& NTPHigh,
unsigned int& NTPLow,
unsigned int& timestamp,
unsigned int& playoutTimestamp,
unsigned int* jitter = NULL,
unsigned short* fractionLost = NULL) = 0;
// Gets the report block parts of the last received RTCP Sender Report (SR), // Gets RTP statistics for a specific |channel|.
// or RTCP Receiver Report (RR) on a specified |channel|. Each vector virtual int GetRTPStatistics(int channel,
// element also contains the SSRC of the sender in addition to a report unsigned int& averageJitterMs,
// block. unsigned int& maxJitterMs,
virtual int GetRemoteRTCPReportBlocks( unsigned int& discardedPackets) = 0;
int channel, std::vector<ReportBlock>* receive_blocks) = 0;
// Sets the Redundant Coding (RED) status on a specific |channel|. // Gets RTCP statistics for a specific |channel|.
// TODO(minyue): Make SetREDStatus() pure virtual when fakewebrtcvoiceengine virtual int GetRTCPStatistics(int channel, CallStatistics& stats) = 0;
// in talk is ready.
virtual int SetREDStatus(
int channel, bool enable, int redPayloadtype = -1) { return -1; }
// Gets the RED status on a specific |channel|. // Gets the report block parts of the last received RTCP Sender Report (SR),
// TODO(minyue): Make GetREDStatus() pure virtual when fakewebrtcvoiceengine // or RTCP Receiver Report (RR) on a specified |channel|. Each vector
// in talk is ready. // element also contains the SSRC of the sender in addition to a report
virtual int GetREDStatus( // block.
int channel, bool& enabled, int& redPayloadtype) { return -1; } virtual int GetRemoteRTCPReportBlocks(
int channel,
std::vector<ReportBlock>* receive_blocks) = 0;
// Sets the Forward Error Correction (FEC) status on a specific |channel|. // Sets the Redundant Coding (RED) status on a specific |channel|.
// TODO(minyue): Remove SetFECStatus() when SetFECStatus() is replaced by // TODO(minyue): Make SetREDStatus() pure virtual when fakewebrtcvoiceengine
// SetREDStatus() in fakewebrtcvoiceengine. // in talk is ready.
virtual int SetFECStatus( virtual int SetREDStatus(int channel, bool enable, int redPayloadtype = -1) {
int channel, bool enable, int redPayloadtype = -1) { return -1;
return SetREDStatus(channel, enable, redPayloadtype); }
};
// Gets the FEC status on a specific |channel|. // Gets the RED status on a specific |channel|.
// TODO(minyue): Remove GetFECStatus() when GetFECStatus() is replaced by // TODO(minyue): Make GetREDStatus() pure virtual when fakewebrtcvoiceengine
// GetREDStatus() in fakewebrtcvoiceengine. // in talk is ready.
virtual int GetFECStatus( virtual int GetREDStatus(int channel, bool& enabled, int& redPayloadtype) {
int channel, bool& enabled, int& redPayloadtype) { return -1;
return SetREDStatus(channel, enabled, redPayloadtype); }
}
// This function enables Negative Acknowledgment (NACK) using RTCP, // Sets the Forward Error Correction (FEC) status on a specific |channel|.
// implemented based on RFC 4585. NACK retransmits RTP packets if lost on // TODO(minyue): Remove SetFECStatus() when SetFECStatus() is replaced by
// the network. This creates a lossless transport at the expense of delay. // SetREDStatus() in fakewebrtcvoiceengine.
// If using NACK, NACK should be enabled on both endpoints in a call. virtual int SetFECStatus(int channel, bool enable, int redPayloadtype = -1) {
virtual int SetNACKStatus(int channel, return SetREDStatus(channel, enable, redPayloadtype);
bool enable, };
int maxNoPackets) = 0;
// Enables capturing of RTP packets to a binary file on a specific // Gets the FEC status on a specific |channel|.
// |channel| and for a given |direction|. The file can later be replayed // TODO(minyue): Remove GetFECStatus() when GetFECStatus() is replaced by
// using e.g. RTP Tools rtpplay since the binary file format is // GetREDStatus() in fakewebrtcvoiceengine.
// compatible with the rtpdump format. virtual int GetFECStatus(int channel, bool& enabled, int& redPayloadtype) {
virtual int StartRTPDump( return SetREDStatus(channel, enabled, redPayloadtype);
int channel, const char fileNameUTF8[1024], }
RTPDirections direction = kRtpIncoming) = 0;
// Disables capturing of RTP packets to a binary file on a specific // This function enables Negative Acknowledgment (NACK) using RTCP,
// |channel| and for a given |direction|. // implemented based on RFC 4585. NACK retransmits RTP packets if lost on
virtual int StopRTPDump( // the network. This creates a lossless transport at the expense of delay.
int channel, RTPDirections direction = kRtpIncoming) = 0; // If using NACK, NACK should be enabled on both endpoints in a call.
virtual int SetNACKStatus(int channel, bool enable, int maxNoPackets) = 0;
// Gets the the current RTP capturing state for the specified // Enables capturing of RTP packets to a binary file on a specific
// |channel| and |direction|. // |channel| and for a given |direction|. The file can later be replayed
virtual int RTPDumpIsActive( // using e.g. RTP Tools rtpplay since the binary file format is
int channel, RTPDirections direction = kRtpIncoming) = 0; // compatible with the rtpdump format.
virtual int StartRTPDump(int channel,
const char fileNameUTF8[1024],
RTPDirections direction = kRtpIncoming) = 0;
// Sets video engine channel to receive incoming audio packets for // Disables capturing of RTP packets to a binary file on a specific
// aggregated bandwidth estimation. Takes ownership of the ViENetwork // |channel| and for a given |direction|.
// interface. virtual int StopRTPDump(int channel,
virtual int SetVideoEngineBWETarget(int channel, ViENetwork* vie_network, RTPDirections direction = kRtpIncoming) = 0;
int video_channel) {
return 0;
}
// Will be removed. Don't use. // Gets the the current RTP capturing state for the specified
virtual int RegisterRTPObserver(int channel, // |channel| and |direction|.
VoERTPObserver& observer) { return -1; }; virtual int RTPDumpIsActive(int channel,
virtual int DeRegisterRTPObserver(int channel) { return -1; }; RTPDirections direction = kRtpIncoming) = 0;
virtual int GetRemoteCSRCs(int channel,
unsigned int arrCSRC[15]) { return -1; };
virtual int InsertExtraRTPPacket(
int channel, unsigned char payloadType, bool markerBit,
const char* payloadData, unsigned short payloadSize) { return -1; };
virtual int GetRemoteRTCPSenderInfo(
int channel, SenderInfo* sender_info) { return -1; };
virtual int SendApplicationDefinedRTCPPacket(
int channel, unsigned char subType, unsigned int name,
const char* data, unsigned short dataLengthInBytes) { return -1; };
virtual int GetLastRemoteTimeStamp(int channel,
uint32_t* lastRemoteTimeStamp) { return -1; };
protected: // Sets video engine channel to receive incoming audio packets for
VoERTP_RTCP() {} // aggregated bandwidth estimation. Takes ownership of the ViENetwork
virtual ~VoERTP_RTCP() {} // interface.
virtual int SetVideoEngineBWETarget(int channel,
ViENetwork* vie_network,
int video_channel) {
return 0;
}
// Will be removed. Don't use.
virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer) {
return -1;
};
virtual int DeRegisterRTPObserver(int channel) { return -1; };
virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]) {
return -1;
};
virtual int InsertExtraRTPPacket(int channel,
unsigned char payloadType,
bool markerBit,
const char* payloadData,
unsigned short payloadSize) {
return -1;
};
virtual int GetRemoteRTCPSenderInfo(int channel, SenderInfo* sender_info) {
return -1;
};
virtual int SendApplicationDefinedRTCPPacket(
int channel,
unsigned char subType,
unsigned int name,
const char* data,
unsigned short dataLengthInBytes) {
return -1;
};
virtual int GetLastRemoteTimeStamp(int channel,
uint32_t* lastRemoteTimeStamp) {
return -1;
};
protected:
VoERTP_RTCP() {}
virtual ~VoERTP_RTCP() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -41,64 +41,64 @@ class RtpReceiver;
class RtpRtcp; class RtpRtcp;
class VoiceEngine; class VoiceEngine;
class WEBRTC_DLLEXPORT VoEVideoSync class WEBRTC_DLLEXPORT VoEVideoSync {
{ public:
public: // Factory for the VoEVideoSync sub-API. Increases an internal
// Factory for the VoEVideoSync sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoEVideoSync* GetInterface(VoiceEngine* voiceEngine);
static VoEVideoSync* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEVideoSync sub-API and decreases an internal // Releases the VoEVideoSync sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Gets the current sound card buffer size (playout delay). // Gets the current sound card buffer size (playout delay).
virtual int GetPlayoutBufferSize(int& buffer_ms) = 0; virtual int GetPlayoutBufferSize(int& buffer_ms) = 0;
// Sets a minimum target delay for the jitter buffer. This delay is // Sets a minimum target delay for the jitter buffer. This delay is
// maintained by the jitter buffer, unless channel condition (jitter in // maintained by the jitter buffer, unless channel condition (jitter in
// inter-arrival times) dictates a higher required delay. The overall // inter-arrival times) dictates a higher required delay. The overall
// jitter buffer delay is max of |delay_ms| and the latency that NetEq // jitter buffer delay is max of |delay_ms| and the latency that NetEq
// computes based on inter-arrival times and its playout mode. // computes based on inter-arrival times and its playout mode.
virtual int SetMinimumPlayoutDelay(int channel, int delay_ms) = 0; virtual int SetMinimumPlayoutDelay(int channel, int delay_ms) = 0;
// Sets an initial delay for the playout jitter buffer. The playout of the // Sets an initial delay for the playout jitter buffer. The playout of the
// audio is delayed by |delay_ms| in milliseconds. Thereafter, the delay is // audio is delayed by |delay_ms| in milliseconds. Thereafter, the delay is
// maintained, unless NetEq's internal mechanism requires a higher latency. // maintained, unless NetEq's internal mechanism requires a higher latency.
// Such a latency is computed based on inter-arrival times and NetEq's // Such a latency is computed based on inter-arrival times and NetEq's
// playout mode. // playout mode.
virtual int SetInitialPlayoutDelay(int channel, int delay_ms) = 0; virtual int SetInitialPlayoutDelay(int channel, int delay_ms) = 0;
// Gets the |jitter_buffer_delay_ms| (including the algorithmic delay), and // Gets the |jitter_buffer_delay_ms| (including the algorithmic delay), and
// the |playout_buffer_delay_ms| for a specified |channel|. // the |playout_buffer_delay_ms| for a specified |channel|.
virtual int GetDelayEstimate(int channel, virtual int GetDelayEstimate(int channel,
int* jitter_buffer_delay_ms, int* jitter_buffer_delay_ms,
int* playout_buffer_delay_ms) = 0; int* playout_buffer_delay_ms) = 0;
// Returns the least required jitter buffer delay. This is computed by the // Returns the least required jitter buffer delay. This is computed by the
// the jitter buffer based on the inter-arrival time of RTP packets and // the jitter buffer based on the inter-arrival time of RTP packets and
// playout mode. NetEq maintains this latency unless a higher value is // playout mode. NetEq maintains this latency unless a higher value is
// requested by calling SetMinimumPlayoutDelay(). // requested by calling SetMinimumPlayoutDelay().
virtual int GetLeastRequiredDelayMs(int channel) const = 0; virtual int GetLeastRequiredDelayMs(int channel) const = 0;
// Manual initialization of the RTP timestamp. // Manual initialization of the RTP timestamp.
virtual int SetInitTimestamp(int channel, unsigned int timestamp) = 0; virtual int SetInitTimestamp(int channel, unsigned int timestamp) = 0;
// Manual initialization of the RTP sequence number. // Manual initialization of the RTP sequence number.
virtual int SetInitSequenceNumber(int channel, short sequenceNumber) = 0; virtual int SetInitSequenceNumber(int channel, short sequenceNumber) = 0;
// Get the received RTP timestamp // Get the received RTP timestamp
virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp) = 0; virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp) = 0;
virtual int GetRtpRtcp (int channel, RtpRtcp** rtpRtcpModule, virtual int GetRtpRtcp(int channel,
RtpReceiver** rtp_receiver) = 0; RtpRtcp** rtpRtcpModule,
RtpReceiver** rtp_receiver) = 0;
protected: protected:
VoEVideoSync() { } VoEVideoSync() {}
virtual ~VoEVideoSync() { } virtual ~VoEVideoSync() {}
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -42,78 +42,77 @@ namespace webrtc {
class VoiceEngine; class VoiceEngine;
class WEBRTC_DLLEXPORT VoEVolumeControl class WEBRTC_DLLEXPORT VoEVolumeControl {
{ public:
public: // Factory for the VoEVolumeControl sub-API. Increases an internal
// Factory for the VoEVolumeControl sub-API. Increases an internal // reference counter if successful. Returns NULL if the API is not
// reference counter if successful. Returns NULL if the API is not // supported or if construction fails.
// supported or if construction fails. static VoEVolumeControl* GetInterface(VoiceEngine* voiceEngine);
static VoEVolumeControl* GetInterface(VoiceEngine* voiceEngine);
// Releases the VoEVolumeControl sub-API and decreases an internal // Releases the VoEVolumeControl sub-API and decreases an internal
// reference counter. Returns the new reference count. This value should // reference counter. Returns the new reference count. This value should
// be zero for all sub-API:s before the VoiceEngine object can be safely // be zero for all sub-API:s before the VoiceEngine object can be safely
// deleted. // deleted.
virtual int Release() = 0; virtual int Release() = 0;
// Sets the speaker |volume| level. Valid range is [0,255]. // Sets the speaker |volume| level. Valid range is [0,255].
virtual int SetSpeakerVolume(unsigned int volume) = 0; virtual int SetSpeakerVolume(unsigned int volume) = 0;
// Gets the speaker |volume| level. // Gets the speaker |volume| level.
virtual int GetSpeakerVolume(unsigned int& volume) = 0; virtual int GetSpeakerVolume(unsigned int& volume) = 0;
// Sets the microphone volume level. Valid range is [0,255]. // Sets the microphone volume level. Valid range is [0,255].
virtual int SetMicVolume(unsigned int volume) = 0; virtual int SetMicVolume(unsigned int volume) = 0;
// Gets the microphone volume level. // Gets the microphone volume level.
virtual int GetMicVolume(unsigned int& volume) = 0; virtual int GetMicVolume(unsigned int& volume) = 0;
// Mutes the microphone input signal completely without affecting // Mutes the microphone input signal completely without affecting
// the audio device volume. // the audio device volume.
virtual int SetInputMute(int channel, bool enable) = 0; virtual int SetInputMute(int channel, bool enable) = 0;
// Gets the current microphone input mute state. // Gets the current microphone input mute state.
virtual int GetInputMute(int channel, bool& enabled) = 0; virtual int GetInputMute(int channel, bool& enabled) = 0;
// Gets the microphone speech |level|, mapped non-linearly to the range // Gets the microphone speech |level|, mapped non-linearly to the range
// [0,9]. // [0,9].
virtual int GetSpeechInputLevel(unsigned int& level) = 0; virtual int GetSpeechInputLevel(unsigned int& level) = 0;
// Gets the speaker speech |level|, mapped non-linearly to the range // Gets the speaker speech |level|, mapped non-linearly to the range
// [0,9]. // [0,9].
virtual int GetSpeechOutputLevel(int channel, unsigned int& level) = 0; virtual int GetSpeechOutputLevel(int channel, unsigned int& level) = 0;
// Gets the microphone speech |level|, mapped linearly to the range // Gets the microphone speech |level|, mapped linearly to the range
// [0,32768]. // [0,32768].
virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0; virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0;
// Gets the speaker speech |level|, mapped linearly to the range [0,32768]. // Gets the speaker speech |level|, mapped linearly to the range [0,32768].
virtual int GetSpeechOutputLevelFullRange( virtual int GetSpeechOutputLevelFullRange(int channel,
int channel, unsigned int& level) = 0; unsigned int& level) = 0;
// Sets a volume |scaling| applied to the outgoing signal of a specific // Sets a volume |scaling| applied to the outgoing signal of a specific
// channel. Valid scale range is [0.0, 10.0]. // channel. Valid scale range is [0.0, 10.0].
virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0; virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0;
// Gets the current volume scaling for a specified |channel|. // Gets the current volume scaling for a specified |channel|.
virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0; virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0;
// Scales volume of the |left| and |right| channels independently. // Scales volume of the |left| and |right| channels independently.
// Valid scale range is [0.0, 1.0]. // Valid scale range is [0.0, 1.0].
virtual int SetOutputVolumePan(int channel, float left, float right) = 0; virtual int SetOutputVolumePan(int channel, float left, float right) = 0;
// Gets the current left and right scaling factors. // Gets the current left and right scaling factors.
virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0; virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0;
// Don't use. Will be removed. // Don't use. Will be removed.
virtual int SetSystemOutputMute(bool enable) { return -1; } virtual int SetSystemOutputMute(bool enable) { return -1; }
virtual int GetSystemOutputMute(bool &enabled) { return -1; } virtual int GetSystemOutputMute(bool& enabled) { return -1; }
virtual int SetSystemInputMute(bool enable) { return -1; } virtual int SetSystemInputMute(bool enable) { return -1; }
virtual int GetSystemInputMute(bool& enabled) { return -1; } virtual int GetSystemInputMute(bool& enabled) { return -1; }
protected: protected:
VoEVolumeControl() {}; VoEVolumeControl(){};
virtual ~VoEVolumeControl() {}; virtual ~VoEVolumeControl(){};
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -59,8 +59,7 @@ VoEAudioProcessing* VoEAudioProcessing::GetInterface(VoiceEngine* voiceEngine) {
#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API #ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
VoEAudioProcessingImpl::VoEAudioProcessingImpl(voe::SharedData* shared) VoEAudioProcessingImpl::VoEAudioProcessingImpl(voe::SharedData* shared)
: _isAecMode(kDefaultEcMode == kEcAec), : _isAecMode(kDefaultEcMode == kEcAec), _shared(shared) {
_shared(shared) {
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
"VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor"); "VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor");
} }
@ -104,22 +103,22 @@ int VoEAudioProcessingImpl::SetNsStatus(bool enable, NsModes mode) {
break; break;
} }
if (_shared->audio_processing()->noise_suppression()-> if (_shared->audio_processing()->noise_suppression()->set_level(nsLevel) !=
set_level(nsLevel) != 0) { 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetNsStatus() failed to set Ns mode"); "SetNsStatus() failed to set Ns mode");
return -1; return -1;
} }
if (_shared->audio_processing()->noise_suppression()->Enable(enable) != 0) { if (_shared->audio_processing()->noise_suppression()->Enable(enable) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetNsStatus() failed to set Ns state"); "SetNsStatus() failed to set Ns state");
return -1; return -1;
} }
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetNsStatus() Ns is not supported"); "SetNsStatus() Ns is not supported");
return -1; return -1;
#endif #endif
} }
@ -157,7 +156,7 @@ int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode) {
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetNsStatus() Ns is not supported"); "GetNsStatus() Ns is not supported");
return -1; return -1;
#endif #endif
} }
@ -174,7 +173,7 @@ int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) {
#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID) #if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
if (mode == kAgcAdaptiveAnalog) { if (mode == kAgcAdaptiveAnalog) {
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetAgcStatus() invalid Agc mode for mobile device"); "SetAgcStatus() invalid Agc mode for mobile device");
return -1; return -1;
} }
#endif #endif
@ -200,12 +199,12 @@ int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) {
if (_shared->audio_processing()->gain_control()->set_mode(agcMode) != 0) { if (_shared->audio_processing()->gain_control()->set_mode(agcMode) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetAgcStatus() failed to set Agc mode"); "SetAgcStatus() failed to set Agc mode");
return -1; return -1;
} }
if (_shared->audio_processing()->gain_control()->Enable(enable) != 0) { if (_shared->audio_processing()->gain_control()->Enable(enable) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetAgcStatus() failed to set Agc state"); "SetAgcStatus() failed to set Agc state");
return -1; return -1;
} }
@ -215,15 +214,15 @@ int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) {
// used since we want to be able to provide the APM with updated mic // used since we want to be able to provide the APM with updated mic
// levels when the user modifies the mic level manually. // levels when the user modifies the mic level manually.
if (_shared->audio_device()->SetAGC(enable) != 0) { if (_shared->audio_device()->SetAGC(enable) != 0) {
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
kTraceWarning, "SetAgcStatus() failed to set Agc mode"); "SetAgcStatus() failed to set Agc mode");
} }
} }
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetAgcStatus() Agc is not supported"); "SetAgcStatus() Agc is not supported");
return -1; return -1;
#endif #endif
} }
@ -239,7 +238,7 @@ int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode) {
enabled = _shared->audio_processing()->gain_control()->is_enabled(); enabled = _shared->audio_processing()->gain_control()->is_enabled();
GainControl::Mode agcMode = GainControl::Mode agcMode =
_shared->audio_processing()->gain_control()->mode(); _shared->audio_processing()->gain_control()->mode();
switch (agcMode) { switch (agcMode) {
case GainControl::kFixedDigital: case GainControl::kFixedDigital:
@ -258,7 +257,7 @@ int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode) {
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetAgcStatus() Agc is not supported"); "GetAgcStatus() Agc is not supported");
return -1; return -1;
#endif #endif
} }
@ -273,22 +272,23 @@ int VoEAudioProcessingImpl::SetAgcConfig(AgcConfig config) {
} }
if (_shared->audio_processing()->gain_control()->set_target_level_dbfs( if (_shared->audio_processing()->gain_control()->set_target_level_dbfs(
config.targetLeveldBOv) != 0) { config.targetLeveldBOv) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetAgcConfig() failed to set target peak |level|" "SetAgcConfig() failed to set target peak |level|"
" (or envelope) of the Agc"); " (or envelope) of the Agc");
return -1; return -1;
} }
if (_shared->audio_processing()->gain_control()->set_compression_gain_db( if (_shared->audio_processing()->gain_control()->set_compression_gain_db(
config.digitalCompressionGaindB) != 0) { config.digitalCompressionGaindB) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetAgcConfig() failed to set the range in |gain| " "SetAgcConfig() failed to set the range in |gain| "
"the digital compression stage may apply"); "the digital compression stage may apply");
return -1; return -1;
} }
if (_shared->audio_processing()->gain_control()->enable_limiter( if (_shared->audio_processing()->gain_control()->enable_limiter(
config.limiterEnable) != 0) { config.limiterEnable) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(
VE_APM_ERROR, kTraceError,
"SetAgcConfig() failed to set hard limiter to the signal"); "SetAgcConfig() failed to set hard limiter to the signal");
return -1; return -1;
} }
@ -296,7 +296,7 @@ int VoEAudioProcessingImpl::SetAgcConfig(AgcConfig config) {
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetAgcConfig() EC is not supported"); "SetAgcConfig() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -311,23 +311,22 @@ int VoEAudioProcessingImpl::GetAgcConfig(AgcConfig& config) {
} }
config.targetLeveldBOv = config.targetLeveldBOv =
_shared->audio_processing()->gain_control()->target_level_dbfs(); _shared->audio_processing()->gain_control()->target_level_dbfs();
config.digitalCompressionGaindB = config.digitalCompressionGaindB =
_shared->audio_processing()->gain_control()->compression_gain_db(); _shared->audio_processing()->gain_control()->compression_gain_db();
config.limiterEnable = config.limiterEnable =
_shared->audio_processing()->gain_control()->is_limiter_enabled(); _shared->audio_processing()->gain_control()->is_limiter_enabled();
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetAgcConfig() => targetLeveldBOv=%u, " "GetAgcConfig() => targetLeveldBOv=%u, "
"digitalCompressionGaindB=%u, limiterEnable=%d", "digitalCompressionGaindB=%u, limiterEnable=%d",
config.targetLeveldBOv, config.targetLeveldBOv, config.digitalCompressionGaindB,
config.digitalCompressionGaindB,
config.limiterEnable); config.limiterEnable);
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetAgcConfig() EC is not supported"); "GetAgcConfig() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -346,13 +345,13 @@ int VoEAudioProcessingImpl::SetRxNsStatus(int channel,
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetRxNsStatus() failed to locate channel"); "SetRxNsStatus() failed to locate channel");
return -1; return -1;
} }
return channelPtr->SetRxNsStatus(enable, mode); return channelPtr->SetRxNsStatus(enable, mode);
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetRxNsStatus() NS is not supported"); "SetRxNsStatus() NS is not supported");
return -1; return -1;
#endif #endif
} }
@ -372,13 +371,13 @@ int VoEAudioProcessingImpl::GetRxNsStatus(int channel,
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRxNsStatus() failed to locate channel"); "GetRxNsStatus() failed to locate channel");
return -1; return -1;
} }
return channelPtr->GetRxNsStatus(enabled, mode); return channelPtr->GetRxNsStatus(enabled, mode);
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetRxNsStatus() NS is not supported"); "GetRxNsStatus() NS is not supported");
return -1; return -1;
#endif #endif
} }
@ -387,8 +386,8 @@ int VoEAudioProcessingImpl::SetRxAgcStatus(int channel,
bool enable, bool enable,
AgcModes mode) { AgcModes mode) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetRxAgcStatus(channel=%d, enable=%d, mode=%d)", "SetRxAgcStatus(channel=%d, enable=%d, mode=%d)", channel,
channel, (int)enable, (int)mode); (int)enable, (int)mode);
#ifdef WEBRTC_VOICE_ENGINE_AGC #ifdef WEBRTC_VOICE_ENGINE_AGC
if (!_shared->statistics().Initialized()) { if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError); _shared->SetLastError(VE_NOT_INITED, kTraceError);
@ -399,13 +398,13 @@ int VoEAudioProcessingImpl::SetRxAgcStatus(int channel,
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetRxAgcStatus() failed to locate channel"); "SetRxAgcStatus() failed to locate channel");
return -1; return -1;
} }
return channelPtr->SetRxAgcStatus(enable, mode); return channelPtr->SetRxAgcStatus(enable, mode);
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetRxAgcStatus() Agc is not supported"); "SetRxAgcStatus() Agc is not supported");
return -1; return -1;
#endif #endif
} }
@ -425,19 +424,18 @@ int VoEAudioProcessingImpl::GetRxAgcStatus(int channel,
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRxAgcStatus() failed to locate channel"); "GetRxAgcStatus() failed to locate channel");
return -1; return -1;
} }
return channelPtr->GetRxAgcStatus(enabled, mode); return channelPtr->GetRxAgcStatus(enabled, mode);
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetRxAgcStatus() Agc is not supported"); "GetRxAgcStatus() Agc is not supported");
return -1; return -1;
#endif #endif
} }
int VoEAudioProcessingImpl::SetRxAgcConfig(int channel, int VoEAudioProcessingImpl::SetRxAgcConfig(int channel, AgcConfig config) {
AgcConfig config) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetRxAgcConfig(channel=%d)", channel); "SetRxAgcConfig(channel=%d)", channel);
#ifdef WEBRTC_VOICE_ENGINE_AGC #ifdef WEBRTC_VOICE_ENGINE_AGC
@ -450,13 +448,13 @@ int VoEAudioProcessingImpl::SetRxAgcConfig(int channel,
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetRxAgcConfig() failed to locate channel"); "SetRxAgcConfig() failed to locate channel");
return -1; return -1;
} }
return channelPtr->SetRxAgcConfig(config); return channelPtr->SetRxAgcConfig(config);
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetRxAgcConfig() Agc is not supported"); "SetRxAgcConfig() Agc is not supported");
return -1; return -1;
#endif #endif
} }
@ -474,13 +472,13 @@ int VoEAudioProcessingImpl::GetRxAgcConfig(int channel, AgcConfig& config) {
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetRxAgcConfig() failed to locate channel"); "GetRxAgcConfig() failed to locate channel");
return -1; return -1;
} }
return channelPtr->GetRxAgcConfig(config); return channelPtr->GetRxAgcConfig(config);
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetRxAgcConfig() Agc is not supported"); "GetRxAgcConfig() Agc is not supported");
return -1; return -1;
#endif #endif
} }
@ -498,7 +496,8 @@ int VoEAudioProcessingImpl::EnableDriftCompensation(bool enable) {
WEBRTC_VOICE_INIT_CHECK(); WEBRTC_VOICE_INIT_CHECK();
if (!DriftCompensationSupported()) { if (!DriftCompensationSupported()) {
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, _shared->SetLastError(
VE_APM_ERROR, kTraceWarning,
"Drift compensation is not supported on this platform."); "Drift compensation is not supported on this platform.");
return -1; return -1;
} }
@ -506,7 +505,7 @@ int VoEAudioProcessingImpl::EnableDriftCompensation(bool enable) {
EchoCancellation* aec = _shared->audio_processing()->echo_cancellation(); EchoCancellation* aec = _shared->audio_processing()->echo_cancellation();
if (aec->enable_drift_compensation(enable) != 0) { if (aec->enable_drift_compensation(enable) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"aec->enable_drift_compensation() failed"); "aec->enable_drift_compensation() failed");
return -1; return -1;
} }
return 0; return 0;
@ -530,41 +529,43 @@ int VoEAudioProcessingImpl::SetEcStatus(bool enable, EcModes mode) {
} }
// AEC mode // AEC mode
if ((mode == kEcDefault) || if ((mode == kEcDefault) || (mode == kEcConference) || (mode == kEcAec) ||
(mode == kEcConference) || ((mode == kEcUnchanged) && (_isAecMode == true))) {
(mode == kEcAec) ||
((mode == kEcUnchanged) &&
(_isAecMode == true))) {
if (enable) { if (enable) {
// Disable the AECM before enable the AEC // Disable the AECM before enable the AEC
if (_shared->audio_processing()->echo_control_mobile()->is_enabled()) { if (_shared->audio_processing()->echo_control_mobile()->is_enabled()) {
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
"SetEcStatus() disable AECM before enabling AEC"); "SetEcStatus() disable AECM before enabling AEC");
if (_shared->audio_processing()->echo_control_mobile()-> if (_shared->audio_processing()->echo_control_mobile()->Enable(false) !=
Enable(false) != 0) { 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetEcStatus() failed to disable AECM"); "SetEcStatus() failed to disable AECM");
return -1; return -1;
} }
} }
} }
if (_shared->audio_processing()->echo_cancellation()->Enable(enable) != 0) { if (_shared->audio_processing()->echo_cancellation()->Enable(enable) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetEcStatus() failed to set AEC state"); "SetEcStatus() failed to set AEC state");
return -1; return -1;
} }
if (mode == kEcConference) { if (mode == kEcConference) {
if (_shared->audio_processing()->echo_cancellation()-> if (_shared->audio_processing()
set_suppression_level(EchoCancellation::kHighSuppression) != 0) { ->echo_cancellation()
_shared->SetLastError(VE_APM_ERROR, kTraceError, ->set_suppression_level(EchoCancellation::kHighSuppression) !=
0) {
_shared->SetLastError(
VE_APM_ERROR, kTraceError,
"SetEcStatus() failed to set aggressiveness to high"); "SetEcStatus() failed to set aggressiveness to high");
return -1; return -1;
} }
} else { } else {
if (_shared->audio_processing()->echo_cancellation()-> if (_shared->audio_processing()
set_suppression_level( ->echo_cancellation()
EchoCancellation::kModerateSuppression) != 0) { ->set_suppression_level(EchoCancellation::kModerateSuppression) !=
_shared->SetLastError(VE_APM_ERROR, kTraceError, 0) {
_shared->SetLastError(
VE_APM_ERROR, kTraceError,
"SetEcStatus() failed to set aggressiveness to moderate"); "SetEcStatus() failed to set aggressiveness to moderate");
return -1; return -1;
} }
@ -572,38 +573,37 @@ int VoEAudioProcessingImpl::SetEcStatus(bool enable, EcModes mode) {
_isAecMode = true; _isAecMode = true;
} else if ((mode == kEcAecm) || } else if ((mode == kEcAecm) ||
((mode == kEcUnchanged) && ((mode == kEcUnchanged) && (_isAecMode == false))) {
(_isAecMode == false))) {
if (enable) { if (enable) {
// Disable the AEC before enable the AECM // Disable the AEC before enable the AECM
if (_shared->audio_processing()->echo_cancellation()->is_enabled()) { if (_shared->audio_processing()->echo_cancellation()->is_enabled()) {
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
"SetEcStatus() disable AEC before enabling AECM"); "SetEcStatus() disable AEC before enabling AECM");
if (_shared->audio_processing()->echo_cancellation()-> if (_shared->audio_processing()->echo_cancellation()->Enable(false) !=
Enable(false) != 0) { 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetEcStatus() failed to disable AEC"); "SetEcStatus() failed to disable AEC");
return -1; return -1;
} }
} }
} }
if (_shared->audio_processing()->echo_control_mobile()-> if (_shared->audio_processing()->echo_control_mobile()->Enable(enable) !=
Enable(enable) != 0) { 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetEcStatus() failed to set AECM state"); "SetEcStatus() failed to set AECM state");
return -1; return -1;
} }
_isAecMode = false; _isAecMode = false;
} else { } else {
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetEcStatus() invalid EC mode"); "SetEcStatus() invalid EC mode");
return -1; return -1;
} }
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetEcStatus() EC is not supported"); "SetEcStatus() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -622,17 +622,15 @@ int VoEAudioProcessingImpl::GetEcStatus(bool& enabled, EcModes& mode) {
enabled = _shared->audio_processing()->echo_cancellation()->is_enabled(); enabled = _shared->audio_processing()->echo_cancellation()->is_enabled();
} else { } else {
mode = kEcAecm; mode = kEcAecm;
enabled = _shared->audio_processing()->echo_control_mobile()-> enabled = _shared->audio_processing()->echo_control_mobile()->is_enabled();
is_enabled();
} }
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetEcStatus() => enabled=%i, mode=%i", "GetEcStatus() => enabled=%i, mode=%i", enabled, (int)mode);
enabled, (int)mode);
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetEcStatus() EC is not supported"); "GetEcStatus() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -679,16 +677,16 @@ int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG) {
break; break;
} }
if (_shared->audio_processing()->echo_control_mobile()->set_routing_mode(
if (_shared->audio_processing()->echo_control_mobile()-> aecmMode) != 0) {
set_routing_mode(aecmMode) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetAECMMode() failed to set AECM routing mode"); "SetAECMMode() failed to set AECM routing mode");
return -1; return -1;
} }
if (_shared->audio_processing()->echo_control_mobile()-> if (_shared->audio_processing()->echo_control_mobile()->enable_comfort_noise(
enable_comfort_noise(enableCNG) != 0) { enableCNG) != 0) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(
VE_APM_ERROR, kTraceError,
"SetAECMMode() failed to set comfort noise state for AECM"); "SetAECMMode() failed to set comfort noise state for AECM");
return -1; return -1;
} }
@ -696,7 +694,7 @@ int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG) {
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetAECMMode() EC is not supported"); "SetAECMMode() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -714,8 +712,9 @@ int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG) {
EchoControlMobile::RoutingMode aecmMode = EchoControlMobile::RoutingMode aecmMode =
_shared->audio_processing()->echo_control_mobile()->routing_mode(); _shared->audio_processing()->echo_control_mobile()->routing_mode();
enabledCNG = _shared->audio_processing()->echo_control_mobile()-> enabledCNG = _shared->audio_processing()
is_comfort_noise_enabled(); ->echo_control_mobile()
->is_comfort_noise_enabled();
switch (aecmMode) { switch (aecmMode) {
case EchoControlMobile::kQuietEarpieceOrHeadset: case EchoControlMobile::kQuietEarpieceOrHeadset:
@ -738,7 +737,7 @@ int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG) {
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetAECMMode() EC is not supported"); "GetAECMMode() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -749,7 +748,7 @@ int VoEAudioProcessingImpl::EnableHighPassFilter(bool enable) {
if (_shared->audio_processing()->high_pass_filter()->Enable(enable) != if (_shared->audio_processing()->high_pass_filter()->Enable(enable) !=
AudioProcessing::kNoError) { AudioProcessing::kNoError) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"HighPassFilter::Enable() failed."); "HighPassFilter::Enable() failed.");
return -1; return -1;
} }
@ -762,9 +761,8 @@ bool VoEAudioProcessingImpl::IsHighPassFilterEnabled() {
return _shared->audio_processing()->high_pass_filter()->is_enabled(); return _shared->audio_processing()->high_pass_filter()->is_enabled();
} }
int VoEAudioProcessingImpl::RegisterRxVadObserver( int VoEAudioProcessingImpl::RegisterRxVadObserver(int channel,
int channel, VoERxVadCallback& observer) {
VoERxVadCallback& observer) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"RegisterRxVadObserver()"); "RegisterRxVadObserver()");
if (!_shared->statistics().Initialized()) { if (!_shared->statistics().Initialized()) {
@ -775,7 +773,7 @@ int VoEAudioProcessingImpl::RegisterRxVadObserver(
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterRxVadObserver() failed to locate channel"); "RegisterRxVadObserver() failed to locate channel");
return -1; return -1;
} }
return channelPtr->RegisterRxVadObserver(observer); return channelPtr->RegisterRxVadObserver(observer);
@ -792,7 +790,7 @@ int VoEAudioProcessingImpl::DeRegisterRxVadObserver(int channel) {
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterRxVadObserver() failed to locate channel"); "DeRegisterRxVadObserver() failed to locate channel");
return -1; return -1;
} }
@ -811,7 +809,7 @@ int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel) {
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"DeRegisterRxVadObserver() failed to locate channel"); "DeRegisterRxVadObserver() failed to locate channel");
return -1; return -1;
} }
int activity(-1); int activity(-1);
@ -829,18 +827,18 @@ int VoEAudioProcessingImpl::SetEcMetricsStatus(bool enable) {
return -1; return -1;
} }
if ((_shared->audio_processing()->echo_cancellation()->enable_metrics(enable) if ((_shared->audio_processing()->echo_cancellation()->enable_metrics(
!= 0) || enable) != 0) ||
(_shared->audio_processing()->echo_cancellation()->enable_delay_logging( (_shared->audio_processing()->echo_cancellation()->enable_delay_logging(
enable) != 0)) { enable) != 0)) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(VE_APM_ERROR, kTraceError,
"SetEcMetricsStatus() unable to set EC metrics mode"); "SetEcMetricsStatus() unable to set EC metrics mode");
return -1; return -1;
} }
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetEcStatus() EC is not supported"); "SetEcStatus() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -855,12 +853,14 @@ int VoEAudioProcessingImpl::GetEcMetricsStatus(bool& enabled) {
} }
bool echo_mode = bool echo_mode =
_shared->audio_processing()->echo_cancellation()->are_metrics_enabled(); _shared->audio_processing()->echo_cancellation()->are_metrics_enabled();
bool delay_mode = _shared->audio_processing()->echo_cancellation()-> bool delay_mode = _shared->audio_processing()
is_delay_logging_enabled(); ->echo_cancellation()
->is_delay_logging_enabled();
if (echo_mode != delay_mode) { if (echo_mode != delay_mode) {
_shared->SetLastError(VE_APM_ERROR, kTraceError, _shared->SetLastError(
VE_APM_ERROR, kTraceError,
"GetEcMetricsStatus() delay logging and echo mode are not the same"); "GetEcMetricsStatus() delay logging and echo mode are not the same");
return -1; return -1;
} }
@ -872,7 +872,7 @@ int VoEAudioProcessingImpl::GetEcMetricsStatus(bool& enabled) {
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetEcStatus() EC is not supported"); "SetEcStatus() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -889,7 +889,8 @@ int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
return -1; return -1;
} }
if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) { if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, _shared->SetLastError(
VE_APM_ERROR, kTraceWarning,
"GetEchoMetrics() AudioProcessingModule AEC is not enabled"); "GetEchoMetrics() AudioProcessingModule AEC is not enabled");
return -1; return -1;
} }
@ -910,12 +911,12 @@ int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
A_NLP = echoMetrics.a_nlp.instant; A_NLP = echoMetrics.a_nlp.instant;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetEchoMetrics() => ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d", "GetEchoMetrics() => ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d", ERL,
ERL, ERLE, RERL, A_NLP); ERLE, RERL, A_NLP);
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetEcStatus() EC is not supported"); "SetEcStatus() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -931,7 +932,8 @@ int VoEAudioProcessingImpl::GetEcDelayMetrics(int& delay_median,
return -1; return -1;
} }
if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) { if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, _shared->SetLastError(
VE_APM_ERROR, kTraceWarning,
"GetEcDelayMetrics() AudioProcessingModule AEC is not enabled"); "GetEcDelayMetrics() AudioProcessingModule AEC is not enabled");
return -1; return -1;
} }
@ -941,7 +943,7 @@ int VoEAudioProcessingImpl::GetEcDelayMetrics(int& delay_median,
float poor_fraction = 0; float poor_fraction = 0;
// Get delay-logging values from Audio Processing Module. // Get delay-logging values from Audio Processing Module.
if (_shared->audio_processing()->echo_cancellation()->GetDelayMetrics( if (_shared->audio_processing()->echo_cancellation()->GetDelayMetrics(
&median, &std, &poor_fraction)) { &median, &std, &poor_fraction)) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetEcDelayMetrics(), AudioProcessingModule delay-logging " "GetEcDelayMetrics(), AudioProcessingModule delay-logging "
"error"); "error");
@ -955,12 +957,12 @@ int VoEAudioProcessingImpl::GetEcDelayMetrics(int& delay_median,
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetEcDelayMetrics() => delay_median=%d, delay_std=%d, " "GetEcDelayMetrics() => delay_median=%d, delay_std=%d, "
"fraction_poor_delays=%f", delay_median, delay_std, "fraction_poor_delays=%f",
fraction_poor_delays); delay_median, delay_std, fraction_poor_delays);
return 0; return 0;
#else #else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetEcStatus() EC is not supported"); "SetEcStatus() EC is not supported");
return -1; return -1;
#endif #endif
} }
@ -1014,12 +1016,13 @@ int VoEAudioProcessingImpl::SetTypingDetectionStatus(bool enable) {
if (_shared->audio_processing()->voice_detection()->Enable(enable)) { if (_shared->audio_processing()->voice_detection()->Enable(enable)) {
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
"SetTypingDetectionStatus() failed to set VAD state"); "SetTypingDetectionStatus() failed to set VAD state");
return -1; return -1;
} }
if (_shared->audio_processing()->voice_detection()->set_likelihood( if (_shared->audio_processing()->voice_detection()->set_likelihood(
VoiceDetection::kVeryLowLikelihood)) { VoiceDetection::kVeryLowLikelihood)) {
_shared->SetLastError(VE_APM_ERROR, kTraceWarning, _shared->SetLastError(
VE_APM_ERROR, kTraceWarning,
"SetTypingDetectionStatus() failed to set VAD likelihood to low"); "SetTypingDetectionStatus() failed to set VAD likelihood to low");
return -1; return -1;
} }
@ -1043,8 +1046,7 @@ int VoEAudioProcessingImpl::GetTypingDetectionStatus(bool& enabled) {
return 0; return 0;
} }
int VoEAudioProcessingImpl::TimeSinceLastTyping(int& seconds) {
int VoEAudioProcessingImpl::TimeSinceLastTyping(int &seconds) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"TimeSinceLastTyping()"); "TimeSinceLastTyping()");
#if !defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION) #if !defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
@ -1056,16 +1058,13 @@ int VoEAudioProcessingImpl::TimeSinceLastTyping(int &seconds) {
} }
// Check if typing detection is enabled // Check if typing detection is enabled
bool enabled = _shared->audio_processing()->voice_detection()->is_enabled(); bool enabled = _shared->audio_processing()->voice_detection()->is_enabled();
if (enabled) if (enabled) {
{
_shared->transmit_mixer()->TimeSinceLastTyping(seconds); _shared->transmit_mixer()->TimeSinceLastTyping(seconds);
return 0; return 0;
} } else {
else
{
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetTypingDetectionStatus is not enabled"); "SetTypingDetectionStatus is not enabled");
return -1; return -1;
} }
#endif #endif
} }
@ -1084,8 +1083,9 @@ int VoEAudioProcessingImpl::SetTypingDetectionParameters(int timeWindow,
_shared->statistics().SetLastError(VE_NOT_INITED, kTraceError); _shared->statistics().SetLastError(VE_NOT_INITED, kTraceError);
return -1; return -1;
} }
return (_shared->transmit_mixer()->SetTypingDetectionParameters(timeWindow, return (_shared->transmit_mixer()->SetTypingDetectionParameters(
costPerTyping, reportingThreshold, penaltyDecay, typeEventDelay)); timeWindow, costPerTyping, reportingThreshold, penaltyDecay,
typeEventDelay));
#endif #endif
} }

View File

@ -19,90 +19,90 @@ namespace webrtc {
class VoEAudioProcessingImpl : public VoEAudioProcessing { class VoEAudioProcessingImpl : public VoEAudioProcessing {
public: public:
virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged); int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) override;
virtual int GetNsStatus(bool& enabled, NsModes& mode); int GetNsStatus(bool& enabled, NsModes& mode) override;
virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged); int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) override;
virtual int GetAgcStatus(bool& enabled, AgcModes& mode); int GetAgcStatus(bool& enabled, AgcModes& mode) override;
virtual int SetAgcConfig(AgcConfig config); int SetAgcConfig(AgcConfig config) override;
virtual int GetAgcConfig(AgcConfig& config); int GetAgcConfig(AgcConfig& config) override;
virtual int SetRxNsStatus(int channel, int SetRxNsStatus(int channel,
bool enable, bool enable,
NsModes mode = kNsUnchanged); NsModes mode = kNsUnchanged) override;
virtual int GetRxNsStatus(int channel, bool& enabled, NsModes& mode); int GetRxNsStatus(int channel, bool& enabled, NsModes& mode) override;
virtual int SetRxAgcStatus(int channel, int SetRxAgcStatus(int channel,
bool enable, bool enable,
AgcModes mode = kAgcUnchanged); AgcModes mode = kAgcUnchanged) override;
virtual int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode); int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode) override;
virtual int SetRxAgcConfig(int channel, AgcConfig config); int SetRxAgcConfig(int channel, AgcConfig config) override;
virtual int GetRxAgcConfig(int channel, AgcConfig& config); int GetRxAgcConfig(int channel, AgcConfig& config) override;
virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged); int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) override;
virtual int GetEcStatus(bool& enabled, EcModes& mode); int GetEcStatus(bool& enabled, EcModes& mode) override;
virtual int EnableDriftCompensation(bool enable); int EnableDriftCompensation(bool enable) override;
virtual bool DriftCompensationEnabled(); bool DriftCompensationEnabled() override;
virtual void SetDelayOffsetMs(int offset); void SetDelayOffsetMs(int offset) override;
virtual int DelayOffsetMs(); int DelayOffsetMs() override;
virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone, int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
bool enableCNG = true); bool enableCNG = true) override;
virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG); int GetAecmMode(AecmModes& mode, bool& enabledCNG) override;
virtual int EnableHighPassFilter(bool enable); int EnableHighPassFilter(bool enable) override;
virtual bool IsHighPassFilterEnabled(); bool IsHighPassFilterEnabled() override;
virtual int RegisterRxVadObserver(int channel, int RegisterRxVadObserver(int channel, VoERxVadCallback& observer) override;
VoERxVadCallback& observer);
virtual int DeRegisterRxVadObserver(int channel); int DeRegisterRxVadObserver(int channel) override;
virtual int VoiceActivityIndicator(int channel); int VoiceActivityIndicator(int channel) override;
virtual int SetEcMetricsStatus(bool enable); int SetEcMetricsStatus(bool enable) override;
virtual int GetEcMetricsStatus(bool& enabled); int GetEcMetricsStatus(bool& enabled) override;
virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP); int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) override;
virtual int GetEcDelayMetrics(int& delay_median, int& delay_std, int GetEcDelayMetrics(int& delay_median,
float& fraction_poor_delays); int& delay_std,
float& fraction_poor_delays) override;
virtual int StartDebugRecording(const char* fileNameUTF8); int StartDebugRecording(const char* fileNameUTF8) override;
virtual int StartDebugRecording(FILE* file_handle); int StartDebugRecording(FILE* file_handle) override;
virtual int StopDebugRecording(); int StopDebugRecording() override;
virtual int SetTypingDetectionStatus(bool enable); int SetTypingDetectionStatus(bool enable) override;
virtual int GetTypingDetectionStatus(bool& enabled); int GetTypingDetectionStatus(bool& enabled) override;
virtual int TimeSinceLastTyping(int &seconds); int TimeSinceLastTyping(int& seconds) override;
// TODO(niklase) Remove default argument as soon as libJingle is updated! // TODO(niklase) Remove default argument as soon as libJingle is updated!
virtual int SetTypingDetectionParameters(int timeWindow, int SetTypingDetectionParameters(int timeWindow,
int costPerTyping, int costPerTyping,
int reportingThreshold, int reportingThreshold,
int penaltyDecay, int penaltyDecay,
int typeEventDelay = 0); int typeEventDelay = 0) override;
virtual void EnableStereoChannelSwapping(bool enable); void EnableStereoChannelSwapping(bool enable) override;
virtual bool IsStereoChannelSwappingEnabled(); bool IsStereoChannelSwappingEnabled() override;
protected: protected:
VoEAudioProcessingImpl(voe::SharedData* shared); VoEAudioProcessingImpl(voe::SharedData* shared);
virtual ~VoEAudioProcessingImpl(); ~VoEAudioProcessingImpl() override;
private: private:
bool _isAecMode; bool _isAecMode;

View File

@ -22,8 +22,7 @@ class VoEAudioProcessingTest : public ::testing::Test {
VoEAudioProcessingTest() VoEAudioProcessingTest()
: voe_(VoiceEngine::Create()), : voe_(VoiceEngine::Create()),
base_(VoEBase::GetInterface(voe_)), base_(VoEBase::GetInterface(voe_)),
audioproc_(VoEAudioProcessing::GetInterface(voe_)) { audioproc_(VoEAudioProcessing::GetInterface(voe_)) {}
}
virtual ~VoEAudioProcessingTest() { virtual ~VoEAudioProcessingTest() {
base_->Terminate(); base_->Terminate();

View File

@ -17,167 +17,145 @@
#include "webrtc/voice_engine/include/voe_errors.h" #include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/voice_engine_impl.h" #include "webrtc/voice_engine/voice_engine_impl.h"
namespace webrtc namespace webrtc {
{
VoECodec* VoECodec::GetInterface(VoiceEngine* voiceEngine) VoECodec* VoECodec::GetInterface(VoiceEngine* voiceEngine) {
{
#ifndef WEBRTC_VOICE_ENGINE_CODEC_API #ifndef WEBRTC_VOICE_ENGINE_CODEC_API
return NULL; return NULL;
#else #else
if (NULL == voiceEngine) if (NULL == voiceEngine) {
{ return NULL;
return NULL; }
} VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); s->AddRef();
s->AddRef(); return s;
return s;
#endif #endif
} }
#ifdef WEBRTC_VOICE_ENGINE_CODEC_API #ifdef WEBRTC_VOICE_ENGINE_CODEC_API
VoECodecImpl::VoECodecImpl(voe::SharedData* shared) : _shared(shared) VoECodecImpl::VoECodecImpl(voe::SharedData* shared) : _shared(shared) {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoECodecImpl() - ctor");
"VoECodecImpl() - ctor");
} }
VoECodecImpl::~VoECodecImpl() VoECodecImpl::~VoECodecImpl() {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "~VoECodecImpl() - dtor");
"~VoECodecImpl() - dtor");
} }
int VoECodecImpl::NumOfCodecs() int VoECodecImpl::NumOfCodecs() {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "NumOfCodecs()");
"NumOfCodecs()");
// Number of supported codecs in the ACM // Number of supported codecs in the ACM
uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs(); uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
VoEId(_shared->instance_id(), -1), "NumOfCodecs() => %u", nSupportedCodecs);
"NumOfCodecs() => %u", nSupportedCodecs); return (nSupportedCodecs);
return (nSupportedCodecs);
} }
int VoECodecImpl::GetCodec(int index, CodecInst& codec) int VoECodecImpl::GetCodec(int index, CodecInst& codec) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetCodec(index=%d, codec=?)", index);
"GetCodec(index=%d, codec=?)", index); CodecInst acmCodec;
CodecInst acmCodec; if (AudioCodingModule::Codec(index, &acmCodec) == -1) {
if (AudioCodingModule::Codec(index, &acmCodec) _shared->SetLastError(VE_INVALID_LISTNR, kTraceError,
== -1) "GetCodec() invalid index");
{ return -1;
_shared->SetLastError(VE_INVALID_LISTNR, kTraceError, }
"GetCodec() invalid index"); ACMToExternalCodecRepresentation(codec, acmCodec);
return -1; WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
} "GetCodec() => plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
ACMToExternalCodecRepresentation(codec, acmCodec); "channels=%d, rate=%d",
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, codec.plname, codec.pacsize, codec.plfreq, codec.pltype,
VoEId(_shared->instance_id(), -1), codec.channels, codec.rate);
"GetCodec() => plname=%s, pacsize=%d, plfreq=%d, pltype=%d, " return 0;
"channels=%d, rate=%d", codec.plname, codec.pacsize,
codec.plfreq, codec.pltype, codec.channels, codec.rate);
return 0;
} }
int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec) int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec) {
{ CodecInst copyCodec;
CodecInst copyCodec; ExternalToACMCodecRepresentation(copyCodec, codec);
ExternalToACMCodecRepresentation(copyCodec, codec);
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetSendCodec(channel=%d, codec)", channel); "SetSendCodec(channel=%d, codec)", channel);
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, " "codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
"channels=%d, rate=%d", codec.plname, codec.pacsize, "channels=%d, rate=%d",
codec.plfreq, codec.pltype, codec.channels, codec.rate); codec.plname, codec.pacsize, codec.plfreq, codec.pltype,
if (!_shared->statistics().Initialized()) codec.channels, codec.rate);
{ if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError); _shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1; return -1;
} }
// External sanity checks performed outside the ACM // External sanity checks performed outside the ACM
if ((STR_CASE_CMP(copyCodec.plname, "L16") == 0) && if ((STR_CASE_CMP(copyCodec.plname, "L16") == 0) &&
(copyCodec.pacsize >= 960)) (copyCodec.pacsize >= 960)) {
{ _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, "SetSendCodec() invalid L16 packet size");
"SetSendCodec() invalid L16 packet size"); return -1;
return -1; }
} if (!STR_CASE_CMP(copyCodec.plname, "CN") ||
if (!STR_CASE_CMP(copyCodec.plname, "CN") !STR_CASE_CMP(copyCodec.plname, "TELEPHONE-EVENT") ||
|| !STR_CASE_CMP(copyCodec.plname, "TELEPHONE-EVENT") !STR_CASE_CMP(copyCodec.plname, "RED")) {
|| !STR_CASE_CMP(copyCodec.plname, "RED")) _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
{ "SetSendCodec() invalid codec name");
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, return -1;
"SetSendCodec() invalid codec name"); }
return -1; if ((copyCodec.channels != 1) && (copyCodec.channels != 2)) {
} _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
if ((copyCodec.channels != 1) && (copyCodec.channels != 2)) "SetSendCodec() invalid number of channels");
{ return -1;
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, }
"SetSendCodec() invalid number of channels"); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
return -1; voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
voe::Channel* channelPtr = ch.channel(); "GetSendCodec() failed to locate channel");
if (channelPtr == NULL) return -1;
{ }
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, if (!AudioCodingModule::IsCodecValid((CodecInst&)copyCodec)) {
"GetSendCodec() failed to locate channel"); _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
return -1; "SetSendCodec() invalid codec");
} return -1;
if (!AudioCodingModule::IsCodecValid( }
(CodecInst&) copyCodec)) if (channelPtr->SetSendCodec(copyCodec) != 0) {
{ _shared->SetLastError(VE_CANNOT_SET_SEND_CODEC, kTraceError,
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, "SetSendCodec() failed to set send codec");
"SetSendCodec() invalid codec"); return -1;
return -1; }
}
if (channelPtr->SetSendCodec(copyCodec) != 0)
{
_shared->SetLastError(VE_CANNOT_SET_SEND_CODEC, kTraceError,
"SetSendCodec() failed to set send codec");
return -1;
}
return 0; return 0;
} }
int VoECodecImpl::GetSendCodec(int channel, CodecInst& codec) int VoECodecImpl::GetSendCodec(int channel, CodecInst& codec) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetSendCodec(channel=%d, codec=?)", channel);
"GetSendCodec(channel=%d, codec=?)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetSendCodec() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetSendCodec() failed to locate channel"); CodecInst acmCodec;
return -1; if (channelPtr->GetSendCodec(acmCodec) != 0) {
} _shared->SetLastError(VE_CANNOT_GET_SEND_CODEC, kTraceError,
CodecInst acmCodec; "GetSendCodec() failed to get send codec");
if (channelPtr->GetSendCodec(acmCodec) != 0) return -1;
{ }
_shared->SetLastError(VE_CANNOT_GET_SEND_CODEC, kTraceError, ACMToExternalCodecRepresentation(codec, acmCodec);
"GetSendCodec() failed to get send codec"); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
return -1; "GetSendCodec() => plname=%s, pacsize=%d, plfreq=%d, "
} "channels=%d, rate=%d",
ACMToExternalCodecRepresentation(codec, acmCodec); codec.plname, codec.pacsize, codec.plfreq, codec.channels,
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, codec.rate);
VoEId(_shared->instance_id(), -1), return 0;
"GetSendCodec() => plname=%s, pacsize=%d, plfreq=%d, "
"channels=%d, rate=%d", codec.plname, codec.pacsize,
codec.plfreq, codec.channels, codec.rate);
return 0;
} }
int VoECodecImpl::SetBitRate(int channel, int bitrate_bps) { int VoECodecImpl::SetBitRate(int channel, int bitrate_bps) {
@ -192,119 +170,106 @@ int VoECodecImpl::SetBitRate(int channel, int bitrate_bps) {
return 0; return 0;
} }
int VoECodecImpl::GetRecCodec(int channel, CodecInst& codec) int VoECodecImpl::GetRecCodec(int channel, CodecInst& codec) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetRecCodec(channel=%d, codec=?)", channel);
"GetRecCodec(channel=%d, codec=?)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetRecCodec() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetRecCodec() failed to locate channel"); CodecInst acmCodec;
return -1; if (channelPtr->GetRecCodec(acmCodec) != 0) {
} _shared->SetLastError(VE_CANNOT_GET_REC_CODEC, kTraceError,
CodecInst acmCodec; "GetRecCodec() failed to get received codec");
if (channelPtr->GetRecCodec(acmCodec) != 0) return -1;
{ }
_shared->SetLastError(VE_CANNOT_GET_REC_CODEC, kTraceError, ACMToExternalCodecRepresentation(codec, acmCodec);
"GetRecCodec() failed to get received codec"); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
return -1; "GetRecCodec() => plname=%s, pacsize=%d, plfreq=%d, "
} "channels=%d, rate=%d",
ACMToExternalCodecRepresentation(codec, acmCodec); codec.plname, codec.pacsize, codec.plfreq, codec.channels,
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, codec.rate);
VoEId(_shared->instance_id(), -1), return 0;
"GetRecCodec() => plname=%s, pacsize=%d, plfreq=%d, "
"channels=%d, rate=%d", codec.plname, codec.pacsize,
codec.plfreq, codec.channels, codec.rate);
return 0;
} }
int VoECodecImpl::SetRecPayloadType(int channel, const CodecInst& codec) int VoECodecImpl::SetRecPayloadType(int channel, const CodecInst& codec) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetRecPayloadType(channel=%d, codec)", channel);
"SetRecPayloadType(channel=%d, codec)", channel); WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"codec: plname=%s, plfreq=%d, pltype=%d, channels=%u, " "codec: plname=%s, plfreq=%d, pltype=%d, channels=%u, "
"pacsize=%d, rate=%d", codec.plname, codec.plfreq, codec.pltype, "pacsize=%d, rate=%d",
codec.channels, codec.pacsize, codec.rate); codec.plname, codec.plfreq, codec.pltype, codec.channels,
if (!_shared->statistics().Initialized()) codec.pacsize, codec.rate);
{ if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError); _shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1; return -1;
} }
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) if (channelPtr == NULL) {
{ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, "GetRecPayloadType() failed to locate channel");
"GetRecPayloadType() failed to locate channel"); return -1;
return -1; }
} return channelPtr->SetRecPayloadType(codec);
return channelPtr->SetRecPayloadType(codec);
} }
int VoECodecImpl::GetRecPayloadType(int channel, CodecInst& codec) int VoECodecImpl::GetRecPayloadType(int channel, CodecInst& codec) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetRecPayloadType(channel=%d, codec)", channel);
"GetRecPayloadType(channel=%d, codec)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetRecPayloadType() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetRecPayloadType() failed to locate channel"); return channelPtr->GetRecPayloadType(codec);
return -1;
}
return channelPtr->GetRecPayloadType(codec);
} }
int VoECodecImpl::SetSendCNPayloadType(int channel, int type, int VoECodecImpl::SetSendCNPayloadType(int channel,
PayloadFrequencies frequency) int type,
{ PayloadFrequencies frequency) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetSendCNPayloadType(channel=%d, type=%d, frequency=%d)", "SetSendCNPayloadType(channel=%d, type=%d, frequency=%d)",
channel, type, frequency); channel, type, frequency);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (type < 96 || type > 127) {
if (type < 96 || type > 127) // Only allow dynamic range: 96 to 127
{ _shared->SetLastError(VE_INVALID_PLTYPE, kTraceError,
// Only allow dynamic range: 96 to 127 "SetSendCNPayloadType() invalid payload type");
_shared->SetLastError(VE_INVALID_PLTYPE, kTraceError, return -1;
"SetSendCNPayloadType() invalid payload type"); }
return -1; if ((frequency != kFreq16000Hz) && (frequency != kFreq32000Hz)) {
} // It is not possible to modify the payload type for CN/8000.
if ((frequency != kFreq16000Hz) && (frequency != kFreq32000Hz)) // We only allow modification of the CN payload type for CN/16000
{ // and CN/32000.
// It is not possible to modify the payload type for CN/8000. _shared->SetLastError(VE_INVALID_PLFREQ, kTraceError,
// We only allow modification of the CN payload type for CN/16000 "SetSendCNPayloadType() invalid payload frequency");
// and CN/32000. return -1;
_shared->SetLastError(VE_INVALID_PLFREQ, kTraceError, }
"SetSendCNPayloadType() invalid payload frequency"); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
return -1; voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
voe::Channel* channelPtr = ch.channel(); "SetSendCNPayloadType() failed to locate channel");
if (channelPtr == NULL) return -1;
{ }
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return channelPtr->SetSendCNPayloadType(type, frequency);
"SetSendCNPayloadType() failed to locate channel");
return -1;
}
return channelPtr->SetSendCNPayloadType(type, frequency);
} }
int VoECodecImpl::SetFECStatus(int channel, bool enable) { int VoECodecImpl::SetFECStatus(int channel, bool enable) {
@ -342,92 +307,87 @@ int VoECodecImpl::GetFECStatus(int channel, bool& enabled) {
return 0; return 0;
} }
int VoECodecImpl::SetVADStatus(int channel, bool enable, VadModes mode, int VoECodecImpl::SetVADStatus(int channel,
bool disableDTX) bool enable,
{ VadModes mode,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), bool disableDTX) {
"SetVADStatus(channel=%i, enable=%i, mode=%i, disableDTX=%i)", WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
channel, enable, mode, disableDTX); "SetVADStatus(channel=%i, enable=%i, mode=%i, disableDTX=%i)",
channel, enable, mode, disableDTX);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetVADStatus failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetVADStatus failed to locate channel"); }
return -1;
}
ACMVADMode vadMode(VADNormal); ACMVADMode vadMode(VADNormal);
switch (mode) switch (mode) {
{ case kVadConventional:
case kVadConventional: vadMode = VADNormal;
vadMode = VADNormal; break;
break; case kVadAggressiveLow:
case kVadAggressiveLow: vadMode = VADLowBitrate;
vadMode = VADLowBitrate; break;
break; case kVadAggressiveMid:
case kVadAggressiveMid: vadMode = VADAggr;
vadMode = VADAggr; break;
break; case kVadAggressiveHigh:
case kVadAggressiveHigh: vadMode = VADVeryAggr;
vadMode = VADVeryAggr; break;
break; }
} return channelPtr->SetVADStatus(enable, vadMode, disableDTX);
return channelPtr->SetVADStatus(enable, vadMode, disableDTX);
} }
int VoECodecImpl::GetVADStatus(int channel, bool& enabled, VadModes& mode, int VoECodecImpl::GetVADStatus(int channel,
bool& disabledDTX) bool& enabled,
{ VadModes& mode,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), bool& disabledDTX) {
"GetVADStatus(channel=%i)", channel); WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetVADStatus(channel=%i)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetVADStatus failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetVADStatus failed to locate channel"); }
return -1;
}
ACMVADMode vadMode; ACMVADMode vadMode;
int ret = channelPtr->GetVADStatus(enabled, vadMode, disabledDTX); int ret = channelPtr->GetVADStatus(enabled, vadMode, disabledDTX);
if (ret != 0) if (ret != 0) {
{ _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
_shared->SetLastError(VE_INVALID_OPERATION, kTraceError, "GetVADStatus failed to get VAD mode");
"GetVADStatus failed to get VAD mode"); return -1;
return -1; }
} switch (vadMode) {
switch (vadMode) case VADNormal:
{ mode = kVadConventional;
case VADNormal: break;
mode = kVadConventional; case VADLowBitrate:
break; mode = kVadAggressiveLow;
case VADLowBitrate: break;
mode = kVadAggressiveLow; case VADAggr:
break; mode = kVadAggressiveMid;
case VADAggr: break;
mode = kVadAggressiveMid; case VADVeryAggr:
break; mode = kVadAggressiveHigh;
case VADVeryAggr: break;
mode = kVadAggressiveHigh; }
break;
}
return 0; return 0;
} }
int VoECodecImpl::SetOpusMaxPlaybackRate(int channel, int frequency_hz) { int VoECodecImpl::SetOpusMaxPlaybackRate(int channel, int frequency_hz) {
@ -466,81 +426,51 @@ int VoECodecImpl::SetOpusDtx(int channel, bool enable_dtx) {
} }
void VoECodecImpl::ACMToExternalCodecRepresentation(CodecInst& toInst, void VoECodecImpl::ACMToExternalCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst) const CodecInst& fromInst) {
{ toInst = fromInst;
toInst = fromInst; if (STR_CASE_CMP(fromInst.plname, "SILK") == 0) {
if (STR_CASE_CMP(fromInst.plname,"SILK") == 0) if (fromInst.plfreq == 12000) {
{ if (fromInst.pacsize == 320) {
if (fromInst.plfreq == 12000) toInst.pacsize = 240;
{ } else if (fromInst.pacsize == 640) {
if (fromInst.pacsize == 320) toInst.pacsize = 480;
{ } else if (fromInst.pacsize == 960) {
toInst.pacsize = 240; toInst.pacsize = 720;
} }
else if (fromInst.pacsize == 640) } else if (fromInst.plfreq == 24000) {
{ if (fromInst.pacsize == 640) {
toInst.pacsize = 480; toInst.pacsize = 480;
} } else if (fromInst.pacsize == 1280) {
else if (fromInst.pacsize == 960) toInst.pacsize = 960;
{ } else if (fromInst.pacsize == 1920) {
toInst.pacsize = 720; toInst.pacsize = 1440;
} }
}
else if (fromInst.plfreq == 24000)
{
if (fromInst.pacsize == 640)
{
toInst.pacsize = 480;
}
else if (fromInst.pacsize == 1280)
{
toInst.pacsize = 960;
}
else if (fromInst.pacsize == 1920)
{
toInst.pacsize = 1440;
}
}
} }
}
} }
void VoECodecImpl::ExternalToACMCodecRepresentation(CodecInst& toInst, void VoECodecImpl::ExternalToACMCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst) const CodecInst& fromInst) {
{ toInst = fromInst;
toInst = fromInst; if (STR_CASE_CMP(fromInst.plname, "SILK") == 0) {
if (STR_CASE_CMP(fromInst.plname,"SILK") == 0) if (fromInst.plfreq == 12000) {
{ if (fromInst.pacsize == 240) {
if (fromInst.plfreq == 12000) toInst.pacsize = 320;
{ } else if (fromInst.pacsize == 480) {
if (fromInst.pacsize == 240) toInst.pacsize = 640;
{ } else if (fromInst.pacsize == 720) {
toInst.pacsize = 320; toInst.pacsize = 960;
} }
else if (fromInst.pacsize == 480) } else if (fromInst.plfreq == 24000) {
{ if (fromInst.pacsize == 480) {
toInst.pacsize = 640; toInst.pacsize = 640;
} } else if (fromInst.pacsize == 960) {
else if (fromInst.pacsize == 720) toInst.pacsize = 1280;
{ } else if (fromInst.pacsize == 1440) {
toInst.pacsize = 960; toInst.pacsize = 1920;
} }
}
else if (fromInst.plfreq == 24000)
{
if (fromInst.pacsize == 480)
{
toInst.pacsize = 640;
}
else if (fromInst.pacsize == 960)
{
toInst.pacsize = 1280;
}
else if (fromInst.pacsize == 1440)
{
toInst.pacsize = 1920;
}
}
} }
}
} }
#endif // WEBRTC_VOICE_ENGINE_CODEC_API #endif // WEBRTC_VOICE_ENGINE_CODEC_API

View File

@ -15,63 +15,61 @@
#include "webrtc/voice_engine/shared_data.h" #include "webrtc/voice_engine/shared_data.h"
namespace webrtc namespace webrtc {
{
class VoECodecImpl: public VoECodec class VoECodecImpl : public VoECodec {
{ public:
public: int NumOfCodecs() override;
virtual int NumOfCodecs();
virtual int GetCodec(int index, CodecInst& codec); int GetCodec(int index, CodecInst& codec) override;
virtual int SetSendCodec(int channel, const CodecInst& codec); int SetSendCodec(int channel, const CodecInst& codec) override;
virtual int GetSendCodec(int channel, CodecInst& codec); int GetSendCodec(int channel, CodecInst& codec) override;
int SetBitRate(int channel, int bitrate_bps) override; int SetBitRate(int channel, int bitrate_bps) override;
virtual int GetRecCodec(int channel, CodecInst& codec); int GetRecCodec(int channel, CodecInst& codec) override;
virtual int SetSendCNPayloadType( int SetSendCNPayloadType(
int channel, int type, int channel,
PayloadFrequencies frequency = kFreq16000Hz); int type,
PayloadFrequencies frequency = kFreq16000Hz) override;
virtual int SetRecPayloadType(int channel, int SetRecPayloadType(int channel, const CodecInst& codec) override;
const CodecInst& codec);
virtual int GetRecPayloadType(int channel, CodecInst& codec); int GetRecPayloadType(int channel, CodecInst& codec) override;
virtual int SetFECStatus(int channel, bool enable); int SetFECStatus(int channel, bool enable) override;
virtual int GetFECStatus(int channel, bool& enabled); int GetFECStatus(int channel, bool& enabled) override;
virtual int SetVADStatus(int channel, int SetVADStatus(int channel,
bool enable, bool enable,
VadModes mode = kVadConventional, VadModes mode = kVadConventional,
bool disableDTX = false); bool disableDTX = false) override;
virtual int GetVADStatus(int channel, int GetVADStatus(int channel,
bool& enabled, bool& enabled,
VadModes& mode, VadModes& mode,
bool& disabledDTX); bool& disabledDTX) override;
virtual int SetOpusMaxPlaybackRate(int channel, int frequency_hz); int SetOpusMaxPlaybackRate(int channel, int frequency_hz) override;
virtual int SetOpusDtx(int channel, bool enable_dtx); int SetOpusDtx(int channel, bool enable_dtx) override;
protected: protected:
VoECodecImpl(voe::SharedData* shared); VoECodecImpl(voe::SharedData* shared);
virtual ~VoECodecImpl(); ~VoECodecImpl() override;
private: private:
void ACMToExternalCodecRepresentation(CodecInst& toInst, void ACMToExternalCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst); const CodecInst& fromInst);
void ExternalToACMCodecRepresentation(CodecInst& toInst, void ExternalToACMCodecRepresentation(CodecInst& toInst,
const CodecInst& fromInst); const CodecInst& fromInst);
voe::SharedData* _shared; voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -30,8 +30,7 @@ class VoECodecTest : public ::testing::Test {
voe_codec_(VoECodec::GetInterface(voe_)), voe_codec_(VoECodec::GetInterface(voe_)),
channel_(-1), channel_(-1),
adm_(new FakeAudioDeviceModule), adm_(new FakeAudioDeviceModule),
red_payload_type_(-1) { red_payload_type_(-1) {}
}
~VoECodecTest() {} ~VoECodecTest() {}
@ -62,18 +61,19 @@ class VoECodecTest : public ::testing::Test {
// Find primary and secondary codecs. // Find primary and secondary codecs.
int num_codecs = voe_codec_->NumOfCodecs(); int num_codecs = voe_codec_->NumOfCodecs();
int n = 0; int n = 0;
while (n < num_codecs && (!primary_found || !valid_secondary_found || while (n < num_codecs &&
!invalid_secondary_found || red_payload_type_ < 0)) { (!primary_found || !valid_secondary_found ||
!invalid_secondary_found || red_payload_type_ < 0)) {
EXPECT_EQ(0, voe_codec_->GetCodec(n, my_codec)); EXPECT_EQ(0, voe_codec_->GetCodec(n, my_codec));
if (!STR_CASE_CMP(my_codec.plname, "isac") && my_codec.plfreq == 16000) { if (!STR_CASE_CMP(my_codec.plname, "isac") && my_codec.plfreq == 16000) {
memcpy(&valid_secondary_, &my_codec, sizeof(my_codec)); memcpy(&valid_secondary_, &my_codec, sizeof(my_codec));
valid_secondary_found = true; valid_secondary_found = true;
} else if (!STR_CASE_CMP(my_codec.plname, "isac") && } else if (!STR_CASE_CMP(my_codec.plname, "isac") &&
my_codec.plfreq == 32000) { my_codec.plfreq == 32000) {
memcpy(&invalid_secondary_, &my_codec, sizeof(my_codec)); memcpy(&invalid_secondary_, &my_codec, sizeof(my_codec));
invalid_secondary_found = true; invalid_secondary_found = true;
} else if (!STR_CASE_CMP(my_codec.plname, "L16") && } else if (!STR_CASE_CMP(my_codec.plname, "L16") &&
my_codec.plfreq == 16000) { my_codec.plfreq == 16000) {
memcpy(&primary_, &my_codec, sizeof(my_codec)); memcpy(&primary_, &my_codec, sizeof(my_codec));
primary_found = true; primary_found = true;
} else if (!STR_CASE_CMP(my_codec.plname, "RED")) { } else if (!STR_CASE_CMP(my_codec.plname, "RED")) {

View File

@ -20,242 +20,206 @@
namespace webrtc { namespace webrtc {
VoEDtmf* VoEDtmf::GetInterface(VoiceEngine* voiceEngine) VoEDtmf* VoEDtmf::GetInterface(VoiceEngine* voiceEngine) {
{
#ifndef WEBRTC_VOICE_ENGINE_DTMF_API #ifndef WEBRTC_VOICE_ENGINE_DTMF_API
return NULL; return NULL;
#else #else
if (NULL == voiceEngine) if (NULL == voiceEngine) {
{ return NULL;
return NULL; }
} VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); s->AddRef();
s->AddRef(); return s;
return s;
#endif #endif
} }
#ifdef WEBRTC_VOICE_ENGINE_DTMF_API #ifdef WEBRTC_VOICE_ENGINE_DTMF_API
VoEDtmfImpl::VoEDtmfImpl(voe::SharedData* shared) : VoEDtmfImpl::VoEDtmfImpl(voe::SharedData* shared)
_dtmfFeedback(true), : _dtmfFeedback(true), _dtmfDirectFeedback(false), _shared(shared) {
_dtmfDirectFeedback(false), WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
_shared(shared) "VoEDtmfImpl::VoEDtmfImpl() - ctor");
{
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
"VoEDtmfImpl::VoEDtmfImpl() - ctor");
} }
VoEDtmfImpl::~VoEDtmfImpl() VoEDtmfImpl::~VoEDtmfImpl() {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEDtmfImpl::~VoEDtmfImpl() - dtor");
"VoEDtmfImpl::~VoEDtmfImpl() - dtor");
} }
int VoEDtmfImpl::SendTelephoneEvent(int channel, int VoEDtmfImpl::SendTelephoneEvent(int channel,
int eventCode, int eventCode,
bool outOfBand, bool outOfBand,
int lengthMs, int lengthMs,
int attenuationDb) int attenuationDb) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SendTelephoneEvent(channel=%d, eventCode=%d, outOfBand=%d,"
"SendTelephoneEvent(channel=%d, eventCode=%d, outOfBand=%d," "length=%d, attenuationDb=%d)",
"length=%d, attenuationDb=%d)", channel, eventCode, (int)outOfBand, lengthMs, attenuationDb);
channel, eventCode, (int)outOfBand, lengthMs, attenuationDb); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "SendTelephoneEvent() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"SendTelephoneEvent() failed to locate channel"); if (!channelPtr->Sending()) {
return -1; _shared->SetLastError(VE_NOT_SENDING, kTraceError,
} "SendTelephoneEvent() sending is not active");
if (!channelPtr->Sending()) return -1;
{ }
_shared->SetLastError(VE_NOT_SENDING, kTraceError,
"SendTelephoneEvent() sending is not active");
return -1;
}
// Sanity check // Sanity check
const int maxEventCode = outOfBand ? const int maxEventCode = outOfBand ? static_cast<int>(kMaxTelephoneEventCode)
static_cast<int>(kMaxTelephoneEventCode) : : static_cast<int>(kMaxDtmfEventCode);
static_cast<int>(kMaxDtmfEventCode); const bool testFailed = ((eventCode < 0) || (eventCode > maxEventCode) ||
const bool testFailed = ((eventCode < 0) || (lengthMs < kMinTelephoneEventDuration) ||
(eventCode > maxEventCode) || (lengthMs > kMaxTelephoneEventDuration) ||
(lengthMs < kMinTelephoneEventDuration) || (attenuationDb < kMinTelephoneEventAttenuation) ||
(lengthMs > kMaxTelephoneEventDuration) || (attenuationDb > kMaxTelephoneEventAttenuation));
(attenuationDb < kMinTelephoneEventAttenuation) || if (testFailed) {
(attenuationDb > kMaxTelephoneEventAttenuation)); _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
if (testFailed) "SendTelephoneEvent() invalid parameter(s)");
{ return -1;
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, }
"SendTelephoneEvent() invalid parameter(s)");
return -1;
}
const bool isDtmf = const bool isDtmf = (eventCode >= 0) && (eventCode <= kMaxDtmfEventCode);
(eventCode >= 0) && (eventCode <= kMaxDtmfEventCode); const bool playDtmfToneDirect =
const bool playDtmfToneDirect = isDtmf && (_dtmfFeedback && _dtmfDirectFeedback);
isDtmf && (_dtmfFeedback && _dtmfDirectFeedback);
if (playDtmfToneDirect) if (playDtmfToneDirect) {
{ // Mute the microphone signal while playing back the tone directly.
// Mute the microphone signal while playing back the tone directly. // This is to reduce the risk of introducing echo from the added output.
// This is to reduce the risk of introducing echo from the added output. _shared->transmit_mixer()->UpdateMuteMicrophoneTime(lengthMs);
_shared->transmit_mixer()->UpdateMuteMicrophoneTime(lengthMs);
// Play out local feedback tone directly (same approach for both inband // Play out local feedback tone directly (same approach for both inband
// and outband). // and outband).
// Reduce the length of the the tone with 80ms to reduce risk of echo. // Reduce the length of the the tone with 80ms to reduce risk of echo.
// For non-direct feedback, outband and inband cases are handled // For non-direct feedback, outband and inband cases are handled
// differently. // differently.
_shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs - 80, _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs - 80,
attenuationDb); attenuationDb);
} }
if (outOfBand) if (outOfBand) {
{ // The RTP/RTCP module will always deliver OnPlayTelephoneEvent when
// The RTP/RTCP module will always deliver OnPlayTelephoneEvent when // an event is transmitted. It is up to the VoE to utilize it or not.
// an event is transmitted. It is up to the VoE to utilize it or not. // This flag ensures that feedback/playout is enabled; however, the
// This flag ensures that feedback/playout is enabled; however, the // channel object must still parse out the Dtmf events (0-15) from
// channel object must still parse out the Dtmf events (0-15) from // all possible events (0-255).
// all possible events (0-255). const bool playDTFMEvent = (_dtmfFeedback && !_dtmfDirectFeedback);
const bool playDTFMEvent = (_dtmfFeedback && !_dtmfDirectFeedback);
return channelPtr->SendTelephoneEventOutband(eventCode, return channelPtr->SendTelephoneEventOutband(eventCode, lengthMs,
lengthMs, attenuationDb, playDTFMEvent);
attenuationDb, } else {
playDTFMEvent); // For Dtmf tones, we want to ensure that inband tones are played out
} // in sync with the transmitted audio. This flag is utilized by the
else // channel object to determine if the queued Dtmf e vent shall also
{ // be fed to the output mixer in the same step as input audio is
// For Dtmf tones, we want to ensure that inband tones are played out // replaced by inband Dtmf tones.
// in sync with the transmitted audio. This flag is utilized by the const bool playDTFMEvent =
// channel object to determine if the queued Dtmf e vent shall also (isDtmf && _dtmfFeedback && !_dtmfDirectFeedback);
// be fed to the output mixer in the same step as input audio is
// replaced by inband Dtmf tones.
const bool playDTFMEvent =
(isDtmf && _dtmfFeedback && !_dtmfDirectFeedback);
return channelPtr->SendTelephoneEventInband(eventCode, return channelPtr->SendTelephoneEventInband(eventCode, lengthMs,
lengthMs, attenuationDb, playDTFMEvent);
attenuationDb, }
playDTFMEvent);
}
} }
int VoEDtmfImpl::SetSendTelephoneEventPayloadType(int channel, int VoEDtmfImpl::SetSendTelephoneEventPayloadType(int channel,
unsigned char type) unsigned char type) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetSendTelephoneEventPayloadType(channel=%d, type=%u)", channel,
"SetSendTelephoneEventPayloadType(channel=%d, type=%u)", type);
channel, type); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(
if (channelPtr == NULL) VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetSendTelephoneEventPayloadType() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetSendTelephoneEventPayloadType() failed to locate channel"); }
return -1; return channelPtr->SetSendTelephoneEventPayloadType(type);
}
return channelPtr->SetSendTelephoneEventPayloadType(type);
} }
int VoEDtmfImpl::GetSendTelephoneEventPayloadType(int channel, int VoEDtmfImpl::GetSendTelephoneEventPayloadType(int channel,
unsigned char& type) unsigned char& type) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetSendTelephoneEventPayloadType(channel=%d)", channel);
"GetSendTelephoneEventPayloadType(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(
if (channelPtr == NULL) VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetSendTelephoneEventPayloadType() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetSendTelephoneEventPayloadType() failed to locate channel"); }
return -1; return channelPtr->GetSendTelephoneEventPayloadType(type);
}
return channelPtr->GetSendTelephoneEventPayloadType(type);
} }
int VoEDtmfImpl::PlayDtmfTone(int eventCode, int VoEDtmfImpl::PlayDtmfTone(int eventCode, int lengthMs, int attenuationDb) {
int lengthMs, WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
int attenuationDb) "PlayDtmfTone(eventCode=%d, lengthMs=%d, attenuationDb=%d)",
{ eventCode, lengthMs, attenuationDb);
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"PlayDtmfTone(eventCode=%d, lengthMs=%d, attenuationDb=%d)",
eventCode, lengthMs, attenuationDb);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (!_shared->audio_device()->Playing()) {
if (!_shared->audio_device()->Playing()) _shared->SetLastError(VE_NOT_PLAYING, kTraceError,
{ "PlayDtmfTone() no channel is playing out");
_shared->SetLastError(VE_NOT_PLAYING, kTraceError, return -1;
"PlayDtmfTone() no channel is playing out"); }
return -1; if ((eventCode < kMinDtmfEventCode) || (eventCode > kMaxDtmfEventCode) ||
} (lengthMs < kMinTelephoneEventDuration) ||
if ((eventCode < kMinDtmfEventCode) || (lengthMs > kMaxTelephoneEventDuration) ||
(eventCode > kMaxDtmfEventCode) || (attenuationDb < kMinTelephoneEventAttenuation) ||
(lengthMs < kMinTelephoneEventDuration) || (attenuationDb > kMaxTelephoneEventAttenuation)) {
(lengthMs > kMaxTelephoneEventDuration) || _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
(attenuationDb <kMinTelephoneEventAttenuation) || "PlayDtmfTone() invalid tone parameter(s)");
(attenuationDb > kMaxTelephoneEventAttenuation)) return -1;
{ }
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, return _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs,
"PlayDtmfTone() invalid tone parameter(s)");
return -1;
}
return _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs,
attenuationDb); attenuationDb);
} }
int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback) int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetDtmfFeedbackStatus(enable=%d, directFeeback=%d)",
"SetDtmfFeedbackStatus(enable=%d, directFeeback=%d)", (int)enable, (int)directFeedback);
(int)enable, (int)directFeedback);
CriticalSectionScoped sc(_shared->crit_sec()); CriticalSectionScoped sc(_shared->crit_sec());
_dtmfFeedback = enable; _dtmfFeedback = enable;
_dtmfDirectFeedback = directFeedback; _dtmfDirectFeedback = directFeedback;
return 0; return 0;
} }
int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetDtmfFeedbackStatus()");
"GetDtmfFeedbackStatus()");
CriticalSectionScoped sc(_shared->crit_sec()); CriticalSectionScoped sc(_shared->crit_sec());
enabled = _dtmfFeedback; enabled = _dtmfFeedback;
directFeedback = _dtmfDirectFeedback; directFeedback = _dtmfDirectFeedback;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
VoEId(_shared->instance_id(), -1), "GetDtmfFeedbackStatus() => enabled=%d, directFeedback=%d",
"GetDtmfFeedbackStatus() => enabled=%d, directFeedback=%d", enabled, directFeedback);
enabled, directFeedback); return 0;
return 0;
} }
#endif // #ifdef WEBRTC_VOICE_ENGINE_DTMF_API #endif // #ifdef WEBRTC_VOICE_ENGINE_DTMF_API

View File

@ -14,42 +14,38 @@
#include "webrtc/voice_engine/include/voe_dtmf.h" #include "webrtc/voice_engine/include/voe_dtmf.h"
#include "webrtc/voice_engine/shared_data.h" #include "webrtc/voice_engine/shared_data.h"
namespace webrtc namespace webrtc {
{
class VoEDtmfImpl : public VoEDtmf class VoEDtmfImpl : public VoEDtmf {
{ public:
public: int SendTelephoneEvent(int channel,
virtual int SendTelephoneEvent( int eventCode,
int channel, bool outOfBand = true,
int eventCode, int lengthMs = 160,
bool outOfBand = true, int attenuationDb = 10) override;
int lengthMs = 160,
int attenuationDb = 10);
virtual int SetSendTelephoneEventPayloadType(int channel, int SetSendTelephoneEventPayloadType(int channel,
unsigned char type); unsigned char type) override;
virtual int GetSendTelephoneEventPayloadType(int channel, int GetSendTelephoneEventPayloadType(int channel,
unsigned char& type); unsigned char& type) override;
virtual int SetDtmfFeedbackStatus(bool enable, int SetDtmfFeedbackStatus(bool enable, bool directFeedback = false) override;
bool directFeedback = false);
virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback); int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) override;
virtual int PlayDtmfTone(int eventCode, int PlayDtmfTone(int eventCode,
int lengthMs = 200, int lengthMs = 200,
int attenuationDb = 10); int attenuationDb = 10) override;
protected: protected:
VoEDtmfImpl(voe::SharedData* shared); VoEDtmfImpl(voe::SharedData* shared);
virtual ~VoEDtmfImpl(); ~VoEDtmfImpl() override;
private: private:
bool _dtmfFeedback; bool _dtmfFeedback;
bool _dtmfDirectFeedback; bool _dtmfDirectFeedback;
voe::SharedData* _shared; voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -20,18 +20,16 @@
namespace webrtc { namespace webrtc {
VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine) VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine) {
{
#ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API #ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
return NULL; return NULL;
#else #else
if (NULL == voiceEngine) if (NULL == voiceEngine) {
{ return NULL;
return NULL; }
} VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); s->AddRef();
s->AddRef(); return s;
return s;
#endif #endif
} }
@ -40,167 +38,145 @@ VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine)
VoEExternalMediaImpl::VoEExternalMediaImpl(voe::SharedData* shared) VoEExternalMediaImpl::VoEExternalMediaImpl(voe::SharedData* shared)
: :
#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
playout_delay_ms_(0), playout_delay_ms_(0),
#endif #endif
shared_(shared) shared_(shared) {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1), "VoEExternalMediaImpl() - ctor");
"VoEExternalMediaImpl() - ctor");
} }
VoEExternalMediaImpl::~VoEExternalMediaImpl() VoEExternalMediaImpl::~VoEExternalMediaImpl() {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1), "~VoEExternalMediaImpl() - dtor");
"~VoEExternalMediaImpl() - dtor");
} }
int VoEExternalMediaImpl::RegisterExternalMediaProcessing( int VoEExternalMediaImpl::RegisterExternalMediaProcessing(
int channel, int channel,
ProcessingTypes type, ProcessingTypes type,
VoEMediaProcess& processObject) VoEMediaProcess& processObject) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1), "RegisterExternalMediaProcessing(channel=%d, type=%d, "
"RegisterExternalMediaProcessing(channel=%d, type=%d, " "processObject=0x%x)",
"processObject=0x%x)", channel, type, &processObject); channel, type, &processObject);
if (!shared_->statistics().Initialized()) if (!shared_->statistics().Initialized()) {
{ shared_->SetLastError(VE_NOT_INITED, kTraceError);
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
switch (type)
{
case kPlaybackPerChannel:
case kRecordingPerChannel:
{
voe::ChannelOwner ch =
shared_->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterExternalMediaProcessing() failed to locate "
"channel");
return -1;
}
return channelPtr->RegisterExternalMediaProcessing(type,
processObject);
}
case kPlaybackAllChannelsMixed:
{
return shared_->output_mixer()->RegisterExternalMediaProcessing(
processObject);
}
case kRecordingAllChannelsMixed:
case kRecordingPreprocessing:
{
return shared_->transmit_mixer()->RegisterExternalMediaProcessing(
&processObject, type);
}
}
return -1; return -1;
}
switch (type) {
case kPlaybackPerChannel:
case kRecordingPerChannel: {
voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
shared_->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterExternalMediaProcessing() failed to locate "
"channel");
return -1;
}
return channelPtr->RegisterExternalMediaProcessing(type, processObject);
}
case kPlaybackAllChannelsMixed: {
return shared_->output_mixer()->RegisterExternalMediaProcessing(
processObject);
}
case kRecordingAllChannelsMixed:
case kRecordingPreprocessing: {
return shared_->transmit_mixer()->RegisterExternalMediaProcessing(
&processObject, type);
}
}
return -1;
} }
int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing( int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing(
int channel, int channel,
ProcessingTypes type) ProcessingTypes type) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1), "DeRegisterExternalMediaProcessing(channel=%d)", channel);
"DeRegisterExternalMediaProcessing(channel=%d)", channel); if (!shared_->statistics().Initialized()) {
if (!shared_->statistics().Initialized()) shared_->SetLastError(VE_NOT_INITED, kTraceError);
{
shared_->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
switch (type)
{
case kPlaybackPerChannel:
case kRecordingPerChannel:
{
voe::ChannelOwner ch =
shared_->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterExternalMediaProcessing() "
"failed to locate channel");
return -1;
}
return channelPtr->DeRegisterExternalMediaProcessing(type);
}
case kPlaybackAllChannelsMixed:
{
return shared_->output_mixer()->
DeRegisterExternalMediaProcessing();
}
case kRecordingAllChannelsMixed:
case kRecordingPreprocessing:
{
return shared_->transmit_mixer()->
DeRegisterExternalMediaProcessing(type);
}
}
return -1; return -1;
}
switch (type) {
case kPlaybackPerChannel:
case kRecordingPerChannel: {
voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"RegisterExternalMediaProcessing() "
"failed to locate channel");
return -1;
}
return channelPtr->DeRegisterExternalMediaProcessing(type);
}
case kPlaybackAllChannelsMixed: {
return shared_->output_mixer()->DeRegisterExternalMediaProcessing();
}
case kRecordingAllChannelsMixed:
case kRecordingPreprocessing: {
return shared_->transmit_mixer()->DeRegisterExternalMediaProcessing(type);
}
}
return -1;
} }
int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz, int VoEExternalMediaImpl::GetAudioFrame(int channel, int desired_sample_rate_hz,
AudioFrame* frame) { AudioFrame* frame) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
VoEId(shared_->instance_id(), channel), VoEId(shared_->instance_id(), channel),
"GetAudioFrame(channel=%d, desired_sample_rate_hz=%d)", "GetAudioFrame(channel=%d, desired_sample_rate_hz=%d)", channel,
channel, desired_sample_rate_hz); desired_sample_rate_hz);
if (!shared_->statistics().Initialized()) if (!shared_->statistics().Initialized()) {
{ shared_->SetLastError(VE_NOT_INITED, kTraceError);
shared_->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetAudioFrame() failed to locate channel");
shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetAudioFrame() failed to locate channel"); }
return -1; if (!channelPtr->ExternalMixing()) {
} shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
if (!channelPtr->ExternalMixing()) { "GetAudioFrame() was called on channel that is not"
shared_->SetLastError(VE_INVALID_OPERATION, kTraceError, " externally mixed.");
"GetAudioFrame() was called on channel that is not" return -1;
" externally mixed."); }
return -1; if (!channelPtr->Playing()) {
} shared_->SetLastError(
if (!channelPtr->Playing()) { VE_INVALID_OPERATION, kTraceError,
shared_->SetLastError(VE_INVALID_OPERATION, kTraceError, "GetAudioFrame() was called on channel that is not playing.");
"GetAudioFrame() was called on channel that is not playing."); return -1;
return -1; }
} if (desired_sample_rate_hz == -1) {
if (desired_sample_rate_hz == -1) { shared_->SetLastError(VE_BAD_ARGUMENT, kTraceError,
shared_->SetLastError(VE_BAD_ARGUMENT, kTraceError, "GetAudioFrame() was called with bad sample rate.");
"GetAudioFrame() was called with bad sample rate."); return -1;
return -1; }
} frame->sample_rate_hz_ =
frame->sample_rate_hz_ = desired_sample_rate_hz == 0 ? -1 : desired_sample_rate_hz == 0 ? -1 : desired_sample_rate_hz;
desired_sample_rate_hz; return channelPtr->GetAudioFrame(channel, *frame);
return channelPtr->GetAudioFrame(channel, *frame);
} }
int VoEExternalMediaImpl::SetExternalMixing(int channel, bool enable) { int VoEExternalMediaImpl::SetExternalMixing(int channel, bool enable) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
VoEId(shared_->instance_id(), channel), VoEId(shared_->instance_id(), channel),
"SetExternalMixing(channel=%d, enable=%d)", channel, enable); "SetExternalMixing(channel=%d, enable=%d)", channel, enable);
if (!shared_->statistics().Initialized()) if (!shared_->statistics().Initialized()) {
{ shared_->SetLastError(VE_NOT_INITED, kTraceError);
shared_->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = shared_->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetExternalMixing() failed to locate channel");
shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetExternalMixing() failed to locate channel"); }
return -1; return channelPtr->SetExternalMixing(enable);
}
return channelPtr->SetExternalMixing(enable);
} }
#endif // WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API #endif // WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API

View File

@ -17,30 +17,27 @@
namespace webrtc { namespace webrtc {
class VoEExternalMediaImpl : public VoEExternalMedia class VoEExternalMediaImpl : public VoEExternalMedia {
{ public:
public: int RegisterExternalMediaProcessing(int channel,
virtual int RegisterExternalMediaProcessing( ProcessingTypes type,
int channel, VoEMediaProcess& processObject) override;
ProcessingTypes type,
VoEMediaProcess& processObject);
virtual int DeRegisterExternalMediaProcessing( int DeRegisterExternalMediaProcessing(int channel,
int channel, ProcessingTypes type) override;
ProcessingTypes type);
int GetAudioFrame(int channel,
int desired_sample_rate_hz,
AudioFrame* frame) override;
virtual int GetAudioFrame(int channel, int desired_sample_rate_hz, int SetExternalMixing(int channel, bool enable) override;
AudioFrame* frame);
virtual int SetExternalMixing(int channel, bool enable); protected:
VoEExternalMediaImpl(voe::SharedData* shared);
~VoEExternalMediaImpl() override;
protected: private:
VoEExternalMediaImpl(voe::SharedData* shared); voe::SharedData* shared_;
virtual ~VoEExternalMediaImpl();
private:
voe::SharedData* shared_;
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -22,71 +22,60 @@
namespace webrtc { namespace webrtc {
VoEFile* VoEFile::GetInterface(VoiceEngine* voiceEngine) VoEFile* VoEFile::GetInterface(VoiceEngine* voiceEngine) {
{
#ifndef WEBRTC_VOICE_ENGINE_FILE_API #ifndef WEBRTC_VOICE_ENGINE_FILE_API
return NULL; return NULL;
#else #else
if (NULL == voiceEngine) if (NULL == voiceEngine) {
{ return NULL;
return NULL; }
} VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); s->AddRef();
s->AddRef(); return s;
return s;
#endif #endif
} }
#ifdef WEBRTC_VOICE_ENGINE_FILE_API #ifdef WEBRTC_VOICE_ENGINE_FILE_API
VoEFileImpl::VoEFileImpl(voe::SharedData* shared) : _shared(shared) VoEFileImpl::VoEFileImpl(voe::SharedData* shared) : _shared(shared) {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEFileImpl::VoEFileImpl() - ctor");
"VoEFileImpl::VoEFileImpl() - ctor");
} }
VoEFileImpl::~VoEFileImpl() VoEFileImpl::~VoEFileImpl() {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEFileImpl::~VoEFileImpl() - dtor");
"VoEFileImpl::~VoEFileImpl() - dtor");
} }
int VoEFileImpl::StartPlayingFileLocally( int VoEFileImpl::StartPlayingFileLocally(int channel,
int channel, const char fileNameUTF8[1024],
const char fileNameUTF8[1024], bool loop,
bool loop, FileFormats format, FileFormats format,
float volumeScaling, float volumeScaling,
int startPointMs, int startPointMs,
int stopPointMs) int stopPointMs) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StartPlayingFileLocally(channel=%d, fileNameUTF8[]=%s, "
"StartPlayingFileLocally(channel=%d, fileNameUTF8[]=%s, " "loop=%d, format=%d, volumeScaling=%5.3f, startPointMs=%d,"
"loop=%d, format=%d, volumeScaling=%5.3f, startPointMs=%d," " stopPointMs=%d)",
" stopPointMs=%d)", channel, fileNameUTF8, loop, format, volumeScaling, startPointMs,
channel, fileNameUTF8, loop, format, volumeScaling, stopPointMs);
startPointMs, stopPointMs); assert(1024 == FileWrapper::kMaxFileNameSize);
assert(1024 == FileWrapper::kMaxFileNameSize); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "StartPlayingFileLocally() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"StartPlayingFileLocally() failed to locate channel");
return -1;
}
return channelPtr->StartPlayingFileLocally(fileNameUTF8, return channelPtr->StartPlayingFileLocally(fileNameUTF8, loop, format,
loop, startPointMs, volumeScaling,
format, stopPointMs, NULL);
startPointMs,
volumeScaling,
stopPointMs,
NULL);
} }
int VoEFileImpl::StartPlayingFileLocally(int channel, int VoEFileImpl::StartPlayingFileLocally(int channel,
@ -94,74 +83,61 @@ int VoEFileImpl::StartPlayingFileLocally(int channel,
FileFormats format, FileFormats format,
float volumeScaling, float volumeScaling,
int startPointMs, int startPointMs,
int stopPointMs) int stopPointMs) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StartPlayingFileLocally(channel=%d, stream, format=%d, "
"StartPlayingFileLocally(channel=%d, stream, format=%d, " "volumeScaling=%5.3f, startPointMs=%d, stopPointMs=%d)",
"volumeScaling=%5.3f, startPointMs=%d, stopPointMs=%d)", channel, format, volumeScaling, startPointMs, stopPointMs);
channel, format, volumeScaling, startPointMs, stopPointMs);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
}
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) if (channelPtr == NULL) {
{ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, "StartPlayingFileLocally() failed to locate channel");
"StartPlayingFileLocally() failed to locate channel"); return -1;
return -1; }
}
return channelPtr->StartPlayingFileLocally(stream, return channelPtr->StartPlayingFileLocally(stream, format, startPointMs,
format, volumeScaling, stopPointMs, NULL);
startPointMs,
volumeScaling,
stopPointMs,
NULL);
} }
int VoEFileImpl::StopPlayingFileLocally(int channel) int VoEFileImpl::StopPlayingFileLocally(int channel) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StopPlayingFileLocally()");
"StopPlayingFileLocally()"); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "StopPlayingFileLocally() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"StopPlayingFileLocally() failed to locate channel"); return channelPtr->StopPlayingFileLocally();
return -1;
}
return channelPtr->StopPlayingFileLocally();
} }
int VoEFileImpl::IsPlayingFileLocally(int channel) int VoEFileImpl::IsPlayingFileLocally(int channel) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "IsPlayingFileLocally(channel=%d)", channel);
"IsPlayingFileLocally(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "StopPlayingFileLocally() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"StopPlayingFileLocally() failed to locate channel"); return channelPtr->IsPlayingFileLocally();
return -1;
}
return channelPtr->IsPlayingFileLocally();
} }
int VoEFileImpl::StartPlayingFileAsMicrophone(int channel, int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
@ -169,426 +145,338 @@ int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
bool loop, bool loop,
bool mixWithMicrophone, bool mixWithMicrophone,
FileFormats format, FileFormats format,
float volumeScaling) float volumeScaling) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, "
"StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, " "loop=%d, mixWithMicrophone=%d, format=%d, "
"loop=%d, mixWithMicrophone=%d, format=%d, " "volumeScaling=%5.3f)",
"volumeScaling=%5.3f)", channel, fileNameUTF8, loop, mixWithMicrophone, format,
channel, fileNameUTF8, loop, mixWithMicrophone, format, volumeScaling);
volumeScaling); assert(1024 == FileWrapper::kMaxFileNameSize);
assert(1024 == FileWrapper::kMaxFileNameSize); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1;
const uint32_t startPointMs(0);
const uint32_t stopPointMs(0);
if (channel == -1) {
int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
NULL);
if (res) {
WEBRTC_TRACE(
kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start playing file");
return (-1);
} else {
_shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
return (0);
}
} else {
// Add file after demultiplexing <=> affects one channel only
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"StartPlayingFileAsMicrophone() failed to locate channel");
return -1;
} }
const uint32_t startPointMs(0); int res = channelPtr->StartPlayingFileAsMicrophone(
const uint32_t stopPointMs(0); fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
NULL);
if (channel == -1) if (res) {
{ WEBRTC_TRACE(
int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone( kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
fileNameUTF8, "StartPlayingFileAsMicrophone() failed to start playing file");
loop, return -1;
format, } else {
startPointMs, channelPtr->SetMixWithMicStatus(mixWithMicrophone);
volumeScaling, return 0;
stopPointMs,
NULL);
if (res)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start playing file");
return(-1);
}
else
{
_shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
return(0);
}
}
else
{
// Add file after demultiplexing <=> affects one channel only
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"StartPlayingFileAsMicrophone() failed to locate channel");
return -1;
}
int res = channelPtr->StartPlayingFileAsMicrophone(fileNameUTF8,
loop,
format,
startPointMs,
volumeScaling,
stopPointMs,
NULL);
if (res)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start playing file");
return -1;
}
else
{
channelPtr->SetMixWithMicStatus(mixWithMicrophone);
return 0;
}
} }
}
} }
int VoEFileImpl::StartPlayingFileAsMicrophone(int channel, int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
InStream* stream, InStream* stream,
bool mixWithMicrophone, bool mixWithMicrophone,
FileFormats format, FileFormats format,
float volumeScaling) float volumeScaling) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StartPlayingFileAsMicrophone(channel=%d, stream,"
"StartPlayingFileAsMicrophone(channel=%d, stream," " mixWithMicrophone=%d, format=%d, volumeScaling=%5.3f)",
" mixWithMicrophone=%d, format=%d, volumeScaling=%5.3f)", channel, mixWithMicrophone, format, volumeScaling);
channel, mixWithMicrophone, format, volumeScaling);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
const uint32_t startPointMs(0);
const uint32_t stopPointMs(0);
if (channel == -1) {
int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
if (res) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start "
"playing stream");
return (-1);
} else {
_shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
return (0);
}
} else {
// Add file after demultiplexing <=> affects one channel only
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"StartPlayingFileAsMicrophone() failed to locate channel");
return -1;
} }
const uint32_t startPointMs(0); int res = channelPtr->StartPlayingFileAsMicrophone(
const uint32_t stopPointMs(0); stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
if (res) {
if (channel == -1) WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
{ "StartPlayingFileAsMicrophone() failed to start "
int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone( "playing stream");
stream, return -1;
format, } else {
startPointMs, channelPtr->SetMixWithMicStatus(mixWithMicrophone);
volumeScaling, return 0;
stopPointMs,
NULL);
if (res)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start "
"playing stream");
return(-1);
}
else
{
_shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
return(0);
}
}
else
{
// Add file after demultiplexing <=> affects one channel only
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"StartPlayingFileAsMicrophone() failed to locate channel");
return -1;
}
int res = channelPtr->StartPlayingFileAsMicrophone(
stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
if (res)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start "
"playing stream");
return -1;
}
else
{
channelPtr->SetMixWithMicStatus(mixWithMicrophone);
return 0;
}
} }
}
} }
int VoEFileImpl::StopPlayingFileAsMicrophone(int channel) int VoEFileImpl::StopPlayingFileAsMicrophone(int channel) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StopPlayingFileAsMicrophone(channel=%d)", channel);
"StopPlayingFileAsMicrophone(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; if (channel == -1) {
} // Stop adding file before demultiplexing <=> affects all channels
if (channel == -1) return _shared->transmit_mixer()->StopPlayingFileAsMicrophone();
{ } else {
// Stop adding file before demultiplexing <=> affects all channels // Stop adding file after demultiplexing <=> affects one channel only
return _shared->transmit_mixer()->StopPlayingFileAsMicrophone(); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
else if (channelPtr == NULL) {
{ _shared->SetLastError(
// Stop adding file after demultiplexing <=> affects one channel only VE_CHANNEL_NOT_VALID, kTraceError,
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); "StopPlayingFileAsMicrophone() failed to locate channel");
voe::Channel* channelPtr = ch.channel(); return -1;
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"StopPlayingFileAsMicrophone() failed to locate channel");
return -1;
}
return channelPtr->StopPlayingFileAsMicrophone();
} }
return channelPtr->StopPlayingFileAsMicrophone();
}
} }
int VoEFileImpl::IsPlayingFileAsMicrophone(int channel) int VoEFileImpl::IsPlayingFileAsMicrophone(int channel) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "IsPlayingFileAsMicrophone(channel=%d)", channel);
"IsPlayingFileAsMicrophone(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; if (channel == -1) {
} return _shared->transmit_mixer()->IsPlayingFileAsMicrophone();
if (channel == -1) } else {
{ // Stop adding file after demultiplexing <=> affects one channel only
return _shared->transmit_mixer()->IsPlayingFileAsMicrophone(); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
else if (channelPtr == NULL) {
{ _shared->SetLastError(
// Stop adding file after demultiplexing <=> affects one channel only VE_CHANNEL_NOT_VALID, kTraceError,
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); "IsPlayingFileAsMicrophone() failed to locate channel");
voe::Channel* channelPtr = ch.channel(); return -1;
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"IsPlayingFileAsMicrophone() failed to locate channel");
return -1;
}
return channelPtr->IsPlayingFileAsMicrophone();
} }
return channelPtr->IsPlayingFileAsMicrophone();
}
} }
int VoEFileImpl::StartRecordingPlayout( int VoEFileImpl::StartRecordingPlayout(int channel,
int channel, const char* fileNameUTF8, CodecInst* compression, const char* fileNameUTF8,
int maxSizeBytes) CodecInst* compression,
{ int maxSizeBytes) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartRecordingPlayout(channel=%d, fileNameUTF8=%s, " "StartRecordingPlayout(channel=%d, fileNameUTF8=%s, "
"compression, maxSizeBytes=%d)", "compression, maxSizeBytes=%d)",
channel, fileNameUTF8, maxSizeBytes); channel, fileNameUTF8, maxSizeBytes);
assert(1024 == FileWrapper::kMaxFileNameSize); assert(1024 == FileWrapper::kMaxFileNameSize);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (channel == -1) {
if (channel == -1) return _shared->output_mixer()->StartRecordingPlayout(fileNameUTF8,
{ compression);
return _shared->output_mixer()->StartRecordingPlayout } else {
(fileNameUTF8, compression); // Add file after demultiplexing <=> affects one channel only
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
else voe::Channel* channelPtr = ch.channel();
{ if (channelPtr == NULL) {
// Add file after demultiplexing <=> affects one channel only _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); "StartRecordingPlayout() failed to locate channel");
voe::Channel* channelPtr = ch.channel(); return -1;
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"StartRecordingPlayout() failed to locate channel");
return -1;
}
return channelPtr->StartRecordingPlayout(fileNameUTF8, compression);
} }
return channelPtr->StartRecordingPlayout(fileNameUTF8, compression);
}
} }
int VoEFileImpl::StartRecordingPlayout( int VoEFileImpl::StartRecordingPlayout(int channel,
int channel, OutStream* stream, CodecInst* compression) OutStream* stream,
{ CodecInst* compression) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartRecordingPlayout(channel=%d, stream, compression)", "StartRecordingPlayout(channel=%d, stream, compression)",
channel); channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (channel == -1) {
if (channel == -1) return _shared->output_mixer()->StartRecordingPlayout(stream, compression);
{ } else {
return _shared->output_mixer()-> voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
StartRecordingPlayout(stream, compression); voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
else _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "StartRecordingPlayout() failed to locate channel");
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); return -1;
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"StartRecordingPlayout() failed to locate channel");
return -1;
}
return channelPtr->StartRecordingPlayout(stream, compression);
} }
return channelPtr->StartRecordingPlayout(stream, compression);
}
} }
int VoEFileImpl::StopRecordingPlayout(int channel) int VoEFileImpl::StopRecordingPlayout(int channel) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StopRecordingPlayout(channel=%d)", channel);
"StopRecordingPlayout(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; if (channel == -1) {
} return _shared->output_mixer()->StopRecordingPlayout();
if (channel == -1) } else {
{ voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
return _shared->output_mixer()->StopRecordingPlayout(); voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
else _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "StopRecordingPlayout() failed to locate channel");
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); return -1;
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"StopRecordingPlayout() failed to locate channel");
return -1;
}
return channelPtr->StopRecordingPlayout();
} }
return channelPtr->StopRecordingPlayout();
}
} }
int VoEFileImpl::StartRecordingMicrophone( int VoEFileImpl::StartRecordingMicrophone(const char* fileNameUTF8,
const char* fileNameUTF8, CodecInst* compression, int maxSizeBytes) CodecInst* compression,
{ int maxSizeBytes) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone(fileNameUTF8=%s, compression, " "StartRecordingMicrophone(fileNameUTF8=%s, compression, "
"maxSizeBytes=%d)", fileNameUTF8, maxSizeBytes); "maxSizeBytes=%d)",
assert(1024 == FileWrapper::kMaxFileNameSize); fileNameUTF8, maxSizeBytes);
assert(1024 == FileWrapper::kMaxFileNameSize);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (_shared->transmit_mixer()->StartRecordingMicrophone(fileNameUTF8,
if (_shared->transmit_mixer()->StartRecordingMicrophone(fileNameUTF8, compression)) {
compression)) WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
{ "StartRecordingMicrophone() failed to start recording");
WEBRTC_TRACE(kTraceError, kTraceVoice, return -1;
VoEId(_shared->instance_id(), -1), }
"StartRecordingMicrophone() failed to start recording"); if (_shared->audio_device()->Recording()) {
return -1;
}
if (_shared->audio_device()->Recording())
{
return 0;
}
if (!_shared->ext_recording())
{
if (_shared->audio_device()->InitRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to initialize recording");
return -1;
}
if (_shared->audio_device()->StartRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to start recording");
return -1;
}
}
return 0; return 0;
}
if (!_shared->ext_recording()) {
if (_shared->audio_device()->InitRecording() != 0) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to initialize recording");
return -1;
}
if (_shared->audio_device()->StartRecording() != 0) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to start recording");
return -1;
}
}
return 0;
} }
int VoEFileImpl::StartRecordingMicrophone( int VoEFileImpl::StartRecordingMicrophone(OutStream* stream,
OutStream* stream, CodecInst* compression) CodecInst* compression) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StartRecordingMicrophone(stream, compression)");
"StartRecordingMicrophone(stream, compression)");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (_shared->transmit_mixer()->StartRecordingMicrophone(stream,
if (_shared->transmit_mixer()->StartRecordingMicrophone(stream, compression) == -1) {
compression) == -1) WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
{ "StartRecordingMicrophone() failed to start recording");
WEBRTC_TRACE(kTraceError, kTraceVoice, return -1;
VoEId(_shared->instance_id(), -1), }
"StartRecordingMicrophone() failed to start recording"); if (_shared->audio_device()->Recording()) {
return -1;
}
if (_shared->audio_device()->Recording())
{
return 0;
}
if (!_shared->ext_recording())
{
if (_shared->audio_device()->InitRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to initialize recording");
return -1;
}
if (_shared->audio_device()->StartRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to start recording");
return -1;
}
}
return 0; return 0;
}
if (!_shared->ext_recording()) {
if (_shared->audio_device()->InitRecording() != 0) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to initialize recording");
return -1;
}
if (_shared->audio_device()->StartRecording() != 0) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartRecordingMicrophone() failed to start recording");
return -1;
}
}
return 0;
} }
int VoEFileImpl::StopRecordingMicrophone() int VoEFileImpl::StopRecordingMicrophone() {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StopRecordingMicrophone()");
"StopRecordingMicrophone()"); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1;
int err = 0;
// TODO(xians): consider removing Start/StopRecording() in
// Start/StopRecordingMicrophone() if no channel is recording.
if (_shared->NumOfSendingChannels() == 0 &&
_shared->audio_device()->Recording()) {
// Stop audio-device recording if no channel is recording
if (_shared->audio_device()->StopRecording() != 0) {
_shared->SetLastError(
VE_CANNOT_STOP_RECORDING, kTraceError,
"StopRecordingMicrophone() failed to stop recording");
err = -1;
} }
}
int err = 0; if (_shared->transmit_mixer()->StopRecordingMicrophone() != 0) {
WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StopRecordingMicrophone() failed to stop recording to mixer");
err = -1;
}
// TODO(xians): consider removing Start/StopRecording() in return err;
// Start/StopRecordingMicrophone() if no channel is recording.
if (_shared->NumOfSendingChannels() == 0 &&
_shared->audio_device()->Recording())
{
// Stop audio-device recording if no channel is recording
if (_shared->audio_device()->StopRecording() != 0)
{
_shared->SetLastError(VE_CANNOT_STOP_RECORDING, kTraceError,
"StopRecordingMicrophone() failed to stop recording");
err = -1;
}
}
if (_shared->transmit_mixer()->StopRecordingMicrophone() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"StopRecordingMicrophone() failed to stop recording to mixer");
err = -1;
}
return err;
} }
#endif // #ifdef WEBRTC_VOICE_ENGINE_FILE_API #endif // #ifdef WEBRTC_VOICE_ENGINE_FILE_API

View File

@ -16,82 +16,78 @@
namespace webrtc { namespace webrtc {
class VoEFileImpl : public VoEFile class VoEFileImpl : public VoEFile {
{ public:
public: // Playout file locally
// Playout file locally
virtual int StartPlayingFileLocally( int StartPlayingFileLocally(int channel,
int channel, const char fileNameUTF8[1024],
const char fileNameUTF8[1024], bool loop = false,
bool loop = false, FileFormats format = kFileFormatPcm16kHzFile,
FileFormats format = kFileFormatPcm16kHzFile, float volumeScaling = 1.0,
float volumeScaling = 1.0, int startPointMs = 0,
int startPointMs = 0, int stopPointMs = 0) override;
int stopPointMs = 0);
virtual int StartPlayingFileLocally( int StartPlayingFileLocally(int channel,
int channel, InStream* stream,
InStream* stream, FileFormats format = kFileFormatPcm16kHzFile,
FileFormats format = kFileFormatPcm16kHzFile, float volumeScaling = 1.0,
float volumeScaling = 1.0, int startPointMs = 0,
int startPointMs = 0, int stopPointMs = 0); int stopPointMs = 0) override;
virtual int StopPlayingFileLocally(int channel); int StopPlayingFileLocally(int channel) override;
virtual int IsPlayingFileLocally(int channel); int IsPlayingFileLocally(int channel) override;
// Use file as microphone input // Use file as microphone input
virtual int StartPlayingFileAsMicrophone( int StartPlayingFileAsMicrophone(int channel,
int channel, const char fileNameUTF8[1024],
const char fileNameUTF8[1024], bool loop = false,
bool loop = false , bool mixWithMicrophone = false,
bool mixWithMicrophone = false, FileFormats format = kFileFormatPcm16kHzFile,
FileFormats format = kFileFormatPcm16kHzFile, float volumeScaling = 1.0) override;
float volumeScaling = 1.0);
virtual int StartPlayingFileAsMicrophone( int StartPlayingFileAsMicrophone(int channel,
int channel, InStream* stream,
InStream* stream, bool mixWithMicrophone = false,
bool mixWithMicrophone = false, FileFormats format = kFileFormatPcm16kHzFile,
FileFormats format = kFileFormatPcm16kHzFile, float volumeScaling = 1.0) override;
float volumeScaling = 1.0);
virtual int StopPlayingFileAsMicrophone(int channel); int StopPlayingFileAsMicrophone(int channel) override;
virtual int IsPlayingFileAsMicrophone(int channel); int IsPlayingFileAsMicrophone(int channel) override;
// Record speaker signal to file // Record speaker signal to file
virtual int StartRecordingPlayout(int channel, int StartRecordingPlayout(int channel,
const char* fileNameUTF8, const char* fileNameUTF8,
CodecInst* compression = NULL, CodecInst* compression = NULL,
int maxSizeBytes = -1); int maxSizeBytes = -1) override;
virtual int StartRecordingPlayout(int channel, int StartRecordingPlayout(int channel,
OutStream* stream, OutStream* stream,
CodecInst* compression = NULL); CodecInst* compression = NULL) override;
virtual int StopRecordingPlayout(int channel); int StopRecordingPlayout(int channel) override;
// Record microphone signal to file // Record microphone signal to file
virtual int StartRecordingMicrophone(const char* fileNameUTF8, int StartRecordingMicrophone(const char* fileNameUTF8,
CodecInst* compression = NULL, CodecInst* compression = NULL,
int maxSizeBytes = -1); int maxSizeBytes = -1) override;
virtual int StartRecordingMicrophone(OutStream* stream, int StartRecordingMicrophone(OutStream* stream,
CodecInst* compression = NULL); CodecInst* compression = NULL) override;
virtual int StopRecordingMicrophone(); int StopRecordingMicrophone() override;
protected: protected:
VoEFileImpl(voe::SharedData* shared); VoEFileImpl(voe::SharedData* shared);
virtual ~VoEFileImpl(); ~VoEFileImpl() override;
private: private:
voe::SharedData* _shared; voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -17,515 +17,453 @@
#include "webrtc/voice_engine/include/voe_errors.h" #include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/voice_engine_impl.h" #include "webrtc/voice_engine/voice_engine_impl.h"
namespace webrtc namespace webrtc {
{
VoEHardware* VoEHardware::GetInterface(VoiceEngine* voiceEngine) VoEHardware* VoEHardware::GetInterface(VoiceEngine* voiceEngine) {
{
#ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API #ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
return NULL; return NULL;
#else #else
if (NULL == voiceEngine) if (NULL == voiceEngine) {
{ return NULL;
return NULL; }
} VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); s->AddRef();
s->AddRef(); return s;
return s;
#endif #endif
} }
#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API #ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
VoEHardwareImpl::VoEHardwareImpl(voe::SharedData* shared) : _shared(shared) VoEHardwareImpl::VoEHardwareImpl(voe::SharedData* shared) : _shared(shared) {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEHardwareImpl() - ctor");
"VoEHardwareImpl() - ctor");
} }
VoEHardwareImpl::~VoEHardwareImpl() VoEHardwareImpl::~VoEHardwareImpl() {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "~VoEHardwareImpl() - dtor");
"~VoEHardwareImpl() - dtor");
} }
int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer) int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetAudioDeviceLayer(audioLayer=%d)", audioLayer);
"SetAudioDeviceLayer(audioLayer=%d)", audioLayer);
// Don't allow a change if VoE is initialized // Don't allow a change if VoE is initialized
if (_shared->statistics().Initialized()) if (_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_ALREADY_INITED, kTraceError);
_shared->SetLastError(VE_ALREADY_INITED, kTraceError); return -1;
return -1; }
}
// Map to AudioDeviceModule::AudioLayer // Map to AudioDeviceModule::AudioLayer
AudioDeviceModule::AudioLayer AudioDeviceModule::AudioLayer wantedLayer(
wantedLayer(AudioDeviceModule::kPlatformDefaultAudio); AudioDeviceModule::kPlatformDefaultAudio);
switch (audioLayer) switch (audioLayer) {
{ case kAudioPlatformDefault:
case kAudioPlatformDefault: // already set above
// already set above break;
break; case kAudioWindowsCore:
case kAudioWindowsCore: wantedLayer = AudioDeviceModule::kWindowsCoreAudio;
wantedLayer = AudioDeviceModule::kWindowsCoreAudio; break;
break; case kAudioWindowsWave:
case kAudioWindowsWave: wantedLayer = AudioDeviceModule::kWindowsWaveAudio;
wantedLayer = AudioDeviceModule::kWindowsWaveAudio; break;
break; case kAudioLinuxAlsa:
case kAudioLinuxAlsa: wantedLayer = AudioDeviceModule::kLinuxAlsaAudio;
wantedLayer = AudioDeviceModule::kLinuxAlsaAudio; break;
break; case kAudioLinuxPulse:
case kAudioLinuxPulse: wantedLayer = AudioDeviceModule::kLinuxPulseAudio;
wantedLayer = AudioDeviceModule::kLinuxPulseAudio; break;
break; }
}
// Save the audio device layer for Init() // Save the audio device layer for Init()
_shared->set_audio_device_layer(wantedLayer); _shared->set_audio_device_layer(wantedLayer);
return 0; return 0;
} }
// Reports which audio device layer is active, mapping the ADM-internal
// AudioLayer enum onto the public VoE AudioLayers enum.
// May be called in any VoE state: if the ADM exists its live setting is
// queried, otherwise VoE's stored preference is returned.
// Returns 0 on success, -1 on ADM query failure.
int VoEHardwareImpl::GetAudioDeviceLayer(AudioLayers& audioLayer) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "GetAudioDeviceLayer(devices=?)");

  // Can always be called regardless of VoE state.
  AudioDeviceModule::AudioLayer activeLayer(
      AudioDeviceModule::kPlatformDefaultAudio);

  if (_shared->audio_device()) {
    // Get active audio layer from ADM.
    if (_shared->audio_device()->ActiveAudioLayer(&activeLayer) != 0) {
      _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
                            " Audio Device error");
      return -1;
    }
  } else {
    // No ADM yet: return VoE's internal layer setting.
    activeLayer = _shared->audio_device_layer();
  }

  // Map to AudioLayers.
  switch (activeLayer) {
    case AudioDeviceModule::kPlatformDefaultAudio:
      audioLayer = kAudioPlatformDefault;
      break;
    case AudioDeviceModule::kWindowsCoreAudio:
      audioLayer = kAudioWindowsCore;
      break;
    case AudioDeviceModule::kWindowsWaveAudio:
      audioLayer = kAudioWindowsWave;
      break;
    case AudioDeviceModule::kLinuxAlsaAudio:
      audioLayer = kAudioLinuxAlsa;
      break;
    case AudioDeviceModule::kLinuxPulseAudio:
      audioLayer = kAudioLinuxPulse;
      break;
    default:
      // Unknown layer: record the error but still return 0, matching the
      // historical behavior (audioLayer is left unmodified in this case).
      _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
                            " unknown audio layer");
  }

  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
               " Output: audioLayer=%d", audioLayer);

  return 0;
}
int VoEHardwareImpl::GetNumOfRecordingDevices(int& devices) int VoEHardwareImpl::GetNumOfRecordingDevices(int& devices) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetNumOfRecordingDevices(devices=?)");
"GetNumOfRecordingDevices(devices=?)");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
}
devices = static_cast<int> (_shared->audio_device()->RecordingDevices()); devices = static_cast<int>(_shared->audio_device()->RecordingDevices());
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
VoEId(_shared->instance_id(), -1), " Output: devices=%d", devices); " Output: devices=%d", devices);
return 0; return 0;
} }
int VoEHardwareImpl::GetNumOfPlayoutDevices(int& devices) int VoEHardwareImpl::GetNumOfPlayoutDevices(int& devices) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetNumOfPlayoutDevices(devices=?)");
"GetNumOfPlayoutDevices(devices=?)");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
}
devices = static_cast<int> (_shared->audio_device()->PlayoutDevices()); devices = static_cast<int>(_shared->audio_device()->PlayoutDevices());
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
VoEId(_shared->instance_id(), -1), " Output: devices=%d", devices);
" Output: devices=%d", devices);
return 0; return 0;
} }
// Copies the UTF-8 name (and, optionally, GUID) of the recording device at
// |index| into the caller-supplied 128-byte buffers.
// |strGuidUTF8| may be NULL if the caller does not want the GUID.
// Returns 0 on success, -1 on bad state/argument or ADM lookup failure.
int VoEHardwareImpl::GetRecordingDeviceName(int index,
                                            char strNameUTF8[128],
                                            char strGuidUTF8[128]) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "GetRecordingDeviceName(index=%d)", index);

  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  if (strNameUTF8 == NULL) {
    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
                          "GetRecordingDeviceName() invalid argument");
    return -1;
  }

  // Note that strGuidUTF8 is allowed to be NULL.

  // Init len variable to length of supplied vectors.
  const uint16_t strLen = 128;

  // Check if length has been changed in module.
  assert(strLen == kAdmMaxDeviceNameSize);
  assert(strLen == kAdmMaxGuidSize);

  char name[strLen];
  char guid[strLen];

  // Get names from the ADM; it also performs the index sanity check.
  if (_shared->audio_device()->RecordingDeviceName(index, name, guid) != 0) {
    _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
                          "GetRecordingDeviceName() failed to get device name");
    return -1;
  }

  // Copy to vectors supplied by user.
  // NOTE(review): strncpy does not guarantee NUL-termination if the source
  // fills the whole buffer — presumably the ADM always NUL-terminates within
  // kAdmMaxDeviceNameSize; confirm before changing buffer sizes.
  strncpy(strNameUTF8, name, strLen);
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
               " Output: strNameUTF8=%s", strNameUTF8);

  if (strGuidUTF8 != NULL) {
    strncpy(strGuidUTF8, guid, strLen);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_shared->instance_id(), -1), " Output: strGuidUTF8=%s",
                 strGuidUTF8);
  }

  return 0;
}
// Copies the UTF-8 name (and, optionally, GUID) of the playout device at
// |index| into the caller-supplied 128-byte buffers.
// |strGuidUTF8| may be NULL if the caller does not want the GUID.
// Returns 0 on success, -1 on bad state/argument or ADM lookup failure.
int VoEHardwareImpl::GetPlayoutDeviceName(int index,
                                          char strNameUTF8[128],
                                          char strGuidUTF8[128]) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "GetPlayoutDeviceName(index=%d)", index);

  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  if (strNameUTF8 == NULL) {
    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
                          "GetPlayoutDeviceName() invalid argument");
    return -1;
  }

  // Note that strGuidUTF8 is allowed to be NULL.

  // Init len variable to length of supplied vectors.
  const uint16_t strLen = 128;

  // Check if length has been changed in module.
  assert(strLen == kAdmMaxDeviceNameSize);
  assert(strLen == kAdmMaxGuidSize);

  char name[strLen];
  char guid[strLen];

  // Get names from the ADM; it also performs the index sanity check.
  if (_shared->audio_device()->PlayoutDeviceName(index, name, guid) != 0) {
    _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
                          "GetPlayoutDeviceName() failed to get device name");
    return -1;
  }

  // Copy to vectors supplied by user.
  strncpy(strNameUTF8, name, strLen);
  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
               " Output: strNameUTF8=%s", strNameUTF8);

  if (strGuidUTF8 != NULL) {
    strncpy(strGuidUTF8, guid, strLen);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_shared->instance_id(), -1), " Output: strGuidUTF8=%s",
                 strGuidUTF8);
  }

  return 0;
}
int VoEHardwareImpl::SetRecordingDevice(int index, int VoEHardwareImpl::SetRecordingDevice(int index,
StereoChannel recordingChannel) StereoChannel recordingChannel) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetRecordingDevice(index=%d, recordingChannel=%d)", index,
"SetRecordingDevice(index=%d, recordingChannel=%d)", (int)recordingChannel);
index, (int) recordingChannel); CriticalSectionScoped cs(_shared->crit_sec());
CriticalSectionScoped cs(_shared->crit_sec());
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
}
bool isRecording(false);
// Store state about activated recording to be able to restore it after the
// recording device has been modified.
if (_shared->audio_device()->Recording()) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetRecordingDevice() device is modified while recording"
" is active...");
isRecording = true;
if (_shared->audio_device()->StopRecording() == -1) {
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetRecordingDevice() unable to stop recording");
return -1;
}
}
// We let the module do the index sanity
// Set recording channel
AudioDeviceModule::ChannelType recCh = AudioDeviceModule::kChannelBoth;
switch (recordingChannel) {
case kStereoLeft:
recCh = AudioDeviceModule::kChannelLeft;
break;
case kStereoRight:
recCh = AudioDeviceModule::kChannelRight;
break;
case kStereoBoth:
// default setting kChannelBoth (<=> mono)
break;
}
if (_shared->audio_device()->SetRecordingChannel(recCh) != 0) {
_shared->SetLastError(
VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
"SetRecordingChannel() unable to set the recording channel");
}
// Map indices to unsigned since underlying functions need that
uint16_t indexU = static_cast<uint16_t>(index);
int32_t res(0);
if (index == -1) {
res = _shared->audio_device()->SetRecordingDevice(
AudioDeviceModule::kDefaultCommunicationDevice);
} else if (index == -2) {
res = _shared->audio_device()->SetRecordingDevice(
AudioDeviceModule::kDefaultDevice);
} else {
res = _shared->audio_device()->SetRecordingDevice(indexU);
}
if (res != 0) {
_shared->SetLastError(
VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetRecordingDevice() unable to set the recording device");
return -1;
}
// Init microphone, so user can do volume settings etc
if (_shared->audio_device()->InitMicrophone() == -1) {
_shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceWarning,
"SetRecordingDevice() cannot access microphone");
}
// Set number of channels
bool available = false;
if (_shared->audio_device()->StereoRecordingIsAvailable(&available) != 0) {
_shared->SetLastError(
VE_SOUNDCARD_ERROR, kTraceWarning,
"StereoRecordingIsAvailable() failed to query stereo recording");
}
if (_shared->audio_device()->SetStereoRecording(available) != 0) {
_shared->SetLastError(
VE_SOUNDCARD_ERROR, kTraceWarning,
"SetRecordingDevice() failed to set mono recording mode");
}
// Restore recording if it was enabled already when calling this function.
if (isRecording) {
if (!_shared->ext_recording()) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetRecordingDevice() recording is now being restored...");
if (_shared->audio_device()->InitRecording() != 0) {
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetRecordingDevice() failed to initialize recording");
return -1; return -1;
} }
if (_shared->audio_device()->StartRecording() != 0) {
bool isRecording(false); WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
// Store state about activated recording to be able to restore it after the "SetRecordingDevice() failed to start recording");
// recording device has been modified.
if (_shared->audio_device()->Recording())
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetRecordingDevice() device is modified while recording"
" is active...");
isRecording = true;
if (_shared->audio_device()->StopRecording() == -1)
{
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetRecordingDevice() unable to stop recording");
return -1;
}
}
// We let the module do the index sanity
// Set recording channel
AudioDeviceModule::ChannelType recCh =
AudioDeviceModule::kChannelBoth;
switch (recordingChannel)
{
case kStereoLeft:
recCh = AudioDeviceModule::kChannelLeft;
break;
case kStereoRight:
recCh = AudioDeviceModule::kChannelRight;
break;
case kStereoBoth:
// default setting kChannelBoth (<=> mono)
break;
}
if (_shared->audio_device()->SetRecordingChannel(recCh) != 0) {
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
"SetRecordingChannel() unable to set the recording channel");
}
// Map indices to unsigned since underlying functions need that
uint16_t indexU = static_cast<uint16_t> (index);
int32_t res(0);
if (index == -1)
{
res = _shared->audio_device()->SetRecordingDevice(
AudioDeviceModule::kDefaultCommunicationDevice);
}
else if (index == -2)
{
res = _shared->audio_device()->SetRecordingDevice(
AudioDeviceModule::kDefaultDevice);
}
else
{
res = _shared->audio_device()->SetRecordingDevice(indexU);
}
if (res != 0)
{
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetRecordingDevice() unable to set the recording device");
return -1; return -1;
}
} }
}
// Init microphone, so user can do volume settings etc return 0;
if (_shared->audio_device()->InitMicrophone() == -1)
{
_shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceWarning,
"SetRecordingDevice() cannot access microphone");
}
// Set number of channels
bool available = false;
if (_shared->audio_device()->StereoRecordingIsAvailable(&available) != 0) {
_shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
"StereoRecordingIsAvailable() failed to query stereo recording");
}
if (_shared->audio_device()->SetStereoRecording(available) != 0)
{
_shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
"SetRecordingDevice() failed to set mono recording mode");
}
// Restore recording if it was enabled already when calling this function.
if (isRecording)
{
if (!_shared->ext_recording())
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetRecordingDevice() recording is now being restored...");
if (_shared->audio_device()->InitRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetRecordingDevice() failed to initialize recording");
return -1;
}
if (_shared->audio_device()->StartRecording() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetRecordingDevice() failed to start recording");
return -1;
}
}
}
return 0;
} }
int VoEHardwareImpl::SetPlayoutDevice(int index) int VoEHardwareImpl::SetPlayoutDevice(int index) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetPlayoutDevice(index=%d)", index);
"SetPlayoutDevice(index=%d)", index); CriticalSectionScoped cs(_shared->crit_sec());
CriticalSectionScoped cs(_shared->crit_sec());
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
}
bool isPlaying(false);
// Store state about activated playout to be able to restore it after the
// playout device has been modified.
if (_shared->audio_device()->Playing()) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice() device is modified while playout is "
"active...");
isPlaying = true;
if (_shared->audio_device()->StopPlayout() == -1) {
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetPlayoutDevice() unable to stop playout");
return -1;
}
}
// We let the module do the index sanity
// Map indices to unsigned since underlying functions need that
uint16_t indexU = static_cast<uint16_t>(index);
int32_t res(0);
if (index == -1) {
res = _shared->audio_device()->SetPlayoutDevice(
AudioDeviceModule::kDefaultCommunicationDevice);
} else if (index == -2) {
res = _shared->audio_device()->SetPlayoutDevice(
AudioDeviceModule::kDefaultDevice);
} else {
res = _shared->audio_device()->SetPlayoutDevice(indexU);
}
if (res != 0) {
_shared->SetLastError(
VE_SOUNDCARD_ERROR, kTraceError,
"SetPlayoutDevice() unable to set the playout device");
return -1;
}
// Init speaker, so user can do volume settings etc
if (_shared->audio_device()->InitSpeaker() == -1) {
_shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceWarning,
"SetPlayoutDevice() cannot access speaker");
}
// Set number of channels
bool available = false;
_shared->audio_device()->StereoPlayoutIsAvailable(&available);
if (_shared->audio_device()->SetStereoPlayout(available) != 0) {
_shared->SetLastError(
VE_SOUNDCARD_ERROR, kTraceWarning,
"SetPlayoutDevice() failed to set stereo playout mode");
}
// Restore playout if it was enabled already when calling this function.
if (isPlaying) {
if (!_shared->ext_playout()) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice() playout is now being restored...");
if (_shared->audio_device()->InitPlayout() != 0) {
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice() failed to initialize playout");
return -1; return -1;
} }
if (_shared->audio_device()->StartPlayout() != 0) {
bool isPlaying(false); WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
// Store state about activated playout to be able to restore it after the "SetPlayoutDevice() failed to start playout");
// playout device has been modified.
if (_shared->audio_device()->Playing())
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice() device is modified while playout is "
"active...");
isPlaying = true;
if (_shared->audio_device()->StopPlayout() == -1)
{
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
"SetPlayoutDevice() unable to stop playout");
return -1;
}
}
// We let the module do the index sanity
// Map indices to unsigned since underlying functions need that
uint16_t indexU = static_cast<uint16_t> (index);
int32_t res(0);
if (index == -1)
{
res = _shared->audio_device()->SetPlayoutDevice(
AudioDeviceModule::kDefaultCommunicationDevice);
}
else if (index == -2)
{
res = _shared->audio_device()->SetPlayoutDevice(
AudioDeviceModule::kDefaultDevice);
}
else
{
res = _shared->audio_device()->SetPlayoutDevice(indexU);
}
if (res != 0)
{
_shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
"SetPlayoutDevice() unable to set the playout device");
return -1; return -1;
}
} }
}
// Init speaker, so user can do volume settings etc return 0;
if (_shared->audio_device()->InitSpeaker() == -1)
{
_shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceWarning,
"SetPlayoutDevice() cannot access speaker");
}
// Set number of channels
bool available = false;
_shared->audio_device()->StereoPlayoutIsAvailable(&available);
if (_shared->audio_device()->SetStereoPlayout(available) != 0)
{
_shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
"SetPlayoutDevice() failed to set stereo playout mode");
}
// Restore playout if it was enabled already when calling this function.
if (isPlaying)
{
if (!_shared->ext_playout())
{
WEBRTC_TRACE(kTraceInfo, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice() playout is now being restored...");
if (_shared->audio_device()->InitPlayout() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice() failed to initialize playout");
return -1;
}
if (_shared->audio_device()->StartPlayout() != 0)
{
WEBRTC_TRACE(kTraceError, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"SetPlayoutDevice() failed to start playout");
return -1;
}
}
}
return 0;
} }
int VoEHardwareImpl::SetRecordingSampleRate(unsigned int samples_per_sec) { int VoEHardwareImpl::SetRecordingSampleRate(unsigned int samples_per_sec) {
@ -569,7 +507,7 @@ int VoEHardwareImpl::PlayoutSampleRate(unsigned int* samples_per_sec) const {
} }
bool VoEHardwareImpl::BuiltInAECIsAvailable() const { bool VoEHardwareImpl::BuiltInAECIsAvailable() const {
if (!_shared->statistics().Initialized()) { if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError); _shared->SetLastError(VE_NOT_INITED, kTraceError);
return false; return false;
} }
@ -577,7 +515,7 @@ if (!_shared->statistics().Initialized()) {
} }
int VoEHardwareImpl::EnableBuiltInAEC(bool enable) { int VoEHardwareImpl::EnableBuiltInAEC(bool enable) {
if (!_shared->statistics().Initialized()) { if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError); _shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1; return -1;
} }

View File

@ -15,48 +15,45 @@
#include "webrtc/voice_engine/shared_data.h" #include "webrtc/voice_engine/shared_data.h"
namespace webrtc namespace webrtc {
{
class VoEHardwareImpl: public VoEHardware class VoEHardwareImpl : public VoEHardware {
{ public:
public: int GetNumOfRecordingDevices(int& devices) override;
virtual int GetNumOfRecordingDevices(int& devices);
virtual int GetNumOfPlayoutDevices(int& devices); int GetNumOfPlayoutDevices(int& devices) override;
virtual int GetRecordingDeviceName(int index, int GetRecordingDeviceName(int index,
char strNameUTF8[128], char strNameUTF8[128],
char strGuidUTF8[128]); char strGuidUTF8[128]) override;
virtual int GetPlayoutDeviceName(int index, int GetPlayoutDeviceName(int index,
char strNameUTF8[128], char strNameUTF8[128],
char strGuidUTF8[128]); char strGuidUTF8[128]) override;
virtual int SetRecordingDevice( int SetRecordingDevice(int index,
int index, StereoChannel recordingChannel = kStereoBoth) override;
StereoChannel recordingChannel = kStereoBoth);
virtual int SetPlayoutDevice(int index); int SetPlayoutDevice(int index) override;
virtual int SetAudioDeviceLayer(AudioLayers audioLayer); int SetAudioDeviceLayer(AudioLayers audioLayer) override;
virtual int GetAudioDeviceLayer(AudioLayers& audioLayer); int GetAudioDeviceLayer(AudioLayers& audioLayer) override;
virtual int SetRecordingSampleRate(unsigned int samples_per_sec); int SetRecordingSampleRate(unsigned int samples_per_sec) override;
virtual int RecordingSampleRate(unsigned int* samples_per_sec) const; int RecordingSampleRate(unsigned int* samples_per_sec) const override;
virtual int SetPlayoutSampleRate(unsigned int samples_per_sec); int SetPlayoutSampleRate(unsigned int samples_per_sec) override;
virtual int PlayoutSampleRate(unsigned int* samples_per_sec) const; int PlayoutSampleRate(unsigned int* samples_per_sec) const override;
virtual bool BuiltInAECIsAvailable() const; bool BuiltInAECIsAvailable() const override;
virtual int EnableBuiltInAEC(bool enable); int EnableBuiltInAEC(bool enable) override;
protected: protected:
VoEHardwareImpl(voe::SharedData* shared); VoEHardwareImpl(voe::SharedData* shared);
virtual ~VoEHardwareImpl(); ~VoEHardwareImpl() override;
private: private:
voe::SharedData* _shared; voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -19,61 +19,54 @@
namespace webrtc { namespace webrtc {
// Factory for the VoENetEqStats sub-API. Increases the engine's internal
// reference count on success. Returns NULL when the NetEq stats API is
// compiled out or |voiceEngine| is NULL.
VoENetEqStats* VoENetEqStats::GetInterface(VoiceEngine* voiceEngine) {
#ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
  return NULL;
#else
  if (NULL == voiceEngine) {
    return NULL;
  }
  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
  s->AddRef();
  return s;
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API #ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
// Constructor: stores the (non-owned) shared VoE data and traces creation.
VoENetEqStatsImpl::VoENetEqStatsImpl(voe::SharedData* shared)
    : _shared(shared) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoENetEqStatsImpl::VoENetEqStatsImpl() - ctor");
}
// Destructor: only traces destruction; _shared is not owned here.
VoENetEqStatsImpl::~VoENetEqStatsImpl() {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoENetEqStatsImpl::~VoENetEqStatsImpl() - dtor");
}
int VoENetEqStatsImpl::GetNetworkStatistics(int channel, int VoENetEqStatsImpl::GetNetworkStatistics(int channel,
NetworkStatistics& stats) NetworkStatistics& stats) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetNetworkStatistics(channel=%d, stats=?)", channel);
"GetNetworkStatistics(channel=%d, stats=?)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetNetworkStatistics() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetNetworkStatistics() failed to locate channel"); }
return -1;
}
return channelPtr->GetNetworkStatistics(stats); return channelPtr->GetNetworkStatistics(stats);
} }
int VoENetEqStatsImpl::GetDecodingCallStatistics( int VoENetEqStatsImpl::GetDecodingCallStatistics(
int channel, AudioDecodingCallStats* stats) const { int channel, AudioDecodingCallStats* stats) const {
if (!_shared->statistics().Initialized()) { if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError); _shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1; return -1;

View File

@ -18,23 +18,21 @@
namespace webrtc { namespace webrtc {
class VoENetEqStatsImpl : public VoENetEqStats class VoENetEqStatsImpl : public VoENetEqStats {
{ public:
public: int GetNetworkStatistics(int channel, NetworkStatistics& stats) override;
virtual int GetNetworkStatistics(int channel,
NetworkStatistics& stats);
virtual int GetDecodingCallStatistics( int GetDecodingCallStatistics(int channel,
int channel, AudioDecodingCallStats* stats) const; AudioDecodingCallStats* stats) const override;
protected: protected:
VoENetEqStatsImpl(voe::SharedData* shared); VoENetEqStatsImpl(voe::SharedData* shared);
virtual ~VoENetEqStatsImpl(); ~VoENetEqStatsImpl() override;
private: private:
voe::SharedData* _shared; voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H #endif // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H

View File

@ -18,73 +18,62 @@
#include "webrtc/voice_engine/include/voe_errors.h" #include "webrtc/voice_engine/include/voe_errors.h"
#include "webrtc/voice_engine/voice_engine_impl.h" #include "webrtc/voice_engine/voice_engine_impl.h"
namespace webrtc namespace webrtc {
{
// Factory for the VoENetwork sub-API. Increases the engine's internal
// reference count on success; returns NULL if |voiceEngine| is NULL.
VoENetwork* VoENetwork::GetInterface(VoiceEngine* voiceEngine) {
  if (NULL == voiceEngine) {
    return NULL;
  }
  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
  s->AddRef();
  return s;
}
// Constructor: stores the (non-owned) shared VoE data and traces creation.
VoENetworkImpl::VoENetworkImpl(voe::SharedData* shared) : _shared(shared) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoENetworkImpl() - ctor");
}
// Destructor: only traces destruction; _shared is not owned here.
VoENetworkImpl::~VoENetworkImpl() {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "~VoENetworkImpl() - dtor");
}
int VoENetworkImpl::RegisterExternalTransport(int channel, int VoENetworkImpl::RegisterExternalTransport(int channel,
Transport& transport) Transport& transport) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetExternalTransport(channel=%d, transport=0x%x)", channel,
"SetExternalTransport(channel=%d, transport=0x%x)", &transport);
channel, &transport); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "SetExternalTransport() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"SetExternalTransport() failed to locate channel"); return channelPtr->RegisterExternalTransport(transport);
return -1;
}
return channelPtr->RegisterExternalTransport(transport);
} }
int VoENetworkImpl::DeRegisterExternalTransport(int channel) int VoENetworkImpl::DeRegisterExternalTransport(int channel) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "DeRegisterExternalTransport(channel=%d)", channel);
"DeRegisterExternalTransport(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
{ "DeRegisterExternalTransport() - invalid state");
WEBRTC_TRACE(kTraceError, kTraceVoice, }
VoEId(_shared->instance_id(), -1), voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
"DeRegisterExternalTransport() - invalid state"); voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); _shared->SetLastError(
voe::Channel* channelPtr = ch.channel(); VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "DeRegisterExternalTransport() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"DeRegisterExternalTransport() failed to locate channel"); return channelPtr->DeRegisterExternalTransport();
return -1;
}
return channelPtr->DeRegisterExternalTransport();
} }
int VoENetworkImpl::ReceivedRTPPacket(int channel, int VoENetworkImpl::ReceivedRTPPacket(int channel,
@ -96,85 +85,76 @@ int VoENetworkImpl::ReceivedRTPPacket(int channel,
int VoENetworkImpl::ReceivedRTPPacket(int channel, int VoENetworkImpl::ReceivedRTPPacket(int channel,
const void* data, const void* data,
size_t length, size_t length,
const PacketTime& packet_time) const PacketTime& packet_time) {
{ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1), "ReceivedRTPPacket(channel=%d, length=%" PRIuS ")", channel,
"ReceivedRTPPacket(channel=%d, length=%" PRIuS ")", channel, length);
length); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; // L16 at 32 kHz, stereo, 10 ms frames (+12 byte RTP header) -> 1292 bytes
} if ((length < 12) || (length > 1292)) {
// L16 at 32 kHz, stereo, 10 ms frames (+12 byte RTP header) -> 1292 bytes _shared->SetLastError(VE_INVALID_PACKET);
if ((length < 12) || (length > 1292)) LOG(LS_ERROR) << "Invalid packet length: " << length;
{ return -1;
_shared->SetLastError(VE_INVALID_PACKET); }
LOG(LS_ERROR) << "Invalid packet length: " << length; if (NULL == data) {
return -1; _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
} "ReceivedRTPPacket() invalid data vector");
if (NULL == data) return -1;
{ }
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
"ReceivedRTPPacket() invalid data vector"); voe::Channel* channelPtr = ch.channel();
return -1; if (channelPtr == NULL) {
} _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); "ReceivedRTPPacket() failed to locate channel");
voe::Channel* channelPtr = ch.channel(); return -1;
if (channelPtr == NULL) }
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"ReceivedRTPPacket() failed to locate channel");
return -1;
}
if (!channelPtr->ExternalTransport()) if (!channelPtr->ExternalTransport()) {
{ _shared->SetLastError(
_shared->SetLastError(VE_INVALID_OPERATION, kTraceError, VE_INVALID_OPERATION, kTraceError,
"ReceivedRTPPacket() external transport is not enabled"); "ReceivedRTPPacket() external transport is not enabled");
return -1; return -1;
} }
return channelPtr->ReceivedRTPPacket((const int8_t*) data, length, return channelPtr->ReceivedRTPPacket((const int8_t*)data, length,
packet_time); packet_time);
} }
int VoENetworkImpl::ReceivedRTCPPacket(int channel, const void* data, int VoENetworkImpl::ReceivedRTCPPacket(int channel,
size_t length) const void* data,
{ size_t length) {
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
"ReceivedRTCPPacket(channel=%d, length=%" PRIuS ")", channel, "ReceivedRTCPPacket(channel=%d, length=%" PRIuS ")", channel,
length); length);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (length < 4) {
if (length < 4) _shared->SetLastError(VE_INVALID_PACKET, kTraceError,
{ "ReceivedRTCPPacket() invalid packet length");
_shared->SetLastError(VE_INVALID_PACKET, kTraceError, return -1;
"ReceivedRTCPPacket() invalid packet length"); }
return -1; if (NULL == data) {
} _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
if (NULL == data) "ReceivedRTCPPacket() invalid data vector");
{ return -1;
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, }
"ReceivedRTCPPacket() invalid data vector"); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
return -1; voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
voe::Channel* channelPtr = ch.channel(); "ReceivedRTCPPacket() failed to locate channel");
if (channelPtr == NULL) return -1;
{ }
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, if (!channelPtr->ExternalTransport()) {
"ReceivedRTCPPacket() failed to locate channel"); _shared->SetLastError(
return -1; VE_INVALID_OPERATION, kTraceError,
} "ReceivedRTCPPacket() external transport is not enabled");
if (!channelPtr->ExternalTransport()) return -1;
{ }
_shared->SetLastError(VE_INVALID_OPERATION, kTraceError, return channelPtr->ReceivedRTCPPacket((const int8_t*)data, length);
"ReceivedRTCPPacket() external transport is not enabled");
return -1;
}
return channelPtr->ReceivedRTCPPacket((const int8_t*) data, length);
} }
} // namespace webrtc } // namespace webrtc

View File

@ -15,30 +15,28 @@
#include "webrtc/voice_engine/shared_data.h" #include "webrtc/voice_engine/shared_data.h"
namespace webrtc {
namespace webrtc class VoENetworkImpl : public VoENetwork {
{ public:
int RegisterExternalTransport(int channel, Transport& transport) override;
class VoENetworkImpl: public VoENetwork int DeRegisterExternalTransport(int channel) override;
{
public:
int RegisterExternalTransport(int channel, Transport& transport) override;
int DeRegisterExternalTransport(int channel) override; int ReceivedRTPPacket(int channel, const void* data, size_t length) override;
int ReceivedRTPPacket(int channel,
const void* data,
size_t length,
const PacketTime& packet_time) override;
int ReceivedRTPPacket(int channel, const void* data, size_t length) override; int ReceivedRTCPPacket(int channel, const void* data, size_t length) override;
int ReceivedRTPPacket(int channel,
const void* data,
size_t length,
const PacketTime& packet_time) override;
int ReceivedRTCPPacket(int channel, const void* data, size_t length) override; protected:
VoENetworkImpl(voe::SharedData* shared);
~VoENetworkImpl() override;
protected: private:
VoENetworkImpl(voe::SharedData* shared); voe::SharedData* _shared;
virtual ~VoENetworkImpl();
private:
voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc

View File

@ -21,145 +21,132 @@
namespace webrtc { namespace webrtc {
// Factory for the VoERTP_RTCP sub-API. Increases the engine's internal
// reference count on success; returns NULL if the API is compiled out or
// |voiceEngine| is NULL.
VoERTP_RTCP* VoERTP_RTCP::GetInterface(VoiceEngine* voiceEngine) {
#ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
  return NULL;
#else
  if (NULL == voiceEngine) {
    return NULL;
  }
  VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
  s->AddRef();
  return s;
#endif
}
#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API #ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
// Constructor: stores the shared engine data and traces construction.
VoERTP_RTCPImpl::VoERTP_RTCPImpl(voe::SharedData* shared) : _shared(shared) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoERTP_RTCPImpl::VoERTP_RTCPImpl() - ctor");
}
// Destructor: traces destruction only; _shared is not owned by this object.
VoERTP_RTCPImpl::~VoERTP_RTCPImpl() {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoERTP_RTCPImpl::~VoERTP_RTCPImpl() - dtor");
}
int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc) int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetLocalSSRC(channel=%d, %lu)", channel, ssrc);
"SetLocalSSRC(channel=%d, %lu)", channel, ssrc); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "SetLocalSSRC() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"SetLocalSSRC() failed to locate channel"); return channelPtr->SetLocalSSRC(ssrc);
return -1;
}
return channelPtr->SetLocalSSRC(ssrc);
} }
int VoERTP_RTCPImpl::GetLocalSSRC(int channel, unsigned int& ssrc) int VoERTP_RTCPImpl::GetLocalSSRC(int channel, unsigned int& ssrc) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetLocalSSRC(channel=%d, ssrc=?)", channel);
"GetLocalSSRC(channel=%d, ssrc=?)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetLocalSSRC() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetLocalSSRC() failed to locate channel"); return channelPtr->GetLocalSSRC(ssrc);
return -1;
}
return channelPtr->GetLocalSSRC(ssrc);
} }
int VoERTP_RTCPImpl::GetRemoteSSRC(int channel, unsigned int& ssrc) int VoERTP_RTCPImpl::GetRemoteSSRC(int channel, unsigned int& ssrc) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetRemoteSSRC(channel=%d, ssrc=?)", channel);
"GetRemoteSSRC(channel=%d, ssrc=?)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetRemoteSSRC() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetRemoteSSRC() failed to locate channel"); return channelPtr->GetRemoteSSRC(ssrc);
return -1;
}
return channelPtr->GetRemoteSSRC(ssrc);
} }
int VoERTP_RTCPImpl::SetSendAudioLevelIndicationStatus(int channel, int VoERTP_RTCPImpl::SetSendAudioLevelIndicationStatus(int channel,
bool enable, bool enable,
unsigned char id) unsigned char id) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetSendAudioLevelIndicationStatus(channel=%d, enable=%d,"
"SetSendAudioLevelIndicationStatus(channel=%d, enable=%d," " ID=%u)",
" ID=%u)", channel, enable, id); channel, enable, id);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (enable && (id < kVoiceEngineMinRtpExtensionId ||
if (enable && (id < kVoiceEngineMinRtpExtensionId || id > kVoiceEngineMaxRtpExtensionId)) {
id > kVoiceEngineMaxRtpExtensionId)) // [RFC5285] The 4-bit id is the local identifier of this element in
{ // the range 1-14 inclusive.
// [RFC5285] The 4-bit id is the local identifier of this element in _shared->SetLastError(
// the range 1-14 inclusive. VE_INVALID_ARGUMENT, kTraceError,
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, "SetSendAudioLevelIndicationStatus() invalid ID parameter");
"SetSendAudioLevelIndicationStatus() invalid ID parameter"); return -1;
return -1; }
}
// Set state and id for the specified channel. // Set state and id for the specified channel.
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) if (channelPtr == NULL) {
{ _shared->SetLastError(
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, VE_CHANNEL_NOT_VALID, kTraceError,
"SetSendAudioLevelIndicationStatus() failed to locate channel"); "SetSendAudioLevelIndicationStatus() failed to locate channel");
return -1; return -1;
} }
return channelPtr->SetSendAudioLevelIndicationStatus(enable, id); return channelPtr->SetSendAudioLevelIndicationStatus(enable, id);
} }
int VoERTP_RTCPImpl::SetReceiveAudioLevelIndicationStatus(int channel, int VoERTP_RTCPImpl::SetReceiveAudioLevelIndicationStatus(int channel,
bool enable, bool enable,
unsigned char id) { unsigned char id) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(
kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetReceiveAudioLevelIndicationStatus(channel=%d, enable=%d, id=%u)", "SetReceiveAudioLevelIndicationStatus(channel=%d, enable=%d, id=%u)",
channel, enable, id); channel, enable, id);
if (!_shared->statistics().Initialized()) { if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError); _shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1; return -1;
} }
if (enable && if (enable && (id < kVoiceEngineMinRtpExtensionId ||
(id < kVoiceEngineMinRtpExtensionId || id > kVoiceEngineMaxRtpExtensionId)) {
id > kVoiceEngineMaxRtpExtensionId)) {
// [RFC5285] The 4-bit id is the local identifier of this element in // [RFC5285] The 4-bit id is the local identifier of this element in
// the range 1-14 inclusive. // the range 1-14 inclusive.
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, _shared->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetReceiveAbsoluteSenderTimeStatus() invalid id parameter"); "SetReceiveAbsoluteSenderTimeStatus() invalid id parameter");
return -1; return -1;
} }
@ -167,7 +154,8 @@ int VoERTP_RTCPImpl::SetReceiveAudioLevelIndicationStatus(int channel,
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channel_ptr = ch.channel(); voe::Channel* channel_ptr = ch.channel();
if (channel_ptr == NULL) { if (channel_ptr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetReceiveAudioLevelIndicationStatus() failed to locate channel"); "SetReceiveAudioLevelIndicationStatus() failed to locate channel");
return -1; return -1;
} }
@ -188,7 +176,8 @@ int VoERTP_RTCPImpl::SetSendAbsoluteSenderTimeStatus(int channel,
id > kVoiceEngineMaxRtpExtensionId)) { id > kVoiceEngineMaxRtpExtensionId)) {
// [RFC5285] The 4-bit id is the local identifier of this element in // [RFC5285] The 4-bit id is the local identifier of this element in
// the range 1-14 inclusive. // the range 1-14 inclusive.
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, _shared->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetSendAbsoluteSenderTimeStatus() invalid id parameter"); "SetSendAbsoluteSenderTimeStatus() invalid id parameter");
return -1; return -1;
} }
@ -196,7 +185,8 @@ int VoERTP_RTCPImpl::SetSendAbsoluteSenderTimeStatus(int channel,
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetSendAbsoluteSenderTimeStatus() failed to locate channel"); "SetSendAbsoluteSenderTimeStatus() failed to locate channel");
return -1; return -1;
} }
@ -206,7 +196,8 @@ int VoERTP_RTCPImpl::SetSendAbsoluteSenderTimeStatus(int channel,
int VoERTP_RTCPImpl::SetReceiveAbsoluteSenderTimeStatus(int channel, int VoERTP_RTCPImpl::SetReceiveAbsoluteSenderTimeStatus(int channel,
bool enable, bool enable,
unsigned char id) { unsigned char id) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(
kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetReceiveAbsoluteSenderTimeStatus(channel=%d, enable=%d, id=%u)", "SetReceiveAbsoluteSenderTimeStatus(channel=%d, enable=%d, id=%u)",
channel, enable, id); channel, enable, id);
if (!_shared->statistics().Initialized()) { if (!_shared->statistics().Initialized()) {
@ -217,7 +208,8 @@ int VoERTP_RTCPImpl::SetReceiveAbsoluteSenderTimeStatus(int channel,
id > kVoiceEngineMaxRtpExtensionId)) { id > kVoiceEngineMaxRtpExtensionId)) {
// [RFC5285] The 4-bit id is the local identifier of this element in // [RFC5285] The 4-bit id is the local identifier of this element in
// the range 1-14 inclusive. // the range 1-14 inclusive.
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, _shared->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"SetReceiveAbsoluteSenderTimeStatus() invalid id parameter"); "SetReceiveAbsoluteSenderTimeStatus() invalid id parameter");
return -1; return -1;
} }
@ -225,169 +217,145 @@ int VoERTP_RTCPImpl::SetReceiveAbsoluteSenderTimeStatus(int channel,
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) { if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"SetReceiveAbsoluteSenderTimeStatus() failed to locate channel"); "SetReceiveAbsoluteSenderTimeStatus() failed to locate channel");
return -1; return -1;
} }
return channelPtr->SetReceiveAbsoluteSenderTimeStatus(enable, id); return channelPtr->SetReceiveAbsoluteSenderTimeStatus(enable, id);
} }
int VoERTP_RTCPImpl::SetRTCPStatus(int channel, bool enable) int VoERTP_RTCPImpl::SetRTCPStatus(int channel, bool enable) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetRTCPStatus(channel=%d, enable=%d)", channel, enable);
"SetRTCPStatus(channel=%d, enable=%d)", channel, enable); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "SetRTCPStatus() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"SetRTCPStatus() failed to locate channel"); channelPtr->SetRTCPStatus(enable);
return -1; return 0;
}
channelPtr->SetRTCPStatus(enable);
return 0;
} }
int VoERTP_RTCPImpl::GetRTCPStatus(int channel, bool& enabled) int VoERTP_RTCPImpl::GetRTCPStatus(int channel, bool& enabled) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetRTCPStatus(channel=%d)", channel);
"GetRTCPStatus(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetRTCPStatus() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetRTCPStatus() failed to locate channel"); return channelPtr->GetRTCPStatus(enabled);
return -1;
}
return channelPtr->GetRTCPStatus(enabled);
} }
int VoERTP_RTCPImpl::SetRTCP_CNAME(int channel, const char cName[256]) int VoERTP_RTCPImpl::SetRTCP_CNAME(int channel, const char cName[256]) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetRTCP_CNAME(channel=%d, cName=%s)", channel, cName);
"SetRTCP_CNAME(channel=%d, cName=%s)", channel, cName); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "SetRTCP_CNAME() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"SetRTCP_CNAME() failed to locate channel"); return channelPtr->SetRTCP_CNAME(cName);
return -1;
}
return channelPtr->SetRTCP_CNAME(cName);
} }
int VoERTP_RTCPImpl::GetRemoteRTCP_CNAME(int channel, char cName[256]) int VoERTP_RTCPImpl::GetRemoteRTCP_CNAME(int channel, char cName[256]) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetRemoteRTCP_CNAME(channel=%d, cName=?)", channel);
"GetRemoteRTCP_CNAME(channel=%d, cName=?)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetRemoteRTCP_CNAME() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetRemoteRTCP_CNAME() failed to locate channel"); return channelPtr->GetRemoteRTCP_CNAME(cName);
return -1;
}
return channelPtr->GetRemoteRTCP_CNAME(cName);
} }
int VoERTP_RTCPImpl::GetRemoteRTCPData( int VoERTP_RTCPImpl::GetRemoteRTCPData(
int channel, int channel,
unsigned int& NTPHigh, // from sender info in SR unsigned int& NTPHigh, // from sender info in SR
unsigned int& NTPLow, // from sender info in SR unsigned int& NTPLow, // from sender info in SR
unsigned int& timestamp, // from sender info in SR unsigned int& timestamp, // from sender info in SR
unsigned int& playoutTimestamp, // derived locally unsigned int& playoutTimestamp, // derived locally
unsigned int* jitter, // from report block 1 in SR/RR unsigned int* jitter, // from report block 1 in SR/RR
unsigned short* fractionLost) // from report block 1 in SR/RR unsigned short* fractionLost) // from report block 1 in SR/RR
{ {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetRemoteRTCPData(channel=%d,...)", channel); "GetRemoteRTCPData(channel=%d,...)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetRemoteRTCP_CNAME() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetRemoteRTCP_CNAME() failed to locate channel"); }
return -1; return channelPtr->GetRemoteRTCPData(NTPHigh, NTPLow, timestamp,
} playoutTimestamp, jitter, fractionLost);
return channelPtr->GetRemoteRTCPData(NTPHigh,
NTPLow,
timestamp,
playoutTimestamp,
jitter,
fractionLost);
} }
// Retrieves jitter and packet-discard statistics for |channel|. Returns 0
// on success, -1 on failure.
int VoERTP_RTCPImpl::GetRTPStatistics(int channel,
                                      unsigned int& averageJitterMs,
                                      unsigned int& maxJitterMs,
                                      unsigned int& discardedPackets) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "GetRTPStatistics(channel=%d,....)", channel);
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }
  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
  voe::Channel* channelPtr = ch.channel();
  if (channelPtr == NULL) {
    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
                          "GetRTPStatistics() failed to locate channel");
    return -1;
  }
  return channelPtr->GetRTPStatistics(averageJitterMs, maxJitterMs,
                                      discardedPackets);
}
int VoERTP_RTCPImpl::GetRTCPStatistics(int channel, CallStatistics& stats) int VoERTP_RTCPImpl::GetRTCPStatistics(int channel, CallStatistics& stats) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetRTCPStatistics(channel=%d)", channel);
"GetRTCPStatistics(channel=%d)", channel); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "GetRTPStatistics() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"GetRTPStatistics() failed to locate channel"); return channelPtr->GetRTPStatistics(stats);
return -1;
}
return channelPtr->GetRTPStatistics(stats);
} }
int VoERTP_RTCPImpl::GetRemoteRTCPReportBlocks( int VoERTP_RTCPImpl::GetRemoteRTCPReportBlocks(
@ -401,159 +369,142 @@ int VoERTP_RTCPImpl::GetRemoteRTCPReportBlocks(
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channel_ptr = ch.channel(); voe::Channel* channel_ptr = ch.channel();
if (channel_ptr == NULL) { if (channel_ptr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, _shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetRemoteRTCPReportBlocks() failed to locate channel"); "GetRemoteRTCPReportBlocks() failed to locate channel");
return -1; return -1;
} }
return channel_ptr->GetRemoteRTCPReportBlocks(report_blocks); return channel_ptr->GetRemoteRTCPReportBlocks(report_blocks);
} }
int VoERTP_RTCPImpl::SetREDStatus(int channel, bool enable, int redPayloadtype) int VoERTP_RTCPImpl::SetREDStatus(int channel,
{ bool enable,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), int redPayloadtype) {
"SetREDStatus(channel=%d, enable=%d, redPayloadtype=%d)", WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
channel, enable, redPayloadtype); "SetREDStatus(channel=%d, enable=%d, redPayloadtype=%d)",
channel, enable, redPayloadtype);
#ifdef WEBRTC_CODEC_RED #ifdef WEBRTC_CODEC_RED
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetREDStatus() failed to locate channel");
return -1;
}
return channelPtr->SetREDStatus(enable, redPayloadtype);
#else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetREDStatus() RED is not supported");
return -1; return -1;
}
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetREDStatus() failed to locate channel");
return -1;
}
return channelPtr->SetREDStatus(enable, redPayloadtype);
#else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"SetREDStatus() RED is not supported");
return -1;
#endif #endif
} }
int VoERTP_RTCPImpl::GetREDStatus(int channel, int VoERTP_RTCPImpl::GetREDStatus(int channel,
bool& enabled, bool& enabled,
int& redPayloadtype) int& redPayloadtype) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "GetREDStatus(channel=%d, enabled=?, redPayloadtype=?)",
"GetREDStatus(channel=%d, enabled=?, redPayloadtype=?)", channel);
channel);
#ifdef WEBRTC_CODEC_RED #ifdef WEBRTC_CODEC_RED
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetREDStatus() failed to locate channel");
return -1;
}
return channelPtr->GetREDStatus(enabled, redPayloadtype);
#else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetREDStatus() RED is not supported");
return -1; return -1;
}
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetREDStatus() failed to locate channel");
return -1;
}
return channelPtr->GetREDStatus(enabled, redPayloadtype);
#else
_shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
"GetREDStatus() RED is not supported");
return -1;
#endif #endif
} }
int VoERTP_RTCPImpl::SetNACKStatus(int channel, int VoERTP_RTCPImpl::SetNACKStatus(int channel, bool enable, int maxNoPackets) {
bool enable, WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
int maxNoPackets) "SetNACKStatus(channel=%d, enable=%d, maxNoPackets=%d)", channel,
{ enable, maxNoPackets);
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetNACKStatus(channel=%d, enable=%d, maxNoPackets=%d)",
channel, enable, maxNoPackets);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel(); voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) if (channelPtr == NULL) {
{ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, "SetNACKStatus() failed to locate channel");
"SetNACKStatus() failed to locate channel"); return -1;
return -1; }
} channelPtr->SetNACKStatus(enable, maxNoPackets);
channelPtr->SetNACKStatus(enable, maxNoPackets); return 0;
return 0;
} }
int VoERTP_RTCPImpl::StartRTPDump(int channel, int VoERTP_RTCPImpl::StartRTPDump(int channel,
const char fileNameUTF8[1024], const char fileNameUTF8[1024],
RTPDirections direction) RTPDirections direction) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StartRTPDump(channel=%d, fileNameUTF8=%s, direction=%d)",
"StartRTPDump(channel=%d, fileNameUTF8=%s, direction=%d)", channel, fileNameUTF8, direction);
channel, fileNameUTF8, direction); assert(1024 == FileWrapper::kMaxFileNameSize);
assert(1024 == FileWrapper::kMaxFileNameSize); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "StartRTPDump() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"StartRTPDump() failed to locate channel"); return channelPtr->StartRTPDump(fileNameUTF8, direction);
return -1;
}
return channelPtr->StartRTPDump(fileNameUTF8, direction);
} }
int VoERTP_RTCPImpl::StopRTPDump(int channel, RTPDirections direction) int VoERTP_RTCPImpl::StopRTPDump(int channel, RTPDirections direction) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "StopRTPDump(channel=%d, direction=%d)", channel, direction);
"StopRTPDump(channel=%d, direction=%d)", channel, direction); if (!_shared->statistics().Initialized()) {
if (!_shared->statistics().Initialized()) _shared->SetLastError(VE_NOT_INITED, kTraceError);
{ return -1;
_shared->SetLastError(VE_NOT_INITED, kTraceError); }
return -1; voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
} voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "StopRTPDump() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"StopRTPDump() failed to locate channel"); return channelPtr->StopRTPDump(direction);
return -1;
}
return channelPtr->StopRTPDump(direction);
} }
int VoERTP_RTCPImpl::RTPDumpIsActive(int channel, RTPDirections direction) int VoERTP_RTCPImpl::RTPDumpIsActive(int channel, RTPDirections direction) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "RTPDumpIsActive(channel=%d, direction=%d)", channel, direction);
"RTPDumpIsActive(channel=%d, direction=%d)", if (!_shared->statistics().Initialized()) {
channel, direction); _shared->SetLastError(VE_NOT_INITED, kTraceError);
if (!_shared->statistics().Initialized()) return -1;
{ }
_shared->SetLastError(VE_NOT_INITED, kTraceError); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
return -1; voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
voe::Channel* channelPtr = ch.channel(); "StopRTPDump() failed to locate channel");
if (channelPtr == NULL) return -1;
{ }
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return channelPtr->RTPDumpIsActive(direction);
"StopRTPDump() failed to locate channel");
return -1;
}
return channelPtr->RTPDumpIsActive(direction);
} }
int VoERTP_RTCPImpl::SetVideoEngineBWETarget(int channel, int VoERTP_RTCPImpl::SetVideoEngineBWETarget(int channel,
ViENetwork* vie_network, ViENetwork* vie_network,
int video_channel) { int video_channel) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(
kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetVideoEngineBWETarget(channel=%d, vie_network=?, video_channel=%d)", "SetVideoEngineBWETarget(channel=%d, vie_network=?, video_channel=%d)",
channel, vie_network, video_channel); channel, vie_network, video_channel);

View File

@ -17,93 +17,90 @@
namespace webrtc { namespace webrtc {
class VoERTP_RTCPImpl : public VoERTP_RTCP class VoERTP_RTCPImpl : public VoERTP_RTCP {
{ public:
public: // RTCP
// RTCP int SetRTCPStatus(int channel, bool enable) override;
virtual int SetRTCPStatus(int channel, bool enable);
virtual int GetRTCPStatus(int channel, bool& enabled); int GetRTCPStatus(int channel, bool& enabled) override;
virtual int SetRTCP_CNAME(int channel, const char cName[256]); int SetRTCP_CNAME(int channel, const char cName[256]) override;
virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]); int GetRemoteRTCP_CNAME(int channel, char cName[256]) override;
virtual int GetRemoteRTCPData(int channel, int GetRemoteRTCPData(int channel,
unsigned int& NTPHigh, unsigned int& NTPHigh,
unsigned int& NTPLow, unsigned int& NTPLow,
unsigned int& timestamp, unsigned int& timestamp,
unsigned int& playoutTimestamp, unsigned int& playoutTimestamp,
unsigned int* jitter = NULL, unsigned int* jitter = NULL,
unsigned short* fractionLost = NULL); unsigned short* fractionLost = NULL) override;
// SSRC // SSRC
virtual int SetLocalSSRC(int channel, unsigned int ssrc); int SetLocalSSRC(int channel, unsigned int ssrc) override;
virtual int GetLocalSSRC(int channel, unsigned int& ssrc); int GetLocalSSRC(int channel, unsigned int& ssrc) override;
virtual int GetRemoteSSRC(int channel, unsigned int& ssrc); int GetRemoteSSRC(int channel, unsigned int& ssrc) override;
// RTP Header Extension for Client-to-Mixer Audio Level Indication // RTP Header Extension for Client-to-Mixer Audio Level Indication
virtual int SetSendAudioLevelIndicationStatus(int channel, int SetSendAudioLevelIndicationStatus(int channel,
bool enable, bool enable,
unsigned char id); unsigned char id) override;
virtual int SetReceiveAudioLevelIndicationStatus(int channel, int SetReceiveAudioLevelIndicationStatus(int channel,
bool enable, bool enable,
unsigned char id); unsigned char id) override;
// RTP Header Extension for Absolute Sender Time // RTP Header Extension for Absolute Sender Time
virtual int SetSendAbsoluteSenderTimeStatus(int channel, int SetSendAbsoluteSenderTimeStatus(int channel,
bool enable, bool enable,
unsigned char id); unsigned char id) override;
virtual int SetReceiveAbsoluteSenderTimeStatus(int channel, int SetReceiveAbsoluteSenderTimeStatus(int channel,
bool enable, bool enable,
unsigned char id); unsigned char id) override;
// Statistics // Statistics
virtual int GetRTPStatistics(int channel, int GetRTPStatistics(int channel,
unsigned int& averageJitterMs, unsigned int& averageJitterMs,
unsigned int& maxJitterMs, unsigned int& maxJitterMs,
unsigned int& discardedPackets); unsigned int& discardedPackets) override;
virtual int GetRTCPStatistics(int channel, CallStatistics& stats); int GetRTCPStatistics(int channel, CallStatistics& stats) override;
virtual int GetRemoteRTCPReportBlocks( int GetRemoteRTCPReportBlocks(
int channel, std::vector<ReportBlock>* report_blocks); int channel,
std::vector<ReportBlock>* report_blocks) override;
// RED // RED
virtual int SetREDStatus(int channel, int SetREDStatus(int channel, bool enable, int redPayloadtype = -1) override;
bool enable,
int redPayloadtype = -1);
virtual int GetREDStatus(int channel, bool& enabled, int& redPayloadtype); int GetREDStatus(int channel, bool& enabled, int& redPayloadtype) override;
//NACK // NACK
virtual int SetNACKStatus(int channel, int SetNACKStatus(int channel, bool enable, int maxNoPackets) override;
bool enable,
int maxNoPackets);
// Store RTP and RTCP packets and dump to file (compatible with rtpplay) // Store RTP and RTCP packets and dump to file (compatible with rtpplay)
virtual int StartRTPDump(int channel, int StartRTPDump(int channel,
const char fileNameUTF8[1024], const char fileNameUTF8[1024],
RTPDirections direction = kRtpIncoming); RTPDirections direction = kRtpIncoming) override;
virtual int StopRTPDump(int channel, int StopRTPDump(int channel, RTPDirections direction = kRtpIncoming) override;
RTPDirections direction = kRtpIncoming);
virtual int RTPDumpIsActive(int channel, int RTPDumpIsActive(int channel,
RTPDirections direction = kRtpIncoming); RTPDirections direction = kRtpIncoming) override;
virtual int SetVideoEngineBWETarget(int channel, ViENetwork* vie_network, int SetVideoEngineBWETarget(int channel,
int video_channel); ViENetwork* vie_network,
protected: int video_channel) override;
VoERTP_RTCPImpl(voe::SharedData* shared);
virtual ~VoERTP_RTCPImpl();
private: protected:
voe::SharedData* _shared; VoERTP_RTCPImpl(voe::SharedData* shared);
~VoERTP_RTCPImpl() override;
private:
voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H #endif // WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H

View File

@ -18,144 +18,124 @@
namespace webrtc { namespace webrtc {
VoEVideoSync* VoEVideoSync::GetInterface(VoiceEngine* voiceEngine) VoEVideoSync* VoEVideoSync::GetInterface(VoiceEngine* voiceEngine) {
{
#ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API #ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
return NULL; return NULL;
#else #else
if (NULL == voiceEngine) if (NULL == voiceEngine) {
{ return NULL;
return NULL; }
} VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); s->AddRef();
s->AddRef(); return s;
return s;
#endif #endif
} }
#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API #ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
VoEVideoSyncImpl::VoEVideoSyncImpl(voe::SharedData* shared) : _shared(shared) VoEVideoSyncImpl::VoEVideoSyncImpl(voe::SharedData* shared) : _shared(shared) {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEVideoSyncImpl::VoEVideoSyncImpl() - ctor");
"VoEVideoSyncImpl::VoEVideoSyncImpl() - ctor");
} }
VoEVideoSyncImpl::~VoEVideoSyncImpl() VoEVideoSyncImpl::~VoEVideoSyncImpl() {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1), "VoEVideoSyncImpl::~VoEVideoSyncImpl() - dtor");
"VoEVideoSyncImpl::~VoEVideoSyncImpl() - dtor");
} }
int VoEVideoSyncImpl::GetPlayoutTimestamp(int channel, unsigned int& timestamp) int VoEVideoSyncImpl::GetPlayoutTimestamp(int channel,
{ unsigned int& timestamp) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetPlayoutTimestamp(channel=%d, timestamp=?)", channel); "GetPlayoutTimestamp(channel=%d, timestamp=?)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channel_ptr = ch.channel();
voe::Channel* channel_ptr = ch.channel(); if (channel_ptr == NULL) {
if (channel_ptr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetPlayoutTimestamp() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetPlayoutTimestamp() failed to locate channel"); }
return -1; return channel_ptr->GetPlayoutTimestamp(timestamp);
}
return channel_ptr->GetPlayoutTimestamp(timestamp);
} }
int VoEVideoSyncImpl::SetInitTimestamp(int channel, int VoEVideoSyncImpl::SetInitTimestamp(int channel, unsigned int timestamp) {
unsigned int timestamp) WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
{ "SetInitTimestamp(channel=%d, timestamp=%lu)", channel,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), timestamp);
"SetInitTimestamp(channel=%d, timestamp=%lu)",
channel, timestamp);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetInitTimestamp() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetInitTimestamp() failed to locate channel"); }
return -1; return channelPtr->SetInitTimestamp(timestamp);
}
return channelPtr->SetInitTimestamp(timestamp);
} }
int VoEVideoSyncImpl::SetInitSequenceNumber(int channel, int VoEVideoSyncImpl::SetInitSequenceNumber(int channel, short sequenceNumber) {
short sequenceNumber) WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
{ "SetInitSequenceNumber(channel=%d, sequenceNumber=%hd)", channel,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), sequenceNumber);
"SetInitSequenceNumber(channel=%d, sequenceNumber=%hd)",
channel, sequenceNumber);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetInitSequenceNumber() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetInitSequenceNumber() failed to locate channel"); }
return -1; return channelPtr->SetInitSequenceNumber(sequenceNumber);
}
return channelPtr->SetInitSequenceNumber(sequenceNumber);
} }
int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel,int delayMs) int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel, int delayMs) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetMinimumPlayoutDelay(channel=%d, delayMs=%d)", channel,
"SetMinimumPlayoutDelay(channel=%d, delayMs=%d)", delayMs);
channel, delayMs);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetMinimumPlayoutDelay() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetMinimumPlayoutDelay() failed to locate channel"); }
return -1; return channelPtr->SetMinimumPlayoutDelay(delayMs);
}
return channelPtr->SetMinimumPlayoutDelay(delayMs);
} }
int VoEVideoSyncImpl::SetInitialPlayoutDelay(int channel, int delay_ms) int VoEVideoSyncImpl::SetInitialPlayoutDelay(int channel, int delay_ms) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), "SetInitialPlayoutDelay(channel=%d, delay_ms=%d)", channel,
"SetInitialPlayoutDelay(channel=%d, delay_ms=%d)", delay_ms);
channel, delay_ms);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetInitialPlayoutDelay() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetInitialPlayoutDelay() failed to locate channel"); }
return -1; return channelPtr->SetInitialPlayoutDelay(delay_ms);
}
return channelPtr->SetInitialPlayoutDelay(delay_ms);
} }
int VoEVideoSyncImpl::GetDelayEstimate(int channel, int VoEVideoSyncImpl::GetDelayEstimate(int channel,
@ -182,52 +162,45 @@ int VoEVideoSyncImpl::GetDelayEstimate(int channel,
return 0; return 0;
} }
int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs) int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetPlayoutBufferSize(bufferMs=?)"); "GetPlayoutBufferSize(bufferMs=?)");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} AudioDeviceModule::BufferType type(AudioDeviceModule::kFixedBufferSize);
AudioDeviceModule::BufferType type uint16_t sizeMS(0);
(AudioDeviceModule::kFixedBufferSize); if (_shared->audio_device()->PlayoutBuffer(&type, &sizeMS) != 0) {
uint16_t sizeMS(0); _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
if (_shared->audio_device()->PlayoutBuffer(&type, &sizeMS) != 0) "GetPlayoutBufferSize() failed to read buffer size");
{ return -1;
_shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError, }
"GetPlayoutBufferSize() failed to read buffer size"); bufferMs = sizeMS;
return -1; WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
} "GetPlayoutBufferSize() => bufferMs=%d", bufferMs);
bufferMs = sizeMS; return 0;
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"GetPlayoutBufferSize() => bufferMs=%d", bufferMs);
return 0;
} }
int VoEVideoSyncImpl::GetRtpRtcp(int channel, RtpRtcp** rtpRtcpModule, int VoEVideoSyncImpl::GetRtpRtcp(int channel,
RtpReceiver** rtp_receiver) RtpRtcp** rtpRtcpModule,
{ RtpReceiver** rtp_receiver) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetRtpRtcp(channel=%i)", channel); "GetRtpRtcp(channel=%i)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetPlayoutTimestamp() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetPlayoutTimestamp() failed to locate channel"); }
return -1; return channelPtr->GetRtpRtcp(rtpRtcpModule, rtp_receiver);
}
return channelPtr->GetRtpRtcp(rtpRtcpModule, rtp_receiver);
} }
int VoEVideoSyncImpl::GetLeastRequiredDelayMs(int channel) const { int VoEVideoSyncImpl::GetLeastRequiredDelayMs(int channel) const {

View File

@ -17,38 +17,38 @@
namespace webrtc { namespace webrtc {
class VoEVideoSyncImpl : public VoEVideoSync class VoEVideoSyncImpl : public VoEVideoSync {
{ public:
public: int GetPlayoutBufferSize(int& bufferMs) override;
virtual int GetPlayoutBufferSize(int& bufferMs);
virtual int SetMinimumPlayoutDelay(int channel, int delayMs); int SetMinimumPlayoutDelay(int channel, int delayMs) override;
virtual int SetInitialPlayoutDelay(int channel, int delay_ms); int SetInitialPlayoutDelay(int channel, int delay_ms) override;
virtual int GetDelayEstimate(int channel, int GetDelayEstimate(int channel,
int* jitter_buffer_delay_ms, int* jitter_buffer_delay_ms,
int* playout_buffer_delay_ms); int* playout_buffer_delay_ms) override;
virtual int GetLeastRequiredDelayMs(int channel) const; int GetLeastRequiredDelayMs(int channel) const override;
virtual int SetInitTimestamp(int channel, unsigned int timestamp); int SetInitTimestamp(int channel, unsigned int timestamp) override;
virtual int SetInitSequenceNumber(int channel, short sequenceNumber); int SetInitSequenceNumber(int channel, short sequenceNumber) override;
virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp); int GetPlayoutTimestamp(int channel, unsigned int& timestamp) override;
virtual int GetRtpRtcp(int channel, RtpRtcp** rtpRtcpModule, int GetRtpRtcp(int channel,
RtpReceiver** rtp_receiver); RtpRtcp** rtpRtcpModule,
RtpReceiver** rtp_receiver) override;
protected: protected:
VoEVideoSyncImpl(voe::SharedData* shared); VoEVideoSyncImpl(voe::SharedData* shared);
virtual ~VoEVideoSyncImpl(); ~VoEVideoSyncImpl() override;
private: private:
voe::SharedData* _shared; voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H #endif // WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H

View File

@ -20,507 +20,435 @@
namespace webrtc { namespace webrtc {
VoEVolumeControl* VoEVolumeControl::GetInterface(VoiceEngine* voiceEngine) VoEVolumeControl* VoEVolumeControl::GetInterface(VoiceEngine* voiceEngine) {
{
#ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API #ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
return NULL; return NULL;
#else #else
if (NULL == voiceEngine) if (NULL == voiceEngine) {
{ return NULL;
return NULL; }
} VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); s->AddRef();
s->AddRef(); return s;
return s;
#endif #endif
} }
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
VoEVolumeControlImpl::VoEVolumeControlImpl(voe::SharedData* shared) VoEVolumeControlImpl::VoEVolumeControlImpl(voe::SharedData* shared)
: _shared(shared) : _shared(shared) {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
"VoEVolumeControlImpl::VoEVolumeControlImpl() - ctor"); "VoEVolumeControlImpl::VoEVolumeControlImpl() - ctor");
} }
VoEVolumeControlImpl::~VoEVolumeControlImpl() VoEVolumeControlImpl::~VoEVolumeControlImpl() {
{ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
"VoEVolumeControlImpl::~VoEVolumeControlImpl() - dtor"); "VoEVolumeControlImpl::~VoEVolumeControlImpl() - dtor");
} }
int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume) int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetSpeakerVolume(volume=%u)", volume); "SetSpeakerVolume(volume=%u)", volume);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (volume > kMaxVolumeLevel) {
if (volume > kMaxVolumeLevel) _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
{ "SetSpeakerVolume() invalid argument");
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, return -1;
"SetSpeakerVolume() invalid argument"); }
return -1;
}
uint32_t maxVol(0); uint32_t maxVol(0);
uint32_t spkrVol(0); uint32_t spkrVol(0);
// scale: [0,kMaxVolumeLevel] -> [0,MaxSpeakerVolume] // scale: [0,kMaxVolumeLevel] -> [0,MaxSpeakerVolume]
if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) {
{ _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
_shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError, "SetSpeakerVolume() failed to get max volume");
"SetSpeakerVolume() failed to get max volume"); return -1;
return -1; }
} // Round the value and avoid floating computation.
// Round the value and avoid floating computation. spkrVol = (uint32_t)((volume * maxVol + (int)(kMaxVolumeLevel / 2)) /
spkrVol = (uint32_t)((volume * maxVol + (kMaxVolumeLevel));
(int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
// set the actual volume using the audio mixer // set the actual volume using the audio mixer
if (_shared->audio_device()->SetSpeakerVolume(spkrVol) != 0) if (_shared->audio_device()->SetSpeakerVolume(spkrVol) != 0) {
{ _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
_shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError, "SetSpeakerVolume() failed to set speaker volume");
"SetSpeakerVolume() failed to set speaker volume"); return -1;
return -1; }
} return 0;
return 0;
} }
int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume) int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetSpeakerVolume()"); "GetSpeakerVolume()");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
}
uint32_t spkrVol(0); uint32_t spkrVol(0);
uint32_t maxVol(0); uint32_t maxVol(0);
if (_shared->audio_device()->SpeakerVolume(&spkrVol) != 0) if (_shared->audio_device()->SpeakerVolume(&spkrVol) != 0) {
{ _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
_shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError, "GetSpeakerVolume() unable to get speaker volume");
"GetSpeakerVolume() unable to get speaker volume"); return -1;
return -1; }
}
// scale: [0, MaxSpeakerVolume] -> [0, kMaxVolumeLevel] // scale: [0, MaxSpeakerVolume] -> [0, kMaxVolumeLevel]
if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) {
{ _shared->SetLastError(
_shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError, VE_GET_MIC_VOL_ERROR, kTraceError,
"GetSpeakerVolume() unable to get max speaker volume"); "GetSpeakerVolume() unable to get max speaker volume");
return -1; return -1;
} }
// Round the value and avoid floating computation. // Round the value and avoid floating computation.
volume = (uint32_t) ((spkrVol * kMaxVolumeLevel + volume =
(int)(maxVol / 2)) / (maxVol)); (uint32_t)((spkrVol * kMaxVolumeLevel + (int)(maxVol / 2)) / (maxVol));
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
VoEId(_shared->instance_id(), -1), "GetSpeakerVolume() => volume=%d", volume);
"GetSpeakerVolume() => volume=%d", volume); return 0;
return 0;
} }
int VoEVolumeControlImpl::SetMicVolume(unsigned int volume) int VoEVolumeControlImpl::SetMicVolume(unsigned int volume) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetMicVolume(volume=%u)", volume); "SetMicVolume(volume=%u)", volume);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (volume > kMaxVolumeLevel) {
if (volume > kMaxVolumeLevel) _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
{ "SetMicVolume() invalid argument");
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, return -1;
"SetMicVolume() invalid argument"); }
return -1;
}
uint32_t maxVol(0); uint32_t maxVol(0);
uint32_t micVol(0); uint32_t micVol(0);
// scale: [0, kMaxVolumeLevel] -> [0,MaxMicrophoneVolume] // scale: [0, kMaxVolumeLevel] -> [0,MaxMicrophoneVolume]
if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) {
{ _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
_shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError, "SetMicVolume() failed to get max volume");
"SetMicVolume() failed to get max volume"); return -1;
return -1; }
if (volume == kMaxVolumeLevel) {
// On Linux running pulse, users are able to set the volume above 100%
// through the volume control panel, where the +100% range is digital
// scaling. WebRTC does not support setting the volume above 100%, and
// simply ignores changing the volume if the user tries to set it to
// |kMaxVolumeLevel| while the current volume is higher than |maxVol|.
if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
_shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
"SetMicVolume() unable to get microphone volume");
return -1;
} }
if (micVol >= maxVol)
return 0;
}
if (volume == kMaxVolumeLevel) { // Round the value and avoid floating point computation.
// On Linux running pulse, users are able to set the volume above 100% micVol = (uint32_t)((volume * maxVol + (int)(kMaxVolumeLevel / 2)) /
// through the volume control panel, where the +100% range is digital (kMaxVolumeLevel));
// scaling. WebRTC does not support setting the volume above 100%, and
// simply ignores changing the volume if the user tries to set it to
// |kMaxVolumeLevel| while the current volume is higher than |maxVol|.
if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
_shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
"SetMicVolume() unable to get microphone volume");
return -1;
}
if (micVol >= maxVol)
return 0;
}
// Round the value and avoid floating point computation. // set the actual volume using the audio mixer
micVol = (uint32_t) ((volume * maxVol + if (_shared->audio_device()->SetMicrophoneVolume(micVol) != 0) {
(int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel)); _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
"SetMicVolume() failed to set mic volume");
// set the actual volume using the audio mixer return -1;
if (_shared->audio_device()->SetMicrophoneVolume(micVol) != 0) }
{ return 0;
_shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
"SetMicVolume() failed to set mic volume");
return -1;
}
return 0;
} }
int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume) int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetMicVolume()"); "GetMicVolume()");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
}
uint32_t micVol(0); uint32_t micVol(0);
uint32_t maxVol(0); uint32_t maxVol(0);
if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
{ _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
_shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError, "GetMicVolume() unable to get microphone volume");
"GetMicVolume() unable to get microphone volume"); return -1;
return -1; }
}
// scale: [0, MaxMicrophoneVolume] -> [0, kMaxVolumeLevel] // scale: [0, MaxMicrophoneVolume] -> [0, kMaxVolumeLevel]
if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) {
{ _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
_shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError, "GetMicVolume() unable to get max microphone volume");
"GetMicVolume() unable to get max microphone volume"); return -1;
return -1; }
} if (micVol < maxVol) {
if (micVol < maxVol) { // Round the value and avoid floating point calculation.
// Round the value and avoid floating point calculation. volume =
volume = (uint32_t) ((micVol * kMaxVolumeLevel + (uint32_t)((micVol * kMaxVolumeLevel + (int)(maxVol / 2)) / (maxVol));
(int)(maxVol / 2)) / (maxVol)); } else {
} else { // Truncate the value to the kMaxVolumeLevel.
// Truncate the value to the kMaxVolumeLevel. volume = kMaxVolumeLevel;
volume = kMaxVolumeLevel; }
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
VoEId(_shared->instance_id(), -1), "GetMicVolume() => volume=%d", volume);
"GetMicVolume() => volume=%d", volume); return 0;
return 0;
} }
int VoEVolumeControlImpl::SetInputMute(int channel, bool enable) int VoEVolumeControlImpl::SetInputMute(int channel, bool enable) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetInputMute(channel=%d, enable=%d)", channel, enable); "SetInputMute(channel=%d, enable=%d)", channel, enable);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (channel == -1) {
if (channel == -1) // Mute before demultiplexing <=> affects all channels
{ return _shared->transmit_mixer()->SetMute(enable);
// Mute before demultiplexing <=> affects all channels }
return _shared->transmit_mixer()->SetMute(enable); // Mute after demultiplexing <=> affects one channel only
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
// Mute after demultiplexing <=> affects one channel only voe::Channel* channelPtr = ch.channel();
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); if (channelPtr == NULL) {
voe::Channel* channelPtr = ch.channel(); _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "SetInputMute() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"SetInputMute() failed to locate channel"); return channelPtr->SetMute(enable);
return -1;
}
return channelPtr->SetMute(enable);
} }
int VoEVolumeControlImpl::GetInputMute(int channel, bool& enabled) int VoEVolumeControlImpl::GetInputMute(int channel, bool& enabled) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetInputMute(channel=%d)", channel); "GetInputMute(channel=%d)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
if (channel == -1) {
enabled = _shared->transmit_mixer()->Mute();
} else {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetInputMute() failed to locate channel");
return -1;
} }
if (channel == -1) enabled = channelPtr->Mute();
{ }
enabled = _shared->transmit_mixer()->Mute(); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
} "GetInputMute() => enabled = %d", (int)enabled);
else return 0;
{
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"SetInputMute() failed to locate channel");
return -1;
}
enabled = channelPtr->Mute();
}
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
VoEId(_shared->instance_id(), -1),
"GetInputMute() => enabled = %d", (int)enabled);
return 0;
} }
int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level) int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetSpeechInputLevel()"); "GetSpeechInputLevel()");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} int8_t currentLevel = _shared->transmit_mixer()->AudioLevel();
int8_t currentLevel = _shared->transmit_mixer()->AudioLevel(); level = static_cast<unsigned int>(currentLevel);
level = static_cast<unsigned int> (currentLevel); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, "GetSpeechInputLevel() => %d", level);
VoEId(_shared->instance_id(), -1), return 0;
"GetSpeechInputLevel() => %d", level);
return 0;
} }
int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel, int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel,
unsigned int& level) unsigned int& level) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetSpeechOutputLevel(channel=%d, level=?)", channel); "GetSpeechOutputLevel(channel=%d, level=?)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
if (channel == -1) {
return _shared->output_mixer()->GetSpeechOutputLevel((uint32_t&)level);
} else {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetSpeechOutputLevel() failed to locate channel");
return -1;
} }
if (channel == -1) channelPtr->GetSpeechOutputLevel((uint32_t&)level);
{ }
return _shared->output_mixer()->GetSpeechOutputLevel( return 0;
(uint32_t&)level);
}
else
{
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetSpeechOutputLevel() failed to locate channel");
return -1;
}
channelPtr->GetSpeechOutputLevel((uint32_t&)level);
}
return 0;
} }
int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level) int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetSpeechInputLevelFullRange(level=?)"); "GetSpeechInputLevelFullRange(level=?)");
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} int16_t currentLevel = _shared->transmit_mixer()->AudioLevelFullRange();
int16_t currentLevel = _shared->transmit_mixer()-> level = static_cast<unsigned int>(currentLevel);
AudioLevelFullRange(); WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
level = static_cast<unsigned int> (currentLevel); "GetSpeechInputLevelFullRange() => %d", level);
WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, return 0;
VoEId(_shared->instance_id(), -1),
"GetSpeechInputLevelFullRange() => %d", level);
return 0;
} }
int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel, int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel,
unsigned int& level) unsigned int& level) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetSpeechOutputLevelFullRange(channel=%d, level=?)", channel); "GetSpeechOutputLevelFullRange(channel=%d, level=?)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
if (channel == -1) {
return _shared->output_mixer()->GetSpeechOutputLevelFullRange(
(uint32_t&)level);
} else {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"GetSpeechOutputLevelFullRange() failed to locate channel");
return -1;
} }
if (channel == -1) channelPtr->GetSpeechOutputLevelFullRange((uint32_t&)level);
{ }
return _shared->output_mixer()->GetSpeechOutputLevelFullRange( return 0;
(uint32_t&)level);
}
else
{
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL)
{
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
"GetSpeechOutputLevelFullRange() failed to locate channel");
return -1;
}
channelPtr->GetSpeechOutputLevelFullRange((uint32_t&)level);
}
return 0;
} }
int VoEVolumeControlImpl::SetChannelOutputVolumeScaling(int channel, int VoEVolumeControlImpl::SetChannelOutputVolumeScaling(int channel,
float scaling) float scaling) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetChannelOutputVolumeScaling(channel=%d, scaling=%3.2f)", "SetChannelOutputVolumeScaling(channel=%d, scaling=%3.2f)",
channel, scaling); channel, scaling);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} if (scaling < kMinOutputVolumeScaling || scaling > kMaxOutputVolumeScaling) {
if (scaling < kMinOutputVolumeScaling || _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
scaling > kMaxOutputVolumeScaling) "SetChannelOutputVolumeScaling() invalid parameter");
{ return -1;
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, }
"SetChannelOutputVolumeScaling() invalid parameter"); voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
return -1; voe::Channel* channelPtr = ch.channel();
} if (channelPtr == NULL) {
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); _shared->SetLastError(
voe::Channel* channelPtr = ch.channel(); VE_CHANNEL_NOT_VALID, kTraceError,
if (channelPtr == NULL) "SetChannelOutputVolumeScaling() failed to locate channel");
{ return -1;
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, }
"SetChannelOutputVolumeScaling() failed to locate channel"); return channelPtr->SetChannelOutputVolumeScaling(scaling);
return -1;
}
return channelPtr->SetChannelOutputVolumeScaling(scaling);
} }
int VoEVolumeControlImpl::GetChannelOutputVolumeScaling(int channel, int VoEVolumeControlImpl::GetChannelOutputVolumeScaling(int channel,
float& scaling) float& scaling) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetChannelOutputVolumeScaling(channel=%d, scaling=?)", channel); "GetChannelOutputVolumeScaling(channel=%d, scaling=?)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(
{ VE_CHANNEL_NOT_VALID, kTraceError,
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, "GetChannelOutputVolumeScaling() failed to locate channel");
"GetChannelOutputVolumeScaling() failed to locate channel"); return -1;
return -1; }
} return channelPtr->GetChannelOutputVolumeScaling(scaling);
return channelPtr->GetChannelOutputVolumeScaling(scaling);
} }
int VoEVolumeControlImpl::SetOutputVolumePan(int channel, int VoEVolumeControlImpl::SetOutputVolumePan(int channel,
float left, float left,
float right) float right) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)", "SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)",
channel, left, right); channel, left, right);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
}
bool available(false); bool available(false);
_shared->audio_device()->StereoPlayoutIsAvailable(&available); _shared->audio_device()->StereoPlayoutIsAvailable(&available);
if (!available) if (!available) {
{ _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
_shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError, "SetOutputVolumePan() stereo playout not supported");
"SetOutputVolumePan() stereo playout not supported"); return -1;
return -1; }
} if ((left < kMinOutputVolumePanning) || (left > kMaxOutputVolumePanning) ||
if ((left < kMinOutputVolumePanning) || (right < kMinOutputVolumePanning) || (right > kMaxOutputVolumePanning)) {
(left > kMaxOutputVolumePanning) || _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
(right < kMinOutputVolumePanning) || "SetOutputVolumePan() invalid parameter");
(right > kMaxOutputVolumePanning)) return -1;
{ }
_shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
"SetOutputVolumePan() invalid parameter");
return -1;
}
if (channel == -1) if (channel == -1) {
{ // Master balance (affectes the signal after output mixing)
// Master balance (affectes the signal after output mixing) return _shared->output_mixer()->SetOutputVolumePan(left, right);
return _shared->output_mixer()->SetOutputVolumePan(left, right); }
} // Per-channel balance (affects the signal before output mixing)
// Per-channel balance (affects the signal before output mixing) voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "SetOutputVolumePan() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"SetOutputVolumePan() failed to locate channel"); }
return -1; return channelPtr->SetOutputVolumePan(left, right);
}
return channelPtr->SetOutputVolumePan(left, right);
} }
int VoEVolumeControlImpl::GetOutputVolumePan(int channel, int VoEVolumeControlImpl::GetOutputVolumePan(int channel,
float& left, float& left,
float& right) float& right) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"GetOutputVolumePan(channel=%d, left=?, right=?)", channel); "GetOutputVolumePan(channel=%d, left=?, right=?)", channel);
if (!_shared->statistics().Initialized()) if (!_shared->statistics().Initialized()) {
{ _shared->SetLastError(VE_NOT_INITED, kTraceError);
_shared->SetLastError(VE_NOT_INITED, kTraceError); return -1;
return -1; }
}
bool available(false); bool available(false);
_shared->audio_device()->StereoPlayoutIsAvailable(&available); _shared->audio_device()->StereoPlayoutIsAvailable(&available);
if (!available) if (!available) {
{ _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
_shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError, "GetOutputVolumePan() stereo playout not supported");
"GetOutputVolumePan() stereo playout not supported"); return -1;
return -1; }
}
if (channel == -1) if (channel == -1) {
{ return _shared->output_mixer()->GetOutputVolumePan(left, right);
return _shared->output_mixer()->GetOutputVolumePan(left, right); }
} voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel); voe::Channel* channelPtr = ch.channel();
voe::Channel* channelPtr = ch.channel(); if (channelPtr == NULL) {
if (channelPtr == NULL) _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
{ "GetOutputVolumePan() failed to locate channel");
_shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError, return -1;
"GetOutputVolumePan() failed to locate channel"); }
return -1; return channelPtr->GetOutputVolumePan(left, right);
}
return channelPtr->GetOutputVolumePan(left, right);
} }
#endif // #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API #endif // #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API

View File

@ -17,47 +17,44 @@
namespace webrtc { namespace webrtc {
class VoEVolumeControlImpl : public VoEVolumeControl class VoEVolumeControlImpl : public VoEVolumeControl {
{ public:
public: int SetSpeakerVolume(unsigned int volume) override;
virtual int SetSpeakerVolume(unsigned int volume);
virtual int GetSpeakerVolume(unsigned int& volume); int GetSpeakerVolume(unsigned int& volume) override;
virtual int SetMicVolume(unsigned int volume); int SetMicVolume(unsigned int volume) override;
virtual int GetMicVolume(unsigned int& volume); int GetMicVolume(unsigned int& volume) override;
virtual int SetInputMute(int channel, bool enable); int SetInputMute(int channel, bool enable) override;
virtual int GetInputMute(int channel, bool& enabled); int GetInputMute(int channel, bool& enabled) override;
virtual int GetSpeechInputLevel(unsigned int& level); int GetSpeechInputLevel(unsigned int& level) override;
virtual int GetSpeechOutputLevel(int channel, unsigned int& level); int GetSpeechOutputLevel(int channel, unsigned int& level) override;
virtual int GetSpeechInputLevelFullRange(unsigned int& level); int GetSpeechInputLevelFullRange(unsigned int& level) override;
virtual int GetSpeechOutputLevelFullRange(int channel, int GetSpeechOutputLevelFullRange(int channel, unsigned int& level) override;
unsigned int& level);
virtual int SetChannelOutputVolumeScaling(int channel, float scaling); int SetChannelOutputVolumeScaling(int channel, float scaling) override;
virtual int GetChannelOutputVolumeScaling(int channel, float& scaling); int GetChannelOutputVolumeScaling(int channel, float& scaling) override;
virtual int SetOutputVolumePan(int channel, float left, float right); int SetOutputVolumePan(int channel, float left, float right) override;
virtual int GetOutputVolumePan(int channel, float& left, float& right); int GetOutputVolumePan(int channel, float& left, float& right) override;
protected:
VoEVolumeControlImpl(voe::SharedData* shared);
~VoEVolumeControlImpl() override;
protected: private:
VoEVolumeControlImpl(voe::SharedData* shared); voe::SharedData* _shared;
virtual ~VoEVolumeControlImpl();
private:
voe::SharedData* _shared;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H #endif // WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H

View File

@ -44,17 +44,17 @@ const float kMinOutputVolumePanning = 0.0f;
const float kMaxOutputVolumePanning = 1.0f; const float kMaxOutputVolumePanning = 1.0f;
// DTMF // DTMF
enum { kMinDtmfEventCode = 0 }; // DTMF digit "0" enum { kMinDtmfEventCode = 0 }; // DTMF digit "0"
enum { kMaxDtmfEventCode = 15 }; // DTMF digit "D" enum { kMaxDtmfEventCode = 15 }; // DTMF digit "D"
enum { kMinTelephoneEventCode = 0 }; // RFC4733 (Section 2.3.1) enum { kMinTelephoneEventCode = 0 }; // RFC4733 (Section 2.3.1)
enum { kMaxTelephoneEventCode = 255 }; // RFC4733 (Section 2.3.1) enum { kMaxTelephoneEventCode = 255 }; // RFC4733 (Section 2.3.1)
enum { kMinTelephoneEventDuration = 100 }; enum { kMinTelephoneEventDuration = 100 };
enum { kMaxTelephoneEventDuration = 60000 }; // Actual limit is 2^16 enum { kMaxTelephoneEventDuration = 60000 }; // Actual limit is 2^16
enum { kMinTelephoneEventAttenuation = 0 }; // 0 dBm0 enum { kMinTelephoneEventAttenuation = 0 }; // 0 dBm0
enum { kMaxTelephoneEventAttenuation = 36 }; // -36 dBm0 enum { kMaxTelephoneEventAttenuation = 36 }; // -36 dBm0
enum { kMinTelephoneEventSeparationMs = 100 }; // Min delta time between two enum { kMinTelephoneEventSeparationMs = 100 }; // Min delta time between two
// telephone events // telephone events
enum { kVoiceEngineMaxIpPacketSizeBytes = 1500 }; // assumes Ethernet enum { kVoiceEngineMaxIpPacketSizeBytes = 1500 }; // assumes Ethernet
enum { kVoiceEngineMaxModuleVersionSize = 960 }; enum { kVoiceEngineMaxModuleVersionSize = 960 };
@ -65,15 +65,15 @@ enum { kVoiceEngineVersionMaxMessageSize = 1024 };
const NoiseSuppression::Level kDefaultNsMode = NoiseSuppression::kModerate; const NoiseSuppression::Level kDefaultNsMode = NoiseSuppression::kModerate;
const GainControl::Mode kDefaultAgcMode = const GainControl::Mode kDefaultAgcMode =
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
GainControl::kAdaptiveDigital; GainControl::kAdaptiveDigital;
#else #else
GainControl::kAdaptiveAnalog; GainControl::kAdaptiveAnalog;
#endif #endif
const bool kDefaultAgcState = const bool kDefaultAgcState =
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
false; false;
#else #else
true; true;
#endif #endif
const GainControl::Mode kDefaultRxAgcMode = GainControl::kAdaptiveDigital; const GainControl::Mode kDefaultRxAgcMode = GainControl::kAdaptiveDigital;
@ -131,53 +131,50 @@ enum { kVoiceEngineMaxRtpExtensionId = 14 };
// Macros // Macros
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
#define NOT_SUPPORTED(stat) \ #define NOT_SUPPORTED(stat) \
LOG_F(LS_ERROR) << "not supported"; \ LOG_F(LS_ERROR) << "not supported"; \
stat.SetLastError(VE_FUNC_NOT_SUPPORTED); \ stat.SetLastError(VE_FUNC_NOT_SUPPORTED); \
return -1; return -1;
#if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400)) #if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
#include <windows.h> #include <windows.h>
#include <stdio.h> #include <stdio.h>
#define DEBUG_PRINT(...) \ #define DEBUG_PRINT(...) \
{ \ { \
char msg[256]; \ char msg[256]; \
sprintf(msg, __VA_ARGS__); \ sprintf(msg, __VA_ARGS__); \
OutputDebugStringA(msg); \ OutputDebugStringA(msg); \
} }
#else #else
// special fix for visual 2003 // special fix for visual 2003
#define DEBUG_PRINT(exp) ((void)0) #define DEBUG_PRINT(exp) ((void)0)
#endif // defined(_DEBUG) && defined(_WIN32) #endif // defined(_DEBUG) && defined(_WIN32)
#define CHECK_CHANNEL(channel) if (CheckChannel(channel) == -1) return -1; #define CHECK_CHANNEL(channel) \
if (CheckChannel(channel) == -1) \
return -1;
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Inline functions // Inline functions
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
namespace webrtc namespace webrtc {
{
inline int VoEId(int veId, int chId) inline int VoEId(int veId, int chId) {
{ if (chId == -1) {
if (chId == -1) const int dummyChannel(99);
{ return (int)((veId << 16) + dummyChannel);
const int dummyChannel(99); }
return (int) ((veId << 16) + dummyChannel); return (int)((veId << 16) + chId);
}
return (int) ((veId << 16) + chId);
} }
inline int VoEModuleId(int veId, int chId) inline int VoEModuleId(int veId, int chId) {
{ return (int)((veId << 16) + chId);
return (int) ((veId << 16) + chId);
} }
// Convert module ID to internal VoE channel ID // Convert module ID to internal VoE channel ID
inline int VoEChannelId(int moduleId) inline int VoEChannelId(int moduleId) {
{ return (int)(moduleId & 0xffff);
return (int) (moduleId & 0xffff);
} }
} // namespace webrtc } // namespace webrtc
@ -190,21 +187,21 @@ inline int VoEChannelId(int moduleId)
#if defined(_WIN32) #if defined(_WIN32)
#include <windows.h> #include <windows.h>
#pragma comment( lib, "winmm.lib" ) #pragma comment(lib, "winmm.lib")
#ifndef WEBRTC_EXTERNAL_TRANSPORT #ifndef WEBRTC_EXTERNAL_TRANSPORT
#pragma comment( lib, "ws2_32.lib" ) #pragma comment(lib, "ws2_32.lib")
#endif #endif
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Defines // Defines
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Default device for Windows PC // Default device for Windows PC
#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \ #define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \
AudioDeviceModule::kDefaultCommunicationDevice AudioDeviceModule::kDefaultCommunicationDevice
#endif // #if (defined(_WIN32) #endif // #if (defined(_WIN32)
@ -218,11 +215,11 @@ inline int VoEChannelId(int moduleId)
#include <sys/socket.h> #include <sys/socket.h>
#include <sys/types.h> #include <sys/types.h>
#ifndef QNX #ifndef QNX
#include <linux/net.h> #include <linux/net.h>
#ifndef ANDROID #ifndef ANDROID
#include <sys/soundcard.h> #include <sys/soundcard.h>
#endif // ANDROID #endif // ANDROID
#endif // QNX #endif // QNX
#include <errno.h> #include <errno.h>
#include <fcntl.h> #include <fcntl.h>
#include <sched.h> #include <sched.h>
@ -250,8 +247,8 @@ inline int VoEChannelId(int moduleId)
#endif #endif
#define GetLastError() errno #define GetLastError() errno
#define WSAGetLastError() errno #define WSAGetLastError() errno
#define LPCTSTR const char* #define LPCTSTR const char *
#define LPCSTR const char* #define LPCSTR const char *
#define wsprintf sprintf #define wsprintf sprintf
#define TEXT(a) a #define TEXT(a) a
#define _ftprintf fprintf #define _ftprintf fprintf
@ -287,11 +284,11 @@ inline int VoEChannelId(int moduleId)
#include <time.h> #include <time.h>
#include <unistd.h> #include <unistd.h>
#if !defined(WEBRTC_IOS) #if !defined(WEBRTC_IOS)
#include <CoreServices/CoreServices.h> #include <CoreServices/CoreServices.h>
#include <CoreAudio/CoreAudio.h> #include <CoreAudio/CoreAudio.h>
#include <AudioToolbox/DefaultAudioOutput.h> #include <AudioToolbox/DefaultAudioOutput.h>
#include <AudioToolbox/AudioConverter.h> #include <AudioToolbox/AudioConverter.h>
#include <CoreAudio/HostTime.h> #include <CoreAudio/HostTime.h>
#endif #endif
#define DWORD unsigned long int #define DWORD unsigned long int
@ -306,7 +303,7 @@ inline int VoEChannelId(int moduleId)
#define _stricmp strcasecmp #define _stricmp strcasecmp
#define GetLastError() errno #define GetLastError() errno
#define WSAGetLastError() errno #define WSAGetLastError() errno
#define LPCTSTR const char* #define LPCTSTR const char *
#define wsprintf sprintf #define wsprintf sprintf
#define TEXT(a) a #define TEXT(a) a
#define _ftprintf fprintf #define _ftprintf fprintf
@ -314,11 +311,11 @@ inline int VoEChannelId(int moduleId)
#define FAR #define FAR
#define __cdecl #define __cdecl
#define LPSOCKADDR struct sockaddr * #define LPSOCKADDR struct sockaddr *
#define LPCSTR const char* #define LPCSTR const char *
#define ULONG unsigned long #define ULONG unsigned long
// Default device for Mac and iPhone // Default device for Mac and iPhone
#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0 #define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
#endif // #ifdef WEBRTC_MAC #endif // #ifdef WEBRTC_MAC
#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H #endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H

View File

@ -22,8 +22,7 @@
#include "webrtc/system_wrappers/interface/trace.h" #include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/voice_engine/voice_engine_impl.h" #include "webrtc/voice_engine/voice_engine_impl.h"
namespace webrtc namespace webrtc {
{
// Counter to be ensure that we can add a correct ID in all static trace // Counter to be ensure that we can add a correct ID in all static trace
// methods. It is not the nicest solution, especially not since we already // methods. It is not the nicest solution, especially not since we already
@ -31,15 +30,14 @@ namespace webrtc
// improvement here. // improvement here.
static int32_t gVoiceEngineInstanceCounter = 0; static int32_t gVoiceEngineInstanceCounter = 0;
VoiceEngine* GetVoiceEngine(const Config* config, bool owns_config) VoiceEngine* GetVoiceEngine(const Config* config, bool owns_config) {
{
#if (defined _WIN32) #if (defined _WIN32)
HMODULE hmod = LoadLibrary(TEXT("VoiceEngineTestingDynamic.dll")); HMODULE hmod = LoadLibrary(TEXT("VoiceEngineTestingDynamic.dll"));
if (hmod) { if (hmod) {
typedef VoiceEngine* (*PfnGetVoiceEngine)(void); typedef VoiceEngine* (*PfnGetVoiceEngine)(void);
PfnGetVoiceEngine pfn = (PfnGetVoiceEngine)GetProcAddress( PfnGetVoiceEngine pfn =
hmod,"GetVoiceEngine"); (PfnGetVoiceEngine)GetProcAddress(hmod, "GetVoiceEngine");
if (pfn) { if (pfn) {
VoiceEngine* self = pfn(); VoiceEngine* self = pfn();
if (owns_config) { if (owns_config) {
@ -50,13 +48,12 @@ VoiceEngine* GetVoiceEngine(const Config* config, bool owns_config)
} }
#endif #endif
VoiceEngineImpl* self = new VoiceEngineImpl(config, owns_config); VoiceEngineImpl* self = new VoiceEngineImpl(config, owns_config);
if (self != NULL) if (self != NULL) {
{ self->AddRef(); // First reference. Released in VoiceEngine::Delete.
self->AddRef(); // First reference. Released in VoiceEngine::Delete. gVoiceEngineInstanceCounter++;
gVoiceEngineInstanceCounter++; }
} return self;
return self;
} }
int VoiceEngineImpl::AddRef() { int VoiceEngineImpl::AddRef() {
@ -69,8 +66,7 @@ int VoiceEngineImpl::Release() {
assert(new_ref >= 0); assert(new_ref >= 0);
if (new_ref == 0) { if (new_ref == 0) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1, WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
"VoiceEngineImpl self deleting (voiceEngine=0x%p)", "VoiceEngineImpl self deleting (voiceEngine=0x%p)", this);
this);
// Clear any pointers before starting destruction. Otherwise worker- // Clear any pointers before starting destruction. Otherwise worker-
// threads will still have pointers to a partially destructed object. // threads will still have pointers to a partially destructed object.
@ -93,67 +89,62 @@ VoiceEngine* VoiceEngine::Create(const Config& config) {
return GetVoiceEngine(&config, false); return GetVoiceEngine(&config, false);
} }
int VoiceEngine::SetTraceFilter(unsigned int filter) int VoiceEngine::SetTraceFilter(unsigned int filter) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(gVoiceEngineInstanceCounter, -1),
VoEId(gVoiceEngineInstanceCounter, -1), "SetTraceFilter(filter=0x%x)", filter);
"SetTraceFilter(filter=0x%x)", filter);
// Remember old filter // Remember old filter
uint32_t oldFilter = Trace::level_filter(); uint32_t oldFilter = Trace::level_filter();
Trace::set_level_filter(filter); Trace::set_level_filter(filter);
// If previous log was ignored, log again after changing filter // If previous log was ignored, log again after changing filter
if (kTraceNone == oldFilter) if (kTraceNone == oldFilter) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1, "SetTraceFilter(filter=0x%x)",
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1, filter);
"SetTraceFilter(filter=0x%x)", filter); }
}
return 0; return 0;
} }
int VoiceEngine::SetTraceFile(const char* fileNameUTF8, int VoiceEngine::SetTraceFile(const char* fileNameUTF8, bool addFileCounter) {
bool addFileCounter) int ret = Trace::SetTraceFile(fileNameUTF8, addFileCounter);
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
int ret = Trace::SetTraceFile(fileNameUTF8, addFileCounter); VoEId(gVoiceEngineInstanceCounter, -1),
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, "SetTraceFile(fileNameUTF8=%s, addFileCounter=%d)", fileNameUTF8,
VoEId(gVoiceEngineInstanceCounter, -1), addFileCounter);
"SetTraceFile(fileNameUTF8=%s, addFileCounter=%d)", return (ret);
fileNameUTF8, addFileCounter);
return (ret);
} }
int VoiceEngine::SetTraceCallback(TraceCallback* callback) int VoiceEngine::SetTraceCallback(TraceCallback* callback) {
{ WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(gVoiceEngineInstanceCounter, -1),
VoEId(gVoiceEngineInstanceCounter, -1), "SetTraceCallback(callback=0x%x)", callback);
"SetTraceCallback(callback=0x%x)", callback); return (Trace::SetTraceCallback(callback));
return (Trace::SetTraceCallback(callback));
} }
bool VoiceEngine::Delete(VoiceEngine*& voiceEngine) bool VoiceEngine::Delete(VoiceEngine*& voiceEngine) {
{ if (voiceEngine == NULL)
if (voiceEngine == NULL) return false;
return false;
VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine);
// Release the reference that was added in GetVoiceEngine. // Release the reference that was added in GetVoiceEngine.
int ref = s->Release(); int ref = s->Release();
voiceEngine = NULL; voiceEngine = NULL;
if (ref != 0) { if (ref != 0) {
WEBRTC_TRACE(kTraceWarning, kTraceVoice, -1, WEBRTC_TRACE(
"VoiceEngine::Delete did not release the very last reference. " kTraceWarning, kTraceVoice, -1,
"%d references remain.", ref); "VoiceEngine::Delete did not release the very last reference. "
} "%d references remain.",
ref);
}
return true; return true;
} }
#if !defined(WEBRTC_CHROMIUM_BUILD) #if !defined(WEBRTC_CHROMIUM_BUILD)
int VoiceEngine::SetAndroidObjects(void* javaVM, void* context) int VoiceEngine::SetAndroidObjects(void* javaVM, void* context) {
{
#ifdef WEBRTC_ANDROID #ifdef WEBRTC_ANDROID
#ifdef WEBRTC_ANDROID_OPENSLES #ifdef WEBRTC_ANDROID_OPENSLES
typedef AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput> typedef AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>

View File

@ -47,8 +47,7 @@
#include "webrtc/voice_engine/voe_volume_control_impl.h" #include "webrtc/voice_engine/voe_volume_control_impl.h"
#endif #endif
namespace webrtc namespace webrtc {
{
class VoiceEngineImpl : public voe::SharedData, // Must be the first base class class VoiceEngineImpl : public voe::SharedData, // Must be the first base class
public VoiceEngine, public VoiceEngine,
@ -83,11 +82,10 @@ class VoiceEngineImpl : public voe::SharedData, // Must be the first base class
#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
public VoEVolumeControlImpl, public VoEVolumeControlImpl,
#endif #endif
public VoEBaseImpl public VoEBaseImpl {
{ public:
public: VoiceEngineImpl(const Config* config, bool owns_config)
VoiceEngineImpl(const Config* config, bool owns_config) : : SharedData(*config),
SharedData(*config),
#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API #ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
VoEAudioProcessingImpl(this), VoEAudioProcessingImpl(this),
#endif #endif
@ -121,24 +119,20 @@ public:
#endif #endif
VoEBaseImpl(this), VoEBaseImpl(this),
_ref_count(0), _ref_count(0),
own_config_(owns_config ? config : NULL) own_config_(owns_config ? config : NULL) {
{ }
} ~VoiceEngineImpl() override { assert(_ref_count.Value() == 0); }
virtual ~VoiceEngineImpl()
{
assert(_ref_count.Value() == 0);
}
int AddRef(); int AddRef();
// This implements the Release() method for all the inherited interfaces. // This implements the Release() method for all the inherited interfaces.
virtual int Release(); int Release() override;
private: private:
Atomic32 _ref_count; Atomic32 _ref_count;
rtc::scoped_ptr<const Config> own_config_; rtc::scoped_ptr<const Config> own_config_;
}; };
} // namespace webrtc } // namespace webrtc
#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H #endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H