Use backticks not vertical bars to denote variables in comments for /sdk

Bug: webrtc:12338
Change-Id: Ifaad29ccb63b0f2f3aeefb77dae061ebc7f87e6c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227024
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34561}
Author: Artem Titov
Date: 2021-07-27 12:23:39 +02:00
Committer: WebRTC LUCI CQ
Parent: f0671921a1
Commit: d7ac581045
87 changed files with 235 additions and 235 deletions
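
The conversion itself is mechanical. Below is a minimal sketch of the substitution in Java, assuming a simple word-character pattern; the actual script used for this commit is not shown here. It also reproduces the one pitfall visible in the WebRtcAudioEffects hunk further down, where '|' is used as an "or" separator rather than as a variable delimiter.

import java.util.regex.Pattern;

public class BarToBacktick {
  // Hypothetical pattern: matches |name| where the delimited text is a
  // single identifier. The exact regex used for the commit is not shown.
  private static final Pattern VAR_IN_BARS = Pattern.compile("\\|(\\w+)\\|");

  static String convert(String line) {
    return VAR_IN_BARS.matcher(line).replaceAll("`$1`");
  }

  public static void main(String[] args) {
    // Intended case: a variable reference in a comment.
    System.out.println(convert("// Note that the case with odd |sliceHeight| is handled..."));
    // Pitfall: '|' used as alternation also matches, mangling the comment.
    System.out.println(convert("// (NoiseSuppressor|AutomaticGainControl|...).isAvailable()"));
  }
}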


@@ -514,7 +514,7 @@ class AndroidVideoDecoder implements VideoDecoder, VideoSink {
throw new AssertionError("Stride is not divisible by two: " + stride);
}
-// Note that the case with odd |sliceHeight| is handled in a special way.
+// Note that the case with odd `sliceHeight` is handled in a special way.
// The chroma height contained in the payload is rounded down instead of
// up, making it one row less than what we expect in WebRTC. Therefore, we
// have to duplicate the last chroma rows for this case. Also, the offset


@@ -133,7 +133,7 @@ class Camera1Session implements CameraSession {
private static CaptureFormat findClosestCaptureFormat(
android.hardware.Camera.Parameters parameters, int width, int height, int framerate) {
-// Find closest supported format for |width| x |height| @ |framerate|.
+// Find closest supported format for `width` x `height` @ `framerate`.
final List<CaptureFormat.FramerateRange> supportedFramerates =
Camera1Enumerator.convertFramerates(parameters.getSupportedPreviewFpsRange());
Logging.d(TAG, "Available fps ranges: " + supportedFramerates);


@@ -69,7 +69,7 @@ class EglBase14Impl implements EglBase14 {
}
// Create a new context with the specified config type, sharing data with sharedContext.
-// |sharedContext| may be null.
+// `sharedContext` may be null.
public EglBase14Impl(EGLContext sharedContext, int[] configAttributes) {
eglDisplay = getEglDisplay();
eglConfig = getEglConfig(eglDisplay, configAttributes);


@@ -22,7 +22,7 @@ import org.webrtc.Logging;
// This class wraps control of three different platform effects. Supported
// effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
// Calling enable() will active all effects that are
-// supported by the device if the corresponding |shouldEnableXXX| member is set.
+// supported by the device if the corresponding `shouldEnableXXX` member is set.
class WebRtcAudioEffects {
private static final boolean DEBUG = false;
@@ -71,7 +71,7 @@ class WebRtcAudioEffects {
}
// Call this method to enable or disable the platform AEC. It modifies
-// |shouldEnableAec| which is used in enable() where the actual state
+// `shouldEnableAec` which is used in enable() where the actual state
// of the AEC effect is modified. Returns true if HW AEC is supported and
// false otherwise.
public boolean setAEC(boolean enable) {
@@ -90,7 +90,7 @@ class WebRtcAudioEffects {
}
// Call this method to enable or disable the platform NS. It modifies
-// |shouldEnableNs| which is used in enable() where the actual state
+// `shouldEnableNs` which is used in enable() where the actual state
// of the NS effect is modified. Returns true if HW NS is supported and
// false otherwise.
public boolean setNS(boolean enable) {
@@ -180,7 +180,7 @@ class WebRtcAudioEffects {
}
}
-// Returns true for effect types in |type| that are of "VoIP" types:
+// Returns true for effect types in `type` that are of "VoIP" types:
// Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or
// Noise Suppressor (NS). Note that, an extra check for support is needed
// in each comparison since some devices includes effects in the
@@ -217,7 +217,7 @@ class WebRtcAudioEffects {
}
// Returns true if an effect of the specified type is available. Functionally
-// equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but
+// equivalent to (NoiseSuppressor`AutomaticGainControl`...).isAvailable(), but
// faster as it avoids the expensive OS call to enumerate effects.
private static boolean isEffectTypeAvailable(UUID effectType, UUID blockListedUuid) {
Descriptor[] effects = getAvailableEffects();
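
The setAEC()/setNS() comments above describe a deferred-enable pattern: the setter only records the desired state after checking hardware support, and enable() is where the effect is actually toggled. A minimal sketch under that reading; the class and field names below are illustrative, not the real WebRtcAudioEffects implementation.

import android.media.audiofx.AcousticEchoCanceler;

// Illustrative sketch of the documented contract, not the real class.
final class AecToggleSketch {
  private boolean shouldEnableAec; // Desired state, recorded by setAEC().
  private AcousticEchoCanceler aec;

  // Returns false if HW AEC is unsupported; otherwise only records intent.
  boolean setAEC(boolean enable) {
    if (!AcousticEchoCanceler.isAvailable()) {
      return false;
    }
    shouldEnableAec = enable;
    return true;
  }

  // The actual effect state is only modified here, as the comments describe.
  void enable(int audioSessionId) {
    aec = AcousticEchoCanceler.create(audioSessionId);
    if (aec != null) {
      aec.setEnabled(shouldEnableAec);
    }
  }
}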


@@ -237,7 +237,7 @@ class WebRtcAudioRecord {
// Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when
// startRecording() has been called. Hence, should preferably be called in combination with
-// stopRecording() to ensure that it has been set properly. |isAudioConfigVerified| is
+// stopRecording() to ensure that it has been set properly. `isAudioConfigVerified` is
// enabled in WebRtcAudioRecord to ensure that the returned value is valid.
@CalledByNative
boolean isAudioSourceMatchingRecordingSession() {
@@ -491,7 +491,7 @@ class WebRtcAudioRecord {
long nativeAudioRecordJni, ByteBuffer byteBuffer);
private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes);
-// Sets all recorded samples to zero if |mute| is true, i.e., ensures that
+// Sets all recorded samples to zero if `mute` is true, i.e., ensures that
// the microphone is muted.
public void setMicrophoneMute(boolean mute) {
Logging.w(TAG, "setMicrophoneMute(" + mute + ")");


@@ -76,7 +76,7 @@ class WebRtcAudioTrack {
private @Nullable AudioTrackThread audioThread;
private final VolumeLogger volumeLogger;
-// Samples to be played are replaced by zeros if |speakerMute| is set to true.
+// Samples to be played are replaced by zeros if `speakerMute` is set to true.
// Can be used to ensure that the speaker is fully muted.
private volatile boolean speakerMute;
private byte[] emptyBytes;
@@ -218,9 +218,9 @@ class WebRtcAudioTrack {
Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
// For the streaming mode, data must be written to the audio sink in
// chunks of size (given by byteBuffer.capacity()) less than or equal
-// to the total buffer size |minBufferSizeInBytes|. But, we have seen
+// to the total buffer size `minBufferSizeInBytes`. But, we have seen
// reports of "getMinBufferSize(): error querying hardware". Hence, it
-// can happen that |minBufferSizeInBytes| contains an invalid value.
+// can happen that `minBufferSizeInBytes` contains an invalid value.
if (minBufferSizeInBytes < byteBuffer.capacity()) {
reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
return -1;
@@ -559,7 +559,7 @@ class WebRtcAudioTrack {
long nativeAudioTrackJni, ByteBuffer byteBuffer);
private static native void nativeGetPlayoutData(long nativeAudioTrackJni, int bytes);
-// Sets all samples to be played out to zero if |mute| is true, i.e.,
+// Sets all samples to be played out to zero if `mute` is true, i.e.,
// ensures that the speaker is muted.
public void setSpeakerMute(boolean mute) {
Logging.w(TAG, "setSpeakerMute(" + mute + ")");


@@ -31,14 +31,14 @@ static ScopedJavaLocalRef<jobject> JNI_Metrics_GetAndReset(JNIEnv* jni) {
std::map<std::string, std::unique_ptr<metrics::SampleInfo>> histograms;
metrics::GetAndReset(&histograms);
for (const auto& kv : histograms) {
-// Create and add samples to |HistogramInfo|.
+// Create and add samples to `HistogramInfo`.
ScopedJavaLocalRef<jobject> j_info = Java_HistogramInfo_Constructor(
jni, kv.second->min, kv.second->max,
static_cast<int>(kv.second->bucket_count));
for (const auto& sample : kv.second->samples) {
Java_HistogramInfo_addSample(jni, j_info, sample.first, sample.second);
}
-// Add |HistogramInfo| to |Metrics|.
+// Add `HistogramInfo` to `Metrics`.
ScopedJavaLocalRef<jstring> j_name = NativeToJavaString(jni, kv.first);
Java_Metrics_add(jni, j_metrics, j_name, j_info);
}


@@ -376,7 +376,7 @@ rtc::NetworkBindingResult AndroidNetworkMonitor::BindSocketToNetwork(
rv = lollipopSetNetworkForSocket(*network_handle, socket_fd);
}
-// If |network| has since disconnected, |rv| will be ENONET. Surface this as
+// If `network` has since disconnected, `rv` will be ENONET. Surface this as
// ERR_NETWORK_CHANGED, rather than MapSystemError(ENONET) which gives back
// the less descriptive ERR_FAILED.
if (rv == 0) {


@@ -76,7 +76,7 @@ class AndroidNetworkMonitor : public rtc::NetworkMonitorInterface {
void Start() override;
void Stop() override;
-// Does |this| NetworkMonitorInterface implement BindSocketToNetwork?
+// Does `this` NetworkMonitorInterface implement BindSocketToNetwork?
// Only Android returns true.
virtual bool SupportsBindSocketToNetwork() const override { return true; }


@@ -200,7 +200,7 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
}
// Read audio data from the WebRTC source using the FineAudioBuffer object
-// and write that data into |audio_data| to be played out by AAudio.
+// and write that data into `audio_data` to be played out by AAudio.
// Prime output with zeros during a short initial phase to avoid distortion.
// TODO(henrika): do more work to figure out of if the initial forced silence
// period is really needed.


@@ -81,8 +81,8 @@ class AAudioPlayer final : public AudioOutput,
protected:
// AAudioObserverInterface implementation.
-// For an output stream, this function should render and write |num_frames|
-// of data in the streams current data format to the |audio_data| buffer.
+// For an output stream, this function should render and write `num_frames`
+// of data in the streams current data format to the `audio_data` buffer.
// Called on a real-time thread owned by AAudio.
aaudio_data_callback_result_t OnDataCallback(void* audio_data,
int32_t num_frames) override;


@@ -157,7 +157,7 @@ void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
}
}
-// Read and process |num_frames| of data from the |audio_data| buffer.
+// Read and process `num_frames` of data from the `audio_data` buffer.
// TODO(henrika): possibly add trace here to be included in systrace.
// See https://developer.android.com/studio/profile/systrace-commandline.html.
aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
@@ -191,7 +191,7 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
RTC_DLOG(INFO) << "input latency: " << latency_millis_
<< ", num_frames: " << num_frames;
}
-// Copy recorded audio in |audio_data| to the WebRTC sink using the
+// Copy recorded audio in `audio_data` to the WebRTC sink using the
// FineAudioBuffer object.
fine_audio_buffer_->DeliverRecordedData(
rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),


@@ -72,8 +72,8 @@ class AAudioRecorder : public AudioInput,
protected:
// AAudioObserverInterface implementation.
-// For an input stream, this function should read |num_frames| of recorded
-// data, in the stream's current data format, from the |audio_data| buffer.
+// For an input stream, this function should read `num_frames` of recorded
+// data, in the stream's current data format, from the `audio_data` buffer.
// Called on a real-time thread owned by AAudio.
aaudio_data_callback_result_t OnDataCallback(void* audio_data,
int32_t num_frames) override;


@@ -253,8 +253,8 @@ void AudioRecordJni::DataIsRecorded(JNIEnv* env,
audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
frames_per_buffer_);
// We provide one (combined) fixed delay estimate for the APM and use the
-// |playDelayMs| parameter only. Components like the AEC only sees the sum
-// of |playDelayMs| and |recDelayMs|, hence the distributions does not matter.
+// `playDelayMs` parameter only. Components like the AEC only sees the sum
+// of `playDelayMs` and `recDelayMs`, hence the distributions does not matter.
audio_device_buffer_->SetVQEData(total_delay_ms_, 0);
if (audio_device_buffer_->DeliverRecordedData() == -1) {
RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";


@@ -74,8 +74,8 @@ class AudioRecordJni : public AudioInput {
int32_t EnableBuiltInNS(bool enable) override;
// Called from Java side so we can cache the address of the Java-manged
-// |byte_buffer| in |direct_buffer_address_|. The size of the buffer
-// is also stored in |direct_buffer_capacity_in_bytes_|.
+// `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+// is also stored in `direct_buffer_capacity_in_bytes_`.
// This method will be called by the WebRtcAudioRecord constructor, i.e.,
// on the same thread that this object is created on.
void CacheDirectBufferAddress(JNIEnv* env,
@@ -83,8 +83,8 @@ class AudioRecordJni : public AudioInput {
const JavaParamRef<jobject>& byte_buffer);
// Called periodically by the Java based WebRtcAudioRecord object when
-// recording has started. Each call indicates that there are |length| new
-// bytes recorded in the memory area |direct_buffer_address_| and it is
+// recording has started. Each call indicates that there are `length` new
+// bytes recorded in the memory area `direct_buffer_address_` and it is
// now time to send these to the consumer.
// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioRecordThread'.
@@ -111,10 +111,10 @@ class AudioRecordJni : public AudioInput {
// possible values. See audio_common.h for details.
const int total_delay_ms_;
-// Cached copy of address to direct audio buffer owned by |j_audio_record_|.
+// Cached copy of address to direct audio buffer owned by `j_audio_record_`.
void* direct_buffer_address_;
-// Number of bytes in the direct audio buffer owned by |j_audio_record_|.
+// Number of bytes in the direct audio buffer owned by `j_audio_record_`.
size_t direct_buffer_capacity_in_bytes_;
// Number audio frames per audio buffer. Each audio frame corresponds to


@@ -71,14 +71,14 @@ class AudioTrackJni : public AudioOutput {
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
// Called from Java side so we can cache the address of the Java-manged
-// |byte_buffer| in |direct_buffer_address_|. The size of the buffer
-// is also stored in |direct_buffer_capacity_in_bytes_|.
+// `byte_buffer` in `direct_buffer_address_`. The size of the buffer
+// is also stored in `direct_buffer_capacity_in_bytes_`.
// Called on the same thread as the creating thread.
void CacheDirectBufferAddress(JNIEnv* env,
const JavaParamRef<jobject>& byte_buffer);
// Called periodically by the Java based WebRtcAudioTrack object when
-// playout has started. Each call indicates that |length| new bytes should
-// be written to the memory area |direct_buffer_address_| for playout.
+// playout has started. Each call indicates that `length` new bytes should
+// be written to the memory area `direct_buffer_address_` for playout.
// This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioTrackThread'.
void GetPlayoutData(JNIEnv* env, size_t length);
@@ -99,10 +99,10 @@ class AudioTrackJni : public AudioOutput {
// AudioManager.
const AudioParameters audio_parameters_;
-// Cached copy of address to direct audio buffer owned by |j_audio_track_|.
+// Cached copy of address to direct audio buffer owned by `j_audio_track_`.
void* direct_buffer_address_;
-// Number of bytes in the direct audio buffer owned by |j_audio_track_|.
+// Number of bytes in the direct audio buffer owned by `j_audio_track_`.
size_t direct_buffer_capacity_in_bytes_;
// Number of audio frames per audio buffer. Each audio frame corresponds to


@@ -95,7 +95,7 @@ class OpenSLESPlayer : public AudioOutput {
// Reads audio data in PCM format using the AudioDeviceBuffer.
// Can be called both on the main thread (during Start()) and from the
// internal audio thread while output streaming is active.
-// If the |silence| flag is set, the audio is filled with zeros instead of
+// If the `silence` flag is set, the audio is filled with zeros instead of
// asking the WebRTC layer for real audio data. This procedure is also known
// as audio priming.
void EnqueuePlayoutData(bool silence);
@@ -106,7 +106,7 @@ class OpenSLESPlayer : public AudioOutput {
// Obtaines the SL Engine Interface from the existing global Engine object.
// The interface exposes creation methods of all the OpenSL ES object types.
-// This method defines the |engine_| member variable.
+// This method defines the `engine_` member variable.
bool ObtainEngineInterface();
// Creates/destroys the output mix object.


@@ -88,7 +88,7 @@ class OpenSLESRecorder : public AudioInput {
private:
// Obtaines the SL Engine Interface from the existing global Engine object.
// The interface exposes creation methods of all the OpenSL ES object types.
-// This method defines the |engine_| member variable.
+// This method defines the `engine_` member variable.
bool ObtainEngineInterface();
// Creates/destroys the audio recorder and the simple-buffer queue object.
@@ -109,7 +109,7 @@ class OpenSLESRecorder : public AudioInput {
// Wraps calls to SLAndroidSimpleBufferQueueState::Enqueue() and it can be
// called both on the main thread (but before recording has started) and from
// the internal audio thread while input streaming is active. It uses
-// |simple_buffer_queue_| but no lock is needed since the initial calls from
+// `simple_buffer_queue_` but no lock is needed since the initial calls from
// the main thread and the native callback thread are mutually exclusive.
bool EnqueueAudioBuffer();


@@ -15,7 +15,7 @@
namespace webrtc {
-// If |atomic_class_id| set, it'll return immediately. Otherwise, it will look
+// If `atomic_class_id` set, it'll return immediately. Otherwise, it will look
// up the class and store it. If there's a race, we take care to only store one
// global reference (and the duplicated effort will happen only once).
jclass LazyGetClass(JNIEnv* env,
@@ -29,18 +29,18 @@ jclass LazyGetClass(JNIEnv* env,
jclass cas_result = nullptr;
if (std::atomic_compare_exchange_strong(atomic_class_id, &cas_result,
clazz.obj())) {
-// We sucessfully stored |clazz| in |atomic_class_id|, so we are
+// We sucessfully stored `clazz` in `atomic_class_id`, so we are
// intentionally leaking the global ref since it's now stored there.
return clazz.Release();
} else {
// Some other thread came before us and stored a global pointer in
-// |atomic_class_id|. Relase our global ref and return the ref from the
+// `atomic_class_id`. Relase our global ref and return the ref from the
// other thread.
return cas_result;
}
}
-// If |atomic_method_id| set, it'll return immediately. Otherwise, it will look
+// If `atomic_method_id` set, it'll return immediately. Otherwise, it will look
// up the method id and store it. If there's a race, it's ok since the values
// are the same (and the duplicated effort will happen only once).
template <MethodID::Type type>
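
The two comments above describe the same lazy-caching idiom: read the atomic first, fall back to the expensive lookup, and let a compare-and-swap pick a single winner so duplicated work is bounded. A Java analogue of that idiom, as a hedged illustration rather than the actual JNI code:

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

final class LazyCache<T> {
  private final AtomicReference<T> cached = new AtomicReference<>();

  T get(Supplier<T> expensiveLookup) {
    T value = cached.get();
    if (value != null) {
      return value; // Already stored: return immediately.
    }
    T fresh = expensiveLookup.get();
    // On a race, only the first compareAndSet wins; losers discard their
    // result and return the winner's, so the duplicated effort happens
    // at most once per losing thread.
    return cached.compareAndSet(null, fresh) ? fresh : cached.get();
  }
}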


@@ -44,11 +44,11 @@
namespace webrtc {
-// This function will initialize |atomic_class_id| to contain a global ref to
+// This function will initialize `atomic_class_id` to contain a global ref to
// the given class, and will return that ref on subsequent calls. The caller is
-// responsible to zero-initialize |atomic_class_id|. It's fine to
+// responsible to zero-initialize `atomic_class_id`. It's fine to
// simultaneously call this on multiple threads referencing the same
-// |atomic_method_id|.
+// `atomic_method_id`.
jclass LazyGetClass(JNIEnv* env,
const char* class_name,
std::atomic<jclass>* atomic_class_id);
@@ -61,11 +61,11 @@ class MethodID {
TYPE_INSTANCE,
};
-// This function will initialize |atomic_method_id| to contain a ref to
+// This function will initialize `atomic_method_id` to contain a ref to
// the given method, and will return that ref on subsequent calls. The caller
-// is responsible to zero-initialize |atomic_method_id|. It's fine to
+// is responsible to zero-initialize `atomic_method_id`. It's fine to
// simultaneously call this on multiple threads referencing the same
-// |atomic_method_id|.
+// `atomic_method_id`.
template <Type type>
static jmethodID LazyGet(JNIEnv* env,
jclass clazz,
@@ -151,7 +151,7 @@ struct BASE_EXPORT JniJavaCallContextChecked {
const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id) {
base.Init<type>(env, clazz, method_name, jni_signature, atomic_method_id);
-// Reset |pc| to correct caller.
+// Reset `pc` to correct caller.
base.pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
}


@@ -27,7 +27,7 @@ static JavaVM* g_jvm = nullptr;
static pthread_once_t g_jni_ptr_once = PTHREAD_ONCE_INIT;
-// Key for per-thread JNIEnv* data. Non-NULL in threads attached to |g_jvm| by
+// Key for per-thread JNIEnv* data. Non-NULL in threads attached to `g_jvm` by
// AttachCurrentThreadIfNeeded(), NULL in unattached threads and threads that
// were attached by the JVM because of a Java->native call.
static pthread_key_t g_jni_ptr;
@@ -48,7 +48,7 @@ JNIEnv* GetEnv() {
}
static void ThreadDestructor(void* prev_jni_ptr) {
-// This function only runs on threads where |g_jni_ptr| is non-NULL, meaning
+// This function only runs on threads where `g_jni_ptr` is non-NULL, meaning
// we were responsible for originally attaching the thread, so are responsible
// for detaching it now. However, because some JVM implementations (notably
// Oracle's http://goo.gl/eHApYT) also use the pthread_key_create mechanism,
@@ -102,7 +102,7 @@ static std::string GetThreadName() {
return std::string(name);
}
-// Return a |JNIEnv*| usable on this thread. Attaches to |g_jvm| if necessary.
+// Return a |JNIEnv*| usable on this thread. Attaches to `g_jvm` if necessary.
JNIEnv* AttachCurrentThreadIfNeeded() {
JNIEnv* jni = GetEnv();
if (jni)


@@ -23,7 +23,7 @@ JNIEnv* GetEnv();
JavaVM* GetJVM();
-// Return a |JNIEnv*| usable on this thread. Attaches to |g_jvm| if necessary.
+// Return a |JNIEnv*| usable on this thread. Attaches to `g_jvm` if necessary.
JNIEnv* AttachCurrentThreadIfNeeded();
} // namespace jni


@@ -49,7 +49,7 @@ JavaMediaStream::JavaMediaStream(
observer_->SignalVideoTrackAdded.connect(
this, &JavaMediaStream::OnVideoTrackAddedToStream);
-// |j_media_stream| holds one reference. Corresponding Release() is in
+// `j_media_stream` holds one reference. Corresponding Release() is in
// MediaStream_free, triggered by MediaStream.dispose().
media_stream.release();
}


@@ -499,7 +499,7 @@ static ScopedJavaLocalRef<jobject> JNI_PeerConnection_GetLocalDescription(
const JavaParamRef<jobject>& j_pc) {
PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc);
// It's only safe to operate on SessionDescriptionInterface on the
-// signaling thread, but |jni| may only be used on the current thread, so we
+// signaling thread, but `jni` may only be used on the current thread, so we
// must do this odd dance.
std::string sdp;
std::string type;
@@ -518,7 +518,7 @@ static ScopedJavaLocalRef<jobject> JNI_PeerConnection_GetRemoteDescription(
const JavaParamRef<jobject>& j_pc) {
PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc);
// It's only safe to operate on SessionDescriptionInterface on the
-// signaling thread, but |jni| may only be used on the current thread, so we
+// signaling thread, but `jni` may only be used on the current thread, so we
// must do this odd dance.
std::string sdp;
std::string type;


@@ -242,9 +242,9 @@ static void JNI_PeerConnectionFactory_ShutdownInternalTracer(JNIEnv* jni) {
}
// Following parameters are optional:
-// |audio_device_module|, |jencoder_factory|, |jdecoder_factory|,
-// |audio_processor|, |fec_controller_factory|,
-// |network_state_predictor_factory|, |neteq_factory|.
+// `audio_device_module`, `jencoder_factory`, `jdecoder_factory`,
+// `audio_processor`, `fec_controller_factory`,
+// `network_state_predictor_factory`, `neteq_factory`.
ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
JNIEnv* jni,
const JavaParamRef<jobject>& jcontext,


@@ -18,7 +18,7 @@
namespace webrtc {
namespace jni {
-// Creates java PeerConnectionFactory with specified |pcf|.
+// Creates java PeerConnectionFactory with specified `pcf`.
jobject NativeToJavaPeerConnectionFactory(
JNIEnv* jni,
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> pcf,


@@ -23,7 +23,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaRtpReceiver(
JNIEnv* env,
rtc::scoped_refptr<RtpReceiverInterface> receiver);
-// Takes ownership of the passed |j_receiver| and stores it as a global
+// Takes ownership of the passed `j_receiver` and stores it as a global
// reference. Will call dispose() in the dtor.
class JavaRtpReceiverGlobalOwner {
public:


@@ -27,7 +27,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaRtpTransceiver(
JNIEnv* env,
rtc::scoped_refptr<RtpTransceiverInterface> transceiver);
-// This takes ownership of the of the |j_transceiver| and stores it as a global
+// This takes ownership of the of the `j_transceiver` and stores it as a global
// reference. This calls the Java Transceiver's dispose() method with the dtor.
class JavaRtpTransceiverGlobalOwner {
public:


@@ -267,7 +267,7 @@ void VideoEncoderWrapper::OnEncodedFrame(
frame_extra_infos_.pop_front();
}
-// This is a bit subtle. The |frame| variable from the lambda capture is
+// This is a bit subtle. The `frame` variable from the lambda capture is
// const. Which implies that (i) we need to make a copy to be able to
// write to the metadata, and (ii) we should avoid using the .data()
// method (including implicit conversion to ArrayView) on the non-const


@@ -41,8 +41,8 @@ class AndroidVideoBuffer : public VideoFrameBuffer {
const ScopedJavaGlobalRef<jobject>& video_frame_buffer() const;
-// Crops a region defined by |crop_x|, |crop_y|, |crop_width| and
-// |crop_height|. Scales it to size |scale_width| x |scale_height|.
+// Crops a region defined by `crop_x`, `crop_y`, `crop_width` and
+// `crop_height`. Scales it to size `scale_width` x `scale_height`.
rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int crop_x,
int crop_y,
int crop_width,