Use backticks not vertical bars to denote variables in comments for /sdk
Bug: webrtc:12338
Change-Id: Ifaad29ccb63b0f2f3aeefb77dae061ebc7f87e6c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227024
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34561}
Committed by: WebRTC LUCI CQ
Parent: f0671921a1
Commit: d7ac581045
@@ -158,7 +158,7 @@ public class Camera1Enumerator implements CameraEnumerator {
     return ranges;
   }
 
-  // Returns the camera index for camera with name |deviceName|, or throws IllegalArgumentException
+  // Returns the camera index for camera with name `deviceName`, or throws IllegalArgumentException
   // if no such camera can be found.
   static int getCameraIndex(String deviceName) {
     Logging.d(TAG, "getCameraIndex: " + deviceName);
@@ -152,24 +152,24 @@ public class CameraEnumerationAndroid {
     }
   }
 
-  // Prefer a fps range with an upper bound close to |framerate|. Also prefer a fps range with a low
+  // Prefer a fps range with an upper bound close to `framerate`. Also prefer a fps range with a low
   // lower bound, to allow the framerate to fluctuate based on lightning conditions.
   public static CaptureFormat.FramerateRange getClosestSupportedFramerateRange(
       List<CaptureFormat.FramerateRange> supportedFramerates, final int requestedFps) {
     return Collections.min(
         supportedFramerates, new ClosestComparator<CaptureFormat.FramerateRange>() {
-          // Progressive penalty if the upper bound is further away than |MAX_FPS_DIFF_THRESHOLD|
+          // Progressive penalty if the upper bound is further away than `MAX_FPS_DIFF_THRESHOLD`
           // from requested.
           private static final int MAX_FPS_DIFF_THRESHOLD = 5000;
           private static final int MAX_FPS_LOW_DIFF_WEIGHT = 1;
           private static final int MAX_FPS_HIGH_DIFF_WEIGHT = 3;
 
-          // Progressive penalty if the lower bound is bigger than |MIN_FPS_THRESHOLD|.
+          // Progressive penalty if the lower bound is bigger than `MIN_FPS_THRESHOLD`.
          private static final int MIN_FPS_THRESHOLD = 8000;
          private static final int MIN_FPS_LOW_VALUE_WEIGHT = 1;
          private static final int MIN_FPS_HIGH_VALUE_WEIGHT = 4;
 
-          // Use one weight for small |value| less than |threshold|, and another weight above.
+          // Use one weight for small `value` less than `threshold`, and another weight above.
          private int progressivePenalty(int value, int threshold, int lowWeight, int highWeight) {
            return (value < threshold) ? value * lowWeight
                                       : threshold * lowWeight + (value - threshold) * highWeight;
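For illustration, here is how the penalty arithmetic above plays out for one candidate range. This is a standalone sketch: the sample range values and the fps*1000 scaling of the thresholds are assumptions chosen to match the constants in this hunk.

final class PenaltyExample {
  // Same shape as the progressivePenalty() shown in the diff above.
  static int progressivePenalty(int value, int threshold, int lowWeight, int highWeight) {
    return (value < threshold) ? value * lowWeight
                               : threshold * lowWeight + (value - threshold) * highWeight;
  }

  public static void main(String[] args) {
    int requestedFps = 30000; // 30 fps in fps*1000 units
    int rangeMin = 15000, rangeMax = 24000; // hypothetical [15, 24] fps candidate range
    // Upper bound: |30000 - 24000| = 6000 exceeds the 5000 threshold,
    // so the excess 1000 is weighted by 3: 5000*1 + 1000*3 = 8000.
    int maxFpsPenalty = progressivePenalty(Math.abs(requestedFps - rangeMax), 5000, 1, 3);
    // Lower bound: 15000 is above the 8000 threshold: 8000*1 + 7000*4 = 36000.
    int minFpsPenalty = progressivePenalty(rangeMin, 8000, 1, 4);
    System.out.println("total penalty = " + (maxFpsPenalty + minFpsPenalty)); // 44000
  }
}

A range whose upper bound sits near the request and whose lower bound is small scores lower, and Collections.min() picks it.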
@@ -48,7 +48,7 @@ public interface CameraVideoCapturer extends VideoCapturer {
    * The callback may be called on an arbitrary thread.
    */
   public interface CameraSwitchHandler {
-    // Invoked on success. |isFrontCamera| is true if the new camera is front facing.
+    // Invoked on success. `isFrontCamera` is true if the new camera is front facing.
     void onCameraSwitchDone(boolean isFrontCamera);
 
     // Invoked on failure, e.g. camera is stopped or only one camera available.
@@ -63,7 +63,7 @@ public class DataChannel {
     public final ByteBuffer data;
 
     /**
-     * Indicates whether |data| contains UTF-8 text or "binary data"
+     * Indicates whether `data` contains UTF-8 text or "binary data"
      * (i.e. anything else).
      */
     public final boolean binary;
@@ -110,7 +110,7 @@ public class DataChannel {
     this.nativeDataChannel = nativeDataChannel;
   }
 
-  /** Register |observer|, replacing any previously-registered observer. */
+  /** Register `observer`, replacing any previously-registered observer. */
   public void registerObserver(Observer observer) {
     checkDataChannelExists();
     if (nativeObserver != 0) {
@@ -157,7 +157,7 @@ public class DataChannel {
     nativeClose();
   }
 
-  /** Send |data| to the remote peer; return success. */
+  /** Send `data` to the remote peer; return success. */
   public boolean send(Buffer buffer) {
     checkDataChannelExists();
     // TODO(fischman): this could be cleverer about avoiding copies if the
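For context, a minimal usage sketch of the Buffer/send API touched above. It assumes an already-open channel; the helper name is invented for the example.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.webrtc.DataChannel;

final class DataChannelSendExample {
  // Wraps UTF-8 text in a Buffer; `binary` is false because the payload is text.
  static boolean sendText(DataChannel dataChannel, String text) {
    ByteBuffer payload = ByteBuffer.wrap(text.getBytes(StandardCharsets.UTF_8));
    return dataChannel.send(new DataChannel.Buffer(payload, /* binary= */ false));
  }
}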
@@ -146,8 +146,8 @@ public interface EglBase {
   }
 
   /**
-   * Create a new context with the specified config attributes, sharing data with |sharedContext|.
-   * If |sharedContext| is null, a root context is created. This function will try to create an EGL
+   * Create a new context with the specified config attributes, sharing data with `sharedContext`.
+   * If `sharedContext` is null, a root context is created. This function will try to create an EGL
    * 1.4 context if possible, and an EGL 1.0 context otherwise.
    */
   public static EglBase create(@Nullable Context sharedContext, int[] configAttributes) {
@@ -171,7 +171,7 @@ public interface EglBase {
   }
 
   /**
-   * Helper function for creating a plain context, sharing data with |sharedContext|. This function
+   * Helper function for creating a plain context, sharing data with `sharedContext`. This function
    * will try to create an EGL 1.4 context if possible, and an EGL 1.0 context otherwise.
    */
   public static EglBase create(Context sharedContext) {
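A sketch of the sharing pattern these create() overloads enable. The no-argument EglBase.create() and getEglBaseContext() belong to the same interface, but treat the flow as illustrative rather than prescribed by this patch.

import org.webrtc.EglBase;

final class EglShareExample {
  static void demo() {
    EglBase root = EglBase.create(); // root context, nothing shared yet
    // Second context sharing textures and buffers with the root context.
    EglBase shared = EglBase.create(root.getEglBaseContext());
    shared.release();
    root.release();
  }
}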
@@ -111,8 +111,8 @@ public class EglRenderer implements VideoSink {
 
   protected final String name;
 
-  // |renderThreadHandler| is a handler for communicating with |renderThread|, and is synchronized
-  // on |handlerLock|.
+  // `renderThreadHandler` is a handler for communicating with `renderThread`, and is synchronized
+  // on `handlerLock`.
   private final Object handlerLock = new Object();
   @Nullable private Handler renderThreadHandler;
 
@@ -136,11 +136,11 @@ public class EglRenderer implements VideoSink {
   private boolean usePresentationTimeStamp;
   private final Matrix drawMatrix = new Matrix();
 
-  // Pending frame to render. Serves as a queue with size 1. Synchronized on |frameLock|.
+  // Pending frame to render. Serves as a queue with size 1. Synchronized on `frameLock`.
   private final Object frameLock = new Object();
   @Nullable private VideoFrame pendingFrame;
 
-  // These variables are synchronized on |layoutLock|.
+  // These variables are synchronized on `layoutLock`.
   private final Object layoutLock = new Object();
   private float layoutAspectRatio;
   // If true, mirrors the video stream horizontally.
@@ -148,7 +148,7 @@ public class EglRenderer implements VideoSink {
   // If true, mirrors the video stream vertically.
   private boolean mirrorVertically;
 
-  // These variables are synchronized on |statisticsLock|.
+  // These variables are synchronized on `statisticsLock`.
   private final Object statisticsLock = new Object();
   // Total number of video frames received in renderFrame() call.
   private int framesReceived;
@@ -198,9 +198,9 @@ public class EglRenderer implements VideoSink {
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+   * Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
    * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-   * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+   * `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
    * init()/release() cycle. If usePresentationTimeStamp is true, eglPresentationTimeANDROID will be
    * set with the frame timestamps, which specifies desired presentation time and might be useful
    * for e.g. syncing audio and video.
@@ -592,10 +592,10 @@ public class EglRenderer implements VideoSink {
   }
 
   /**
-   * Renders and releases |pendingFrame|.
+   * Renders and releases `pendingFrame`.
    */
   private void renderFrameOnRenderThread() {
-    // Fetch and render |pendingFrame|.
+    // Fetch and render `pendingFrame`.
     final VideoFrame frame;
     synchronized (frameLock) {
       if (pendingFrame == null) {
@@ -78,16 +78,16 @@ public class GlShader {
   }
 
   /**
-   * Enable and upload a vertex array for attribute |label|. The vertex data is specified in
-   * |buffer| with |dimension| number of components per vertex.
+   * Enable and upload a vertex array for attribute `label`. The vertex data is specified in
+   * `buffer` with `dimension` number of components per vertex.
    */
   public void setVertexAttribArray(String label, int dimension, FloatBuffer buffer) {
     setVertexAttribArray(label, dimension, 0 /* stride */, buffer);
   }
 
   /**
-   * Enable and upload a vertex array for attribute |label|. The vertex data is specified in
-   * |buffer| with |dimension| number of components per vertex and specified |stride|.
+   * Enable and upload a vertex array for attribute `label`. The vertex data is specified in
+   * `buffer` with `dimension` number of components per vertex and specified `stride`.
    */
   public void setVertexAttribArray(String label, int dimension, int stride, FloatBuffer buffer) {
     if (program == -1) {
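As an aside, a sketch of feeding a full-screen quad through setVertexAttribArray(). The attribute name "in_pos" is hypothetical, and a GL context must be current on the calling thread.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import org.webrtc.GlShader;

final class QuadUploadExample {
  static void uploadQuad(GlShader shader) {
    // Four 2D vertices forming a triangle strip that covers the viewport.
    float[] quad = {-1f, -1f, 1f, -1f, -1f, 1f, 1f, 1f};
    FloatBuffer buf = ByteBuffer.allocateDirect(quad.length * 4)
                          .order(ByteOrder.nativeOrder())
                          .asFloatBuffer();
    buf.put(quad).rewind();
    // Two components per vertex, tightly packed (the stride-0 overload above).
    shader.setVertexAttribArray("in_pos", /* dimension= */ 2, buf);
  }
}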
@@ -18,12 +18,12 @@ import java.util.Map;
 // Rtc histograms can be queried through the API, getAndReset().
 // The returned map holds the name of a histogram and its samples.
 //
-// Example of |map| with one histogram:
-// |name|: "WebRTC.Video.InputFramesPerSecond"
-// |min|: 1
-// |max|: 100
-// |bucketCount|: 50
-// |samples|: [30]:1
+// Example of `map` with one histogram:
+// `name`: "WebRTC.Video.InputFramesPerSecond"
+// `min`: 1
+// `max`: 100
+// `bucketCount`: 50
+// `samples`: [30]:1
 //
 // Most histograms are not updated frequently (e.g. most video metrics are an
 // average over the call and recorded when a stream is removed).
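To make the shape of `map` concrete, a sketch of dumping every histogram. Field names follow the comments above (min, max, bucketCount, samples); the exact accessor types are an assumption, not taken from this patch.

import java.util.Map;
import org.webrtc.Metrics;

final class MetricsDumpExample {
  static void dump() {
    // Assumes metrics collection was enabled during initialization.
    Metrics metrics = Metrics.getAndReset();
    for (Map.Entry<String, Metrics.HistogramInfo> e : metrics.map.entrySet()) {
      Metrics.HistogramInfo h = e.getValue();
      System.out.println(e.getKey() + ": min=" + h.min + " max=" + h.max
          + " buckets=" + h.bucketCount + " samples=" + h.samples);
    }
  }
}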
@@ -98,9 +98,9 @@ public interface NetworkChangeDetector {
 
   /**
    * Called when network preference change for a (list of) connection type(s). (e.g WIFI) is
-   * |NOT_PREFERRED| or |NEUTRAL|.
+   * `NOT_PREFERRED` or `NEUTRAL`.
    *
-   * <p>note: |types| is a list of ConnectionTypes, so that all cellular types can be modified in
+   * <p>note: `types` is a list of ConnectionTypes, so that all cellular types can be modified in
    * one call.
    */
   public void onNetworkPreference(List<ConnectionType> types, @NetworkPreference int preference);
@@ -172,7 +172,7 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
   }
 
   /**
-   * Returns connection type and status information about |network|.
+   * Returns connection type and status information about `network`.
    * Only callable on Lollipop and newer releases.
    */
   @SuppressLint("NewApi")
@@ -186,9 +186,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
       return new NetworkState(false, -1, -1, -1, -1);
     }
     // The general logic of handling a VPN in this method is as follows. getNetworkInfo will
-    // return the info of the network with the same id as in |network| when it is registered via
-    // ConnectivityManager.registerNetworkAgent in Android. |networkInfo| may or may not indicate
-    // the type TYPE_VPN if |network| is a VPN. To reliably detect the VPN interface, we need to
+    // return the info of the network with the same id as in `network` when it is registered via
+    // ConnectivityManager.registerNetworkAgent in Android. `networkInfo` may or may not indicate
+    // the type TYPE_VPN if `network` is a VPN. To reliably detect the VPN interface, we need to
     // query the network capability as below in the case when networkInfo.getType() is not
     // TYPE_VPN. On the other hand when networkInfo.getType() is TYPE_VPN, the only solution so
     // far to obtain the underlying network information is to query the active network interface.
@@ -198,7 +198,7 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
     // getActiveNetworkInfo may thus give the wrong interface information, and one should note
     // that getActiveNetworkInfo would return the default network interface if the VPN does not
     // specify its underlying networks in the implementation. Therefore, we need further compare
-    // |network| to the active network. If they are not the same network, we will have to fall
+    // `network` to the active network. If they are not the same network, we will have to fall
     // back to report an unknown network.
 
     if (networkInfo.getType() != ConnectivityManager.TYPE_VPN) {
@@ -209,15 +209,15 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
         || !networkCapabilities.hasTransport(NetworkCapabilities.TRANSPORT_VPN)) {
       return getNetworkState(networkInfo);
     }
-    // When |network| is in fact a VPN after querying its capability but |networkInfo| is not of
-    // type TYPE_VPN, |networkInfo| contains the info for the underlying network, and we return
+    // When `network` is in fact a VPN after querying its capability but `networkInfo` is not of
+    // type TYPE_VPN, `networkInfo` contains the info for the underlying network, and we return
     // a NetworkState constructed from it.
     return new NetworkState(networkInfo.isConnected(), ConnectivityManager.TYPE_VPN, -1,
         networkInfo.getType(), networkInfo.getSubtype());
   }
 
-  // When |networkInfo| is of type TYPE_VPN, which implies |network| is a VPN, we return the
-  // NetworkState of the active network via getActiveNetworkInfo(), if |network| is the active
+  // When `networkInfo` is of type TYPE_VPN, which implies `network` is a VPN, we return the
+  // NetworkState of the active network via getActiveNetworkInfo(), if `network` is the active
   // network that supports the VPN. Otherwise, NetworkState of an unknown network with type -1
   // will be returned.
   //
@@ -169,9 +169,9 @@ public class PeerConnection {
     public final String password;
     public final TlsCertPolicy tlsCertPolicy;
 
-    // If the URIs in |urls| only contain IP addresses, this field can be used
+    // If the URIs in `urls` only contain IP addresses, this field can be used
     // to indicate the hostname, which may be necessary for TLS (using the SNI
-    // extension). If |urls| itself contains the hostname, this isn't
+    // extension). If `urls` itself contains the hostname, this isn't
     // necessary.
     public final String hostname;
 
@@ -1106,7 +1106,7 @@ public class PeerConnection {
    * transceiver will cause future calls to CreateOffer to add a media description
    * for the corresponding transceiver.
    *
-   * <p>The initial value of |mid| in the returned transceiver is null. Setting a
+   * <p>The initial value of `mid` in the returned transceiver is null. Setting a
    * new session description may change it to a non-null value.
    *
    * <p>https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver
@@ -133,7 +133,7 @@ public class PeerConnectionFactory {
   public static class Options {
     // Keep in sync with webrtc/rtc_base/network.h!
     //
-    // These bit fields are defined for |networkIgnoreMask| below.
+    // These bit fields are defined for `networkIgnoreMask` below.
     static final int ADAPTER_TYPE_UNKNOWN = 0;
     static final int ADAPTER_TYPE_ETHERNET = 1 << 0;
    static final int ADAPTER_TYPE_WIFI = 1 << 1;
@@ -14,8 +14,8 @@ import java.util.Map;
 
 /**
  * Java version of webrtc::RTCStats. Represents an RTCStats object, as
- * described in https://w3c.github.io/webrtc-stats/. The |id|, |timestampUs|
- * and |type| accessors have the same meaning for this class as for the
+ * described in https://w3c.github.io/webrtc-stats/. The `id`, `timestampUs`
+ * and `type` accessors have the same meaning for this class as for the
  * RTCStats dictionary. Each RTCStatsReport produced by getStats contains
  * multiple RTCStats objects; one for each underlying object (codec, stream,
  * transport, etc.) that was inspected to produce the stats.
@@ -123,9 +123,9 @@ public class RendererCommon {
   //   clipped.
   // SCALE_ASPECT_BALANCED - Compromise between FIT and FILL. Video frame will fill as much as
   //   possible of the view while maintaining aspect ratio, under the constraint that at least
-  //   |BALANCED_VISIBLE_FRACTION| of the frame content will be shown.
+  //   `BALANCED_VISIBLE_FRACTION` of the frame content will be shown.
   public static enum ScalingType { SCALE_ASPECT_FIT, SCALE_ASPECT_FILL, SCALE_ASPECT_BALANCED }
-  // The minimum fraction of the frame content that will be shown for |SCALE_ASPECT_BALANCED|.
+  // The minimum fraction of the frame content that will be shown for `SCALE_ASPECT_BALANCED`.
   // This limits excessive cropping when adjusting display size.
   private static float BALANCED_VISIBLE_FRACTION = 0.5625f;
 
@@ -209,7 +209,7 @@ public class RendererCommon {
   }
 
   /**
-   * Move |matrix| transformation origin to (0.5, 0.5). This is the origin for texture coordinates
+   * Move `matrix` transformation origin to (0.5, 0.5). This is the origin for texture coordinates
    * that are in the range 0 to 1.
    */
   private static void adjustOrigin(float[] matrix) {
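The origin-shift idea behind adjustOrigin(), sketched with android.graphics.Matrix rather than the function's actual float[16] OpenGL matrix: wrap the transform in translations to and from the center.

import android.graphics.Matrix;

final class OriginShiftExample {
  // Rotates texture coordinates (range [0, 1]) about the frame center (0.5, 0.5)
  // instead of the (0, 0) corner: M = T(0.5, 0.5) * R(90) * T(-0.5, -0.5).
  static Matrix rotateAboutCenter() {
    Matrix m = new Matrix();
    m.preTranslate(0.5f, 0.5f);
    m.preRotate(90f);
    m.preTranslate(-0.5f, -0.5f);
    return m;
  }
}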
@@ -39,7 +39,7 @@ public class RtpSender {
    *
    * @param takeOwnership If true, the RtpSender takes ownership of the track
    *                      from the caller, and will auto-dispose of it when no
-   *                      longer needed. |takeOwnership| should only be used if
+   *                      longer needed. `takeOwnership` should only be used if
    *                      the caller owns the track; it is not appropriate when
    *                      the track is owned by, for example, another RtpSender
    *                      or a MediaStream.
@@ -42,9 +42,9 @@ public class SurfaceEglRenderer extends EglRenderer implements SurfaceHolder.Cal
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+   * Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
    * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-   * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+   * `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
    * init()/release() cycle.
    */
   public void init(final EglBase.Context sharedContext,
@@ -125,7 +125,7 @@ public class SurfaceEglRenderer extends EglRenderer implements SurfaceHolder.Cal
     logD("surfaceChanged: format: " + format + " size: " + width + "x" + height);
   }
 
-  // Update frame dimensions and report any changes to |rendererEvents|.
+  // Update frame dimensions and report any changes to `rendererEvents`.
   private void updateFrameDimensionsAndReportEvents(VideoFrame frame) {
     synchronized (layoutLock) {
       if (isRenderingPaused) {
@@ -48,7 +48,7 @@ public class SurfaceTextureHelper {
 
   private static final String TAG = "SurfaceTextureHelper";
   /**
-   * Construct a new SurfaceTextureHelper sharing OpenGL resources with |sharedContext|. A dedicated
+   * Construct a new SurfaceTextureHelper sharing OpenGL resources with `sharedContext`. A dedicated
    * thread and handler is created for handling the SurfaceTexture. May return null if EGL fails to
    * initialize a pixel buffer surface and make it current. If alignTimestamps is true, the frame
    * timestamps will be aligned to rtc::TimeNanos(). If frame timestamps are aligned to
@@ -66,7 +66,7 @@ public class SurfaceTextureHelper {
     // The onFrameAvailable() callback will be executed on the SurfaceTexture ctor thread. See:
     // http://grepcode.com/file/repository.grepcode.com/java/ext/com.google.android/android/5.1.1_r1/android/graphics/SurfaceTexture.java#195.
     // Therefore, in order to control the callback thread on API lvl < 21, the SurfaceTextureHelper
-    // is constructed on the |handler| thread.
+    // is constructed on the `handler` thread.
     return ThreadUtils.invokeAtFrontUninterruptibly(handler, new Callable<SurfaceTextureHelper>() {
       @Nullable
       @Override
@@ -147,7 +147,7 @@ public class SurfaceTextureHelper {
   @Nullable private final TimestampAligner timestampAligner;
   private final FrameRefMonitor frameRefMonitor;
 
-  // These variables are only accessed from the |handler| thread.
+  // These variables are only accessed from the `handler` thread.
   @Nullable private VideoSink listener;
   // The possible states of this class.
   private boolean hasPendingTexture;
@@ -156,7 +156,7 @@ public class SurfaceTextureHelper {
   private int frameRotation;
   private int textureWidth;
   private int textureHeight;
-  // |pendingListener| is set in setListener() and the runnable is posted to the handler thread.
+  // `pendingListener` is set in setListener() and the runnable is posted to the handler thread.
   // setListener() is not allowed to be called again before stopListening(), so this is thread safe.
   @Nullable private VideoSink pendingListener;
   final Runnable setListenerRunnable = new Runnable() {
@@ -223,7 +223,7 @@ public class SurfaceTextureHelper {
   }
 
   /**
-   * Start to stream textures to the given |listener|. If you need to change listener, you need to
+   * Start to stream textures to the given `listener`. If you need to change listener, you need to
    * call stopListening() first.
    */
   public void startListening(final VideoSink listener) {
@@ -331,7 +331,7 @@ public class SurfaceTextureHelper {
   }
 
   /**
-   * Posts to the correct thread to convert |textureBuffer| to I420.
+   * Posts to the correct thread to convert `textureBuffer` to I420.
    *
    * @deprecated Use toI420() instead.
    */
@@ -64,7 +64,7 @@ public class SurfaceViewRenderer extends SurfaceView
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. It is allowed to call init() to
+   * Initialize this class, sharing resources with `sharedContext`. It is allowed to call init() to
    * reinitialize the renderer after a previous init()/release() cycle.
    */
   public void init(EglBase.Context sharedContext, RendererCommon.RendererEvents rendererEvents) {
@@ -72,9 +72,9 @@ public class SurfaceViewRenderer extends SurfaceView
   }
 
   /**
-   * Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+   * Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
    * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-   * |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+   * `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
    * init()/release() cycle.
    */
   public void init(final EglBase.Context sharedContext,
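A common initialization sequence for the init() overloads above, sketched end to end; `videoTrack` is assumed to come from an existing PeerConnection setup, and the helper name is invented.

import org.webrtc.EglBase;
import org.webrtc.SurfaceViewRenderer;
import org.webrtc.VideoTrack;

final class RendererInitExample {
  static EglBase attach(SurfaceViewRenderer renderer, VideoTrack videoTrack) {
    EglBase eglBase = EglBase.create();
    // Share the EGL context; no RendererEvents callback in this sketch.
    renderer.init(eglBase.getEglBaseContext(), /* rendererEvents= */ null);
    videoTrack.addSink(renderer); // SurfaceViewRenderer implements VideoSink
    return eglBase; // caller releases it after renderer.release()
  }
}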
@@ -31,7 +31,7 @@ public class TimestampAligner {
 
   /**
    * Translates camera timestamps to the same timescale as is used by rtc::TimeNanos().
-   * |cameraTimeNs| is assumed to be accurate, but with an unknown epoch and clock drift. Returns
+   * `cameraTimeNs` is assumed to be accurate, but with an unknown epoch and clock drift. Returns
    * the translated timestamp.
    */
   public long translateTimestamp(long cameraTimeNs) {
@@ -238,7 +238,7 @@ public interface VideoEncoder {
 
   public interface Callback {
     /**
-     * Old encoders assume that the byte buffer held by |frame| is not accessed after the call to
+     * Old encoders assume that the byte buffer held by `frame` is not accessed after the call to
      * this method returns. If the pipeline downstream needs to hold on to the buffer, it then has
      * to make its own copy. We want to move to a model where no copying is needed, and instead use
      * retain()/release() to signal to the encoder when it is safe to reuse the buffer.
@@ -60,8 +60,8 @@ public class VideoFrame implements RefCounted {
     @Override @CalledByNative("Buffer") void release();
 
     /**
-     * Crops a region defined by |cropx|, |cropY|, |cropWidth| and |cropHeight|. Scales it to size
-     * |scaleWidth| x |scaleHeight|.
+     * Crops a region defined by `cropx`, `cropY`, `cropWidth` and `cropHeight`. Scales it to size
+     * `scaleWidth` x `scaleHeight`.
      */
     @CalledByNative("Buffer")
     Buffer cropAndScale(
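For example, a center-crop of a 720p buffer to 4:3 with this method. The concrete numbers are made up for illustration, and the returned buffer must be released by the caller in addition to the input buffer.

import org.webrtc.VideoFrame;

final class CropExample {
  // 1280x720 -> take the middle 960x720 (4:3), then scale down to 640x480.
  static VideoFrame.Buffer centerCropTo43(VideoFrame.Buffer buffer) {
    return buffer.cropAndScale(/* cropX= */ 160, /* cropY= */ 0,
        /* cropWidth= */ 960, /* cropHeight= */ 720,
        /* scaleWidth= */ 640, /* scaleHeight= */ 480);
  }
}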
@@ -61,7 +61,7 @@ public class VideoFrameDrawer {
   @Nullable private int[] yuvTextures;
 
   /**
-   * Upload |planes| into OpenGL textures, taking stride into consideration.
+   * Upload `planes` into OpenGL textures, taking stride into consideration.
    *
    * @return Array of three texture indices corresponding to Y-, U-, and V-plane respectively.
    */
@@ -145,8 +145,8 @@ public class VideoFrameDrawer {
   private int renderWidth;
   private int renderHeight;
 
-  // Calculate the frame size after |renderMatrix| is applied. Stores the output in member variables
-  // |renderWidth| and |renderHeight| to avoid allocations since this function is called for every
+  // Calculate the frame size after `renderMatrix` is applied. Stores the output in member variables
+  // `renderWidth` and `renderHeight` to avoid allocations since this function is called for every
   // frame.
   private void calculateTransformedRenderSize(
       int frameWidth, int frameHeight, @Nullable Matrix renderMatrix) {
@@ -155,7 +155,7 @@ public class VideoFrameDrawer {
       renderHeight = frameHeight;
       return;
     }
-    // Transform the texture coordinates (in the range [0, 1]) according to |renderMatrix|.
+    // Transform the texture coordinates (in the range [0, 1]) according to `renderMatrix`.
     renderMatrix.mapPoints(dstPoints, srcPoints);
 
     // Multiply with the width and height to get the positions in terms of pixels.
@@ -153,7 +153,7 @@ public final class YuvConverter {
     // +----+----+
     //
     // In memory, we use the same stride for all of Y, U and V. The
-    // U data starts at offset |height| * |stride| from the Y data,
+    // U data starts at offset `height` * `stride` from the Y data,
     // and the V data starts at at offset |stride/2| from the U
     // data, with rows of U and V data alternating.
     //
@@ -161,12 +161,12 @@ public final class YuvConverter {
     // a single byte per pixel (EGL10.EGL_COLOR_BUFFER_TYPE,
     // EGL10.EGL_LUMINANCE_BUFFER,), but that seems to be
     // unsupported by devices. So do the following hack: Allocate an
-    // RGBA buffer, of width |stride|/4. To render each of these
+    // RGBA buffer, of width `stride`/4. To render each of these
     // large pixels, sample the texture at 4 different x coordinates
     // and store the results in the four components.
     //
     // Since the V data needs to start on a boundary of such a
-    // larger pixel, it is not sufficient that |stride| is even, it
+    // larger pixel, it is not sufficient that `stride` is even, it
     // has to be a multiple of 8 pixels.
     final int frameWidth = preparedBuffer.getWidth();
     final int frameHeight = preparedBuffer.getHeight();
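To make the layout described above concrete, the byte offsets for a hypothetical 640x480 frame (arithmetic only; the variable names are invented for the example).

final class YuvLayoutExample {
  public static void main(String[] args) {
    int height = 480, stride = 640; // same stride for Y, U and V
    int yOffset = 0;                    // Y plane: height rows of stride bytes
    int uOffset = height * stride;      // 307200: U starts right after Y
    int vOffset = uOffset + stride / 2; // 307520: V starts stride/2 into the U/V rows
    // Chroma region is height/2 rows of stride bytes (U and V rows alternating).
    int totalSize = height * stride + (height / 2) * stride; // 460800 bytes
    System.out.println(yOffset + " " + uOffset + " " + vOffset + " " + totalSize);
  }
}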