Use std::make_unique instead of absl::make_unique.
WebRTC is now using C++14, so there is no need to use the Abseil version of std::make_unique.

This CL has been created with the following steps:

git grep -l absl::make_unique | sort | uniq > /tmp/make_unique.txt
git grep -l absl::WrapUnique | sort | uniq > /tmp/wrap_unique.txt
git grep -l "#include <memory>" | sort | uniq > /tmp/memory.txt

diff --new-line-format="" --unchanged-line-format="" \
  /tmp/make_unique.txt /tmp/wrap_unique.txt | sort | \
  uniq > /tmp/only_make_unique.txt

diff --new-line-format="" --unchanged-line-format="" \
  /tmp/only_make_unique.txt /tmp/memory.txt | \
  xargs grep -l "absl/memory" > /tmp/add-memory.txt

git grep -l "\babsl::make_unique\b" | \
  xargs sed -i "s/\babsl::make_unique\b/std::make_unique/g"

git checkout PRESUBMIT.py abseil-in-webrtc.md

cat /tmp/add-memory.txt | \
  xargs sed -i \
  's/#include "absl\/memory\/memory.h"/#include <memory>/g'

git cl format

# Manually fix the order of the newly inserted #include <memory>.

cat /tmp/only_make_unique.txt | xargs grep -l "#include <memory>" | \
  xargs sed -i '/#include "absl\/memory\/memory.h"/d'

git ls-files | grep BUILD.gn | \
  xargs sed -i '/\/\/third_party\/abseil-cpp\/absl\/memory/d'

python tools_webrtc/gn_check_autofix.py \
  -m tryserver.webrtc -b linux_rel

# Repeat the gn_check_autofix step for the other platforms.

git ls-files | grep BUILD.gn | \
  xargs sed -i 's/absl\/memory:memory/absl\/memory/g'

git cl format

Bug: webrtc:10945
Change-Id: I3fe28ea80f4dd3ba3cf28effd151d5e1f19aff89
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/153221
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Alessio Bazzica <alessiob@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#29209}
committed by Commit Bot
parent 809198edff
commit 317a1f09ed
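The change itself is mechanical: under C++14, std::make_unique is provided by <memory> and absl::make_unique is effectively an alias for it, so every absl::make_unique<T>(...) call in the hunks below simply becomes std::make_unique<T>(...), while #include "absl/memory/memory.h" is either replaced by #include <memory> or dropped where <memory> was already included. absl::WrapUnique, which adopts an already-created raw pointer, has no standard-library counterpart, which is why the include cleanup in the steps above skips files that also use it. A minimal before/after sketch, for illustration only; the Widget type is hypothetical and not part of the CL:

#include <memory>

// Hypothetical example type, used only to illustrate the substitution.
struct Widget {
  explicit Widget(int id) : id_(id) {}
  int id_;
};

int main() {
  // Before this CL (required #include "absl/memory/memory.h"):
  //   auto w = absl::make_unique<Widget>(42);
  // After this CL, only <memory> is needed:
  auto w = std::make_unique<Widget>(42);

  // absl::WrapUnique(new Widget(7)) would still require the Abseil header;
  // the closest standard spelling is constructing the unique_ptr directly.
  std::unique_ptr<Widget> adopted(new Widget(7));

  return (w->id_ + adopted->id_ == 49) ? 0 : 1;
}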
@@ -989,7 +989,7 @@ bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
   EncodedImageCallback::Result callback_result(
       EncodedImageCallback::Result::OK);
   if (callback_) {
-    auto image = absl::make_unique<EncodedImage>();
+    auto image = std::make_unique<EncodedImage>();
     // The corresponding (and deprecated) java classes are not prepared for
     // late calls to releaseOutputBuffer, so to keep things simple, make a
     // copy here, and call releaseOutputBuffer before returning.

@@ -10,7 +10,8 @@
 
 #include "sdk/android/src/jni/audio_device/aaudio_player.h"
 
-#include "absl/memory/memory.h"
+#include <memory>
+
 #include "api/array_view.h"
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/checks.h"

@@ -124,7 +125,7 @@ void AAudioPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // Create a modified audio buffer class which allows us to ask for any number
   // of samples (and not only multiple of 10ms) to match the optimal buffer
   // size per callback used by AAudio.
-  fine_audio_buffer_ = absl::make_unique<FineAudioBuffer>(audio_device_buffer_);
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
 }
 
 bool AAudioPlayer::SpeakerVolumeIsAvailable() {

@@ -10,7 +10,8 @@
 
 #include "sdk/android/src/jni/audio_device/aaudio_recorder.h"
 
-#include "absl/memory/memory.h"
+#include <memory>
+
 #include "api/array_view.h"
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/checks.h"

@@ -120,7 +121,7 @@ void AAudioRecorder::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   // Create a modified audio buffer class which allows us to deliver any number
   // of samples (and not only multiples of 10ms which WebRTC uses) to match the
   // native AAudio buffer size.
-  fine_audio_buffer_ = absl::make_unique<FineAudioBuffer>(audio_device_buffer_);
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
 }
 
 bool AAudioRecorder::IsAcousticEchoCancelerSupported() const {

@@ -10,9 +10,9 @@
 
 #include "sdk/android/src/jni/audio_device/audio_device_module.h"
 
+#include <memory>
 #include <utility>
 
-#include "absl/memory/memory.h"
 #include "api/task_queue/default_task_queue_factory.h"
 #include "api/task_queue/task_queue_factory.h"
 #include "modules/audio_device/audio_device_buffer.h"

@@ -92,7 +92,7 @@ class AndroidAudioDeviceModule : public AudioDeviceModule {
     RTC_LOG(INFO) << __FUNCTION__;
     RTC_DCHECK(thread_checker_.IsCurrent());
     audio_device_buffer_ =
-        absl::make_unique<AudioDeviceBuffer>(task_queue_factory_.get());
+        std::make_unique<AudioDeviceBuffer>(task_queue_factory_.get());
     AttachAudioBuffer();
     if (initialized_) {
       return 0;

@@ -8,7 +8,8 @@
  * be found in the AUTHORS file in the root of the source tree.
  */
 
-#include "absl/memory/memory.h"
+#include <memory>
+
 #include "sdk/android/generated_java_audio_jni/JavaAudioDeviceModule_jni.h"
 #include "sdk/android/src/jni/audio_device/audio_record_jni.h"
 #include "sdk/android/src/jni/audio_device/audio_track_jni.h"

@@ -33,11 +34,11 @@ static jlong JNI_JavaAudioDeviceModule_CreateAudioDeviceModule(
                      output_sample_rate, j_use_stereo_input,
                      j_use_stereo_output, &input_parameters,
                      &output_parameters);
-  auto audio_input = absl::make_unique<AudioRecordJni>(
+  auto audio_input = std::make_unique<AudioRecordJni>(
      env, input_parameters, kHighLatencyModeDelayEstimateInMilliseconds,
      j_webrtc_audio_record);
-  auto audio_output = absl::make_unique<AudioTrackJni>(env, output_parameters,
-                                                       j_webrtc_audio_track);
+  auto audio_output = std::make_unique<AudioTrackJni>(env, output_parameters,
+                                                      j_webrtc_audio_track);
   return jlongFromPointer(CreateAudioDeviceModuleFromInputAndOutput(
      AudioDeviceModule::kAndroidJavaAudio,
      j_use_stereo_input, j_use_stereo_output,

@@ -12,7 +12,7 @@
 
 #include <android/log.h>
 
-#include "absl/memory/memory.h"
+#include <memory>
 #include "api/array_view.h"
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/arraysize.h"

@@ -226,7 +226,7 @@ void OpenSLESPlayer::AllocateDataBuffers() {
   ALOGD("native buffer size: %" RTC_PRIuS, buffer_size_in_samples);
   ALOGD("native buffer size in ms: %.2f",
         audio_parameters_.GetBufferSizeInMilliseconds());
-  fine_audio_buffer_ = absl::make_unique<FineAudioBuffer>(audio_device_buffer_);
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
   // Allocated memory for audio buffers.
   for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
     audio_buffers_[i].reset(new SLint16[buffer_size_in_samples]);

@@ -12,7 +12,7 @@
 
 #include <android/log.h>
 
-#include "absl/memory/memory.h"
+#include <memory>
 #include "api/array_view.h"
 #include "modules/audio_device/fine_audio_buffer.h"
 #include "rtc_base/arraysize.h"

@@ -353,7 +353,7 @@ void OpenSLESRecorder::AllocateDataBuffers() {
         audio_parameters_.GetBytesPerBuffer());
   ALOGD("native sample rate: %d", audio_parameters_.sample_rate());
   RTC_DCHECK(audio_device_buffer_);
-  fine_audio_buffer_ = absl::make_unique<FineAudioBuffer>(audio_device_buffer_);
+  fine_audio_buffer_ = std::make_unique<FineAudioBuffer>(audio_device_buffer_);
   // Allocate queue of audio buffers that stores recorded audio samples.
   const int buffer_size_samples =
       audio_parameters_.frames_per_buffer() * audio_parameters_.channels();

@@ -12,7 +12,6 @@
 
 #include <limits>
 
-#include "absl/memory/memory.h"
 #include "api/data_channel_interface.h"
 #include "rtc_base/logging.h"
 #include "sdk/android/generated_peerconnection_jni/DataChannel_jni.h"

@@ -97,7 +96,7 @@ static jlong JNI_DataChannel_RegisterObserver(
     JNIEnv* jni,
     const JavaParamRef<jobject>& j_dc,
     const JavaParamRef<jobject>& j_observer) {
-  auto observer = absl::make_unique<DataChannelObserverJni>(jni, j_observer);
+  auto observer = std::make_unique<DataChannelObserverJni>(jni, j_observer);
   ExtractNativeDC(jni, j_dc)->RegisterObserver(observer.get());
   return jlongFromPointer(observer.release());
 }

@@ -10,7 +10,8 @@
 
 #include "sdk/android/src/jni/pc/media_constraints.h"
 
-#include "absl/memory/memory.h"
+#include <memory>
+
 #include "sdk/android/generated_peerconnection_jni/MediaConstraints_jni.h"
 #include "sdk/android/native_api/jni/java_types.h"
 #include "sdk/android/src/jni/jni_helpers.h"

@@ -39,7 +40,7 @@ MediaConstraints::Constraints PopulateConstraintsFromJavaPairList(
 std::unique_ptr<MediaConstraints> JavaToNativeMediaConstraints(
     JNIEnv* env,
     const JavaRef<jobject>& j_constraints) {
-  return absl::make_unique<MediaConstraints>(
+  return std::make_unique<MediaConstraints>(
       PopulateConstraintsFromJavaPairList(
           env, Java_MediaConstraints_getMandatory(env, j_constraints)),
       PopulateConstraintsFromJavaPairList(

@@ -10,7 +10,8 @@
 
 #include "sdk/android/src/jni/pc/media_stream.h"
 
-#include "absl/memory/memory.h"
+#include <memory>
+
 #include "sdk/android/generated_peerconnection_jni/MediaStream_jni.h"
 #include "sdk/android/native_api/jni/java_types.h"
 #include "sdk/android/src/jni/jni_helpers.h"

@@ -25,7 +26,7 @@ JavaMediaStream::JavaMediaStream(
           env,
           Java_MediaStream_Constructor(env,
                                        jlongFromPointer(media_stream.get()))),
-      observer_(absl::make_unique<MediaStreamObserver>(media_stream)) {
+      observer_(std::make_unique<MediaStreamObserver>(media_stream)) {
   for (rtc::scoped_refptr<AudioTrackInterface> track :
        media_stream->GetAudioTracks()) {
     Java_MediaStream_addNativeAudioTrack(env, j_media_stream_,

@@ -32,7 +32,6 @@
 #include <string>
 #include <utility>
 
-#include "absl/memory/memory.h"
 #include "api/peer_connection_interface.h"
 #include "api/rtc_event_log_output_file.h"
 #include "api/rtp_receiver_interface.h"

@@ -786,7 +785,7 @@ static jboolean JNI_PeerConnection_StartRtcEventLog(
     return false;
   }
   return ExtractNativePC(jni, j_pc)->StartRtcEventLog(
-      absl::make_unique<RtcEventLogOutputFile>(f, max_size));
+      std::make_unique<RtcEventLogOutputFile>(f, max_size));
 }
 
 static void JNI_PeerConnection_StopRtcEventLog(

@@ -204,7 +204,7 @@ static void JNI_PeerConnectionFactory_InitializeFieldTrials(
     field_trial::InitFieldTrialsFromString(nullptr);
     return;
   }
-  field_trials_init_string = absl::make_unique<std::string>(
+  field_trials_init_string = std::make_unique<std::string>(
       JavaToNativeString(jni, j_trials_init_string));
   RTC_LOG(LS_INFO) << "initializeFieldTrials: " << *field_trials_init_string;
   field_trial::InitFieldTrialsFromString(field_trials_init_string->c_str());

@@ -302,7 +302,7 @@ ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
   dependencies.signaling_thread = signaling_thread.get();
   dependencies.task_queue_factory = CreateDefaultTaskQueueFactory();
   dependencies.call_factory = CreateCallFactory();
-  dependencies.event_log_factory = absl::make_unique<RtcEventLogFactory>(
+  dependencies.event_log_factory = std::make_unique<RtcEventLogFactory>(
       dependencies.task_queue_factory.get());
   dependencies.fec_controller_factory = std::move(fec_controller_factory);
   dependencies.network_controller_factory =

@@ -476,7 +476,7 @@ static jlong JNI_PeerConnectionFactory_CreatePeerConnection(
   PeerConnectionDependencies peer_connection_dependencies(observer.get());
   if (!j_sslCertificateVerifier.is_null()) {
     peer_connection_dependencies.tls_cert_verifier =
-        absl::make_unique<SSLCertificateVerifierWrapper>(
+        std::make_unique<SSLCertificateVerifierWrapper>(
            jni, j_sslCertificateVerifier);
   }
 

@@ -531,7 +531,7 @@ static void JNI_PeerConnectionFactory_InjectLoggable(
   if (jni_log_sink) {
     rtc::LogMessage::RemoveLogToStream(jni_log_sink.get());
   }
-  jni_log_sink = absl::make_unique<JNILogSink>(jni, j_logging);
+  jni_log_sink = std::make_unique<JNILogSink>(jni, j_logging);
   rtc::LogMessage::AddLogToStream(
       jni_log_sink.get(), static_cast<rtc::LoggingSeverity>(nativeSeverity));
   rtc::LogMessage::LogToDebug(rtc::LS_NONE);