Update modules/ to not use implicit conversion from scoped_refptr<T> to T*.

Bug: webrtc:13464
Change-Id: I3906e91906edbf80d558e5c367d6b9429497c021
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/259762
Reviewed-by: Henrik Andersson <henrika@webrtc.org>
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Reviewed-by: Ilya Nikolaevskiy <ilnik@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#36606}
This commit is contained in:
Niels Möller
2022-04-21 15:06:35 +02:00
committed by WebRTC LUCI CQ
parent d090952628
commit c7b690272d
11 changed files with 51 additions and 42 deletions

View File

@@ -320,7 +320,7 @@ TEST_F(NetEqImplTest, InsertPacket) {
*dec = std::move(mock_decoder);
}));
DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, mock_decoder_factory);
absl::nullopt, mock_decoder_factory.get());
// Expectations for decoder database.
EXPECT_CALL(*mock_decoder_database_, GetDecoderInfo(kPayloadType))
@@ -1633,7 +1633,7 @@ TEST_F(NetEqImplTest, NoCrashWith1000Channels) {
decoder = dec->get();
}));
DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, mock_decoder_factory);
absl::nullopt, mock_decoder_factory.get());
// Expectations for decoder database.
EXPECT_CALL(*mock_decoder_database_, GetDecoderInfo(kPayloadType))
.WillRepeatedly(Return(&info));

View File

@@ -316,7 +316,7 @@ TEST(PacketBuffer, InsertPacketList) {
MockDecoderDatabase decoder_database;
auto factory = CreateBuiltinAudioDecoderFactory();
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
EXPECT_CALL(decoder_database, GetDecoderInfo(0))
.WillRepeatedly(Return(&info));
@@ -366,11 +366,11 @@ TEST(PacketBuffer, InsertPacketListChangePayloadType) {
MockDecoderDatabase decoder_database;
auto factory = CreateBuiltinAudioDecoderFactory();
const DecoderDatabase::DecoderInfo info0(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
EXPECT_CALL(decoder_database, GetDecoderInfo(0))
.WillRepeatedly(Return(&info0));
const DecoderDatabase::DecoderInfo info1(SdpAudioFormat("pcma", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
EXPECT_CALL(decoder_database, GetDecoderInfo(1))
.WillRepeatedly(Return(&info1));
@@ -562,7 +562,7 @@ TEST(PacketBuffer, Reordering) {
MockDecoderDatabase decoder_database;
auto factory = CreateBuiltinAudioDecoderFactory();
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
EXPECT_CALL(decoder_database, GetDecoderInfo(0))
.WillRepeatedly(Return(&info));
absl::optional<uint8_t> current_pt;
@@ -609,11 +609,11 @@ TEST(PacketBuffer, CngFirstThenSpeechWithNewSampleRate) {
MockDecoderDatabase decoder_database;
auto factory = CreateBuiltinAudioDecoderFactory();
const DecoderDatabase::DecoderInfo info_cng(SdpAudioFormat("cn", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
EXPECT_CALL(decoder_database, GetDecoderInfo(kCngPt))
.WillRepeatedly(Return(&info_cng));
const DecoderDatabase::DecoderInfo info_speech(
SdpAudioFormat("l16", 16000, 1), absl::nullopt, factory);
SdpAudioFormat("l16", 16000, 1), absl::nullopt, factory.get());
EXPECT_CALL(decoder_database, GetDecoderInfo(kSpeechPt))
.WillRepeatedly(Return(&info_speech));
@@ -736,7 +736,7 @@ TEST(PacketBuffer, Failures) {
list.push_back(gen.NextPacket(payload_len, nullptr)); // Valid packet.
auto factory = CreateBuiltinAudioDecoderFactory();
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
EXPECT_CALL(decoder_database, GetDecoderInfo(0))
.WillRepeatedly(Return(&info));
absl::optional<uint8_t> current_pt;

View File

@@ -27,7 +27,7 @@ TEST(TimestampScaler, TestNoScaling) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use PCMu, because it doesn't use scaled timestamps.
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 0;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -49,7 +49,7 @@ TEST(TimestampScaler, TestNoScalingLargeStep) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use PCMu, because it doesn't use scaled timestamps.
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("pcmu", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 0;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -76,7 +76,7 @@ TEST(TimestampScaler, TestG722) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use G722, which has a factor 2 scaling.
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -102,7 +102,7 @@ TEST(TimestampScaler, TestG722LargeStep) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use G722, which has a factor 2 scaling.
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -132,9 +132,9 @@ TEST(TimestampScaler, TestG722WithCng) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use G722, which has a factor 2 scaling.
const DecoderDatabase::DecoderInfo info_g722(SdpAudioFormat("g722", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
const DecoderDatabase::DecoderInfo info_cng(SdpAudioFormat("cn", 16000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadTypeG722 = 17;
static const uint8_t kRtpPayloadTypeCng = 13;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadTypeG722))
@@ -176,7 +176,7 @@ TEST(TimestampScaler, TestG722Packet) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use G722, which has a factor 2 scaling.
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -206,7 +206,7 @@ TEST(TimestampScaler, TestG722PacketList) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use G722, which has a factor 2 scaling.
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -240,7 +240,7 @@ TEST(TimestampScaler, TestG722Reset) {
auto factory = CreateBuiltinAudioDecoderFactory();
// Use G722, which has a factor 2 scaling.
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("g722", 8000, 1),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));
@@ -281,7 +281,7 @@ TEST(TimestampScaler, TestOpusLargeStep) {
MockDecoderDatabase db;
auto factory = CreateBuiltinAudioDecoderFactory();
const DecoderDatabase::DecoderInfo info(SdpAudioFormat("opus", 48000, 2),
absl::nullopt, factory);
absl::nullopt, factory.get());
static const uint8_t kRtpPayloadType = 17;
EXPECT_CALL(db, GetDecoderInfo(kRtpPayloadType))
.WillRepeatedly(Return(&info));

View File

@@ -23,7 +23,11 @@
#ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
// clang-format off
// To get Windows includes in the right order, this must come before the Windows
// includes below.
#include "modules/audio_device/win/audio_device_core_win.h"
// clang-format on
#include <string.h>
@@ -40,6 +44,7 @@
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/string_utils.h"
#include "rtc_base/thread_annotations.h"
#include "system_wrappers/include/sleep.h"
@@ -2090,7 +2095,8 @@ int32_t AudioDeviceWindowsCore::InitRecordingDMO() {
<< "AudioDeviceBuffer must be attached before streaming can start";
}
_mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);
_mediaBuffer = rtc::make_ref_counted<MediaBufferImpl>(_recBlockSize *
_recAudioFrameSize);
// Optional, but if called, must be after media types are set.
hr = _dmo->AllocateStreamingResources();
@@ -2996,7 +3002,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() {
DWORD dwStatus = 0;
{
DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
dmoBuffer.pBuffer = _mediaBuffer;
dmoBuffer.pBuffer = _mediaBuffer.get();
dmoBuffer.pBuffer->AddRef();
// Poll the DMO for AEC processed capture data. The DMO will
@@ -3393,32 +3399,34 @@ int AudioDeviceWindowsCore::SetDMOProperties() {
// Set the AEC system mode.
// SINGLE_CHANNEL_AEC - AEC processing only.
if (SetVtI4Property(ps, MFPKEY_WMAAECMA_SYSTEM_MODE, SINGLE_CHANNEL_AEC)) {
if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_SYSTEM_MODE,
SINGLE_CHANNEL_AEC)) {
return -1;
}
// Set the AEC source mode.
// VARIANT_TRUE - Source mode (we poll the AEC for captured data).
if (SetBoolProperty(ps, MFPKEY_WMAAECMA_DMO_SOURCE_MODE, VARIANT_TRUE) ==
-1) {
if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
VARIANT_TRUE) == -1) {
return -1;
}
// Enable the feature mode.
// This lets us override all the default processing settings below.
if (SetBoolProperty(ps, MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) == -1) {
if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_FEATURE_MODE, VARIANT_TRUE) ==
-1) {
return -1;
}
// Disable analog AGC (default enabled).
if (SetBoolProperty(ps, MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER, VARIANT_FALSE) ==
-1) {
if (SetBoolProperty(ps.get(), MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
VARIANT_FALSE) == -1) {
return -1;
}
// Disable noise suppression (default enabled).
// 0 - Disabled, 1 - Enabled
if (SetVtI4Property(ps, MFPKEY_WMAAECMA_FEATR_NS, 0) == -1) {
if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_FEATR_NS, 0) == -1) {
return -1;
}
@@ -3463,7 +3471,8 @@ int AudioDeviceWindowsCore::SetDMOProperties() {
static_cast<uint32_t>(0x0000ffff & inDevIndex);
RTC_LOG(LS_VERBOSE) << "Capture device index: " << inDevIndex
<< ", render device index: " << outDevIndex;
if (SetVtI4Property(ps, MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex) == -1) {
if (SetVtI4Property(ps.get(), MFPKEY_WMAAECMA_DEVICE_INDEXES, devIndex) ==
-1) {
return -1;
}
@@ -3766,7 +3775,7 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
SAFE_RELEASE(ptrDevice);
}
if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1) {
if (_GetDeviceID(device.get(), szDeviceID, kDeviceIDLength) == -1) {
return -1;
}

View File

@@ -651,10 +651,10 @@ bool ForwardErrorCorrection::RecoverPacket(const ReceivedFecPacket& fec_packet,
// This is the packet we're recovering.
recovered_packet->seq_num = protected_packet->seq_num;
} else {
XorHeaders(*protected_packet->pkt, recovered_packet->pkt);
XorHeaders(*protected_packet->pkt, recovered_packet->pkt.get());
XorPayloads(*protected_packet->pkt,
protected_packet->pkt->data.size() - kRtpHeaderSize,
kRtpHeaderSize, recovered_packet->pkt);
kRtpHeaderSize, recovered_packet->pkt.get());
}
}
if (!FinishPacketRecovery(fec_packet, recovered_packet)) {

View File

@@ -787,7 +787,7 @@ TEST_P(RtpSenderVideoTest,
config.clock = &fake_clock_;
config.rtp_sender = rtp_module_->RtpSender();
config.field_trials = &field_trials_;
config.frame_encryptor = encryptor;
config.frame_encryptor = encryptor.get();
RTPSenderVideo rtp_sender_video(config);
FrameDependencyStructure video_structure;

View File

@@ -177,7 +177,7 @@ int32_t UlpfecReceiverImpl::ProcessReceivedFec() {
for (const auto& received_packet : received_packets) {
// Send received media packet to VCM.
if (!received_packet->is_fec) {
ForwardErrorCorrection::Packet* packet = received_packet->pkt;
ForwardErrorCorrection::Packet* packet = received_packet->pkt.get();
recovered_packet_callback_->OnRecoveredPacket(packet->data.data(),
packet->data.size());
// Create a packet with the buffer to modify it.
@@ -211,7 +211,7 @@ int32_t UlpfecReceiverImpl::ProcessReceivedFec() {
// Already sent to the VCM and the jitter buffer.
continue;
}
ForwardErrorCorrection::Packet* packet = recovered_packet->pkt;
ForwardErrorCorrection::Packet* packet = recovered_packet->pkt.get();
++packet_counter_.num_recovered_packets;
// Set this flag first; in case the recovered packet carries a RED
// header, OnRecoveredPacket will recurse back here.

View File

@@ -707,7 +707,7 @@ CaptureInputPin::GetAllocator(IMemAllocator** allocator) {
return hr;
allocator_.swap(allocator);
}
*allocator = allocator_;
*allocator = allocator_.get();
allocator_->AddRef();
return S_OK;
}

View File

@@ -35,7 +35,7 @@ VideoCaptureDS::~VideoCaptureDS() {
}
if (_graphBuilder) {
if (sink_filter_)
_graphBuilder->RemoveFilter(sink_filter_);
_graphBuilder->RemoveFilter(sink_filter_.get());
if (_captureFilter)
_graphBuilder->RemoveFilter(_captureFilter);
if (_dvFilter)
@@ -101,13 +101,13 @@ int32_t VideoCaptureDS::Init(const char* deviceUniqueIdUTF8) {
// Create the sink filte used for receiving Captured frames.
sink_filter_ = new ComRefCount<CaptureSinkFilter>(this);
hr = _graphBuilder->AddFilter(sink_filter_, SINK_FILTER_NAME);
hr = _graphBuilder->AddFilter(sink_filter_.get(), SINK_FILTER_NAME);
if (FAILED(hr)) {
RTC_LOG(LS_INFO) << "Failed to add the send filter to the graph.";
return -1;
}
_inputSendPin = GetInputPin(sink_filter_);
_inputSendPin = GetInputPin(sink_filter_.get());
if (!_inputSendPin) {
RTC_LOG(LS_INFO) << "Failed to get input send pin";
return -1;

View File

@@ -507,7 +507,7 @@ void VideoProcessor::WriteDecodedFrame(const I420BufferInterface& decoded_frame,
scaled_buffer = I420Buffer::Create(input_video_width, input_video_height);
scaled_buffer->ScaleFrom(decoded_frame);
scaled_frame = scaled_buffer;
scaled_frame = scaled_buffer.get();
}
// Ensure there is no padding.

View File

@@ -1367,7 +1367,7 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
// Prepare `raw_images_` from `mapped_buffer` and, if simulcast, scaled
// versions of `buffer`.
std::vector<rtc::scoped_refptr<VideoFrameBuffer>> prepared_buffers;
SetRawImagePlanes(&raw_images_[0], mapped_buffer);
SetRawImagePlanes(&raw_images_[0], mapped_buffer.get());
prepared_buffers.push_back(mapped_buffer);
for (size_t i = 1; i < encoders_.size(); ++i) {
// Native buffers should implement optimized scaling and is the preferred
@@ -1410,7 +1410,7 @@ LibvpxVp8Encoder::PrepareBuffers(rtc::scoped_refptr<VideoFrameBuffer> buffer) {
<< VideoFrameBufferTypeToString(mapped_buffer->type());
return {};
}
SetRawImagePlanes(&raw_images_[i], scaled_buffer);
SetRawImagePlanes(&raw_images_[i], scaled_buffer.get());
prepared_buffers.push_back(scaled_buffer);
}
return prepared_buffers;