Refactor TestAudioDeviceModule to not depend on EventTimerWrapper.

In addition, make the processing thread loop explicitly instead of using
the deprecated built-in looping in PlatformThread.

Bug: webrtc:3380
Change-Id: I5171ce3457b80f922c8284259882da63c8f146f1
Reviewed-on: https://webrtc-review.googlesource.com/96544
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Henrik Andreasson <henrika@webrtc.org>
Reviewed-by: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#24492}
Niels Möller
2018-08-29 14:46:31 +02:00
committed by Commit Bot
parent 8d1b582f33
commit 9ea5765f78

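As context for the message above: the removed code relied on PlatformThread repeatedly invoking its run function (the deprecated built-in looping) and on an EventTimerWrapper tick for pacing, while the new code keeps the loop inside the run function and leaves it when a stop flag, set under the lock in the destructor, becomes visible. The self-contained sketch below mirrors that start/stop shape with std::thread and std::mutex rather than the WebRTC primitives; the class and member names are illustrative only, not part of the WebRTC API.

```cpp
// Stand-alone illustration (std::thread/std::mutex in place of
// rtc::PlatformThread/rtc::CriticalSection) of the explicit-loop pattern this
// change adopts: the thread function loops on its own and returns once a stop
// flag, written under the lock in the destructor, is observed.
#include <chrono>
#include <mutex>
#include <thread>

class LoopingWorker {
 public:
  LoopingWorker() : thread_([this] { Run(); }) {}

  ~LoopingWorker() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      stop_ = true;  // Counterpart of "stop_thread_ = true" under lock_.
    }
    thread_.join();  // Counterpart of thread_->Stop().
  }

 private:
  void Run() {
    for (;;) {
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (stop_) {
          return;  // The destructor is waiting to join us.
        }
        // One iteration of work goes here, done under the lock just as
        // ProcessAudio() runs the capture/render callbacks under lock_.
      }
      // Placeholder pacing only; the real loop sleeps toward an absolute
      // deadline (see the pacing sketch before the ProcessAudio hunk below).
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
  }

  std::mutex mutex_;
  bool stop_ = false;
  std::thread thread_;  // Declared last so Run() starts after the flag exists.
};

int main() {
  LoopingWorker worker;  // Runs the paced worker briefly, then stops it.
  std::this_thread::sleep_for(std::chrono::milliseconds(20));
  return 0;  // Destructor sets the flag under the lock and joins the thread.
}
```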

@@ -22,20 +22,20 @@
#include "rtc_base/checks.h"
#include "rtc_base/criticalsection.h"
#include "rtc_base/event.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/random.h"
#include "rtc_base/refcountedobject.h"
#include "system_wrappers/include/event_wrapper.h"
#include "rtc_base/thread.h"
#include "rtc_base/timeutils.h"
namespace webrtc {
class EventTimerWrapper;
namespace {
constexpr int kFrameLengthMs = 10;
constexpr int kFramesPerSecond = 1000 / kFrameLengthMs;
constexpr int kFrameLengthUs = 10000;
constexpr int kFramesPerSecond = rtc::kNumMicrosecsPerSec / kFrameLengthUs;
// TestAudioDeviceModule implements an AudioDevice module that can act both as a
// capturer and a renderer. It will use 10ms audio frames.
@@ -54,13 +54,13 @@ class TestAudioDeviceModuleImpl
float speed = 1)
: capturer_(std::move(capturer)),
renderer_(std::move(renderer)),
speed_(speed),
process_interval_us_(kFrameLengthUs / speed),
audio_callback_(nullptr),
rendering_(false),
capturing_(false),
done_rendering_(true, true),
done_capturing_(true, true),
tick_(EventTimerWrapper::Create()) {
stop_thread_(false) {
auto good_sample_rate = [](int sr) {
return sr == 8000 || sr == 16000 || sr == 32000 || sr == 44100 ||
sr == 48000;
@@ -81,16 +81,19 @@ class TestAudioDeviceModuleImpl
StopPlayout();
StopRecording();
if (thread_) {
{
rtc::CritScope cs(&lock_);
stop_thread_ = true;
}
thread_->Stop();
}
}
int32_t Init() {
RTC_CHECK(tick_->StartTimer(true, kFrameLengthMs / speed_));
thread_ = absl::make_unique<rtc::PlatformThread>(
TestAudioDeviceModuleImpl::Run, this, "TestAudioDeviceModuleImpl");
TestAudioDeviceModuleImpl::Run, this, "TestAudioDeviceModuleImpl",
rtc::kHighPriority);
thread_->Start();
thread_->SetPriority(rtc::kHighPriority);
return 0;
}
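The ProcessAudio hunk below replaces the EventTimerWrapper wait with deadline-based pacing: an absolute target time is advanced by process_interval_us_ on every iteration, the thread sleeps off whatever remains (re-sleeping in a loop when rtc::Thread::SleepMs does not complete), and a single error is logged if processing falls behind. A minimal std::chrono rendering of that pacing follows; the function and variable names are mine, not WebRTC's, and the retry-on-interrupted-sleep detail is simplified away.

```cpp
// Sketch of the deadline-based pacing used by the new ProcessAudio() loop,
// expressed with std::chrono instead of rtc::TimeMicros()/rtc::Thread::SleepMs.
#include <chrono>
#include <cstdio>
#include <thread>

void PacedLoop(std::chrono::microseconds interval, int iterations) {
  auto deadline = std::chrono::steady_clock::now();
  bool logged_once = false;
  for (int i = 0; i < iterations; ++i) {
    // ... process one 10 ms audio frame here ...
    deadline += interval;  // Absolute target for the next iteration.
    auto left = deadline - std::chrono::steady_clock::now();
    if (left < std::chrono::microseconds(0)) {
      if (!logged_once) {  // Log the overrun once, as the new code does.
        std::fprintf(stderr, "processing is too slow\n");
        logged_once = true;
      }
    } else {
      std::this_thread::sleep_for(left);  // Sleep off the remainder.
    }
  }
}

int main() {
  PacedLoop(std::chrono::microseconds(10000), 100);  // ~1 s of 10 ms frames.
  return 0;
}
```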
@@ -155,51 +158,72 @@ class TestAudioDeviceModuleImpl
private:
void ProcessAudio() {
{
rtc::CritScope cs(&lock_);
if (capturing_) {
// Capture 10ms of audio. 2 bytes per sample.
const bool keep_capturing = capturer_->Capture(&recording_buffer_);
uint32_t new_mic_level = 0;
if (recording_buffer_.size() > 0) {
audio_callback_->RecordedDataIsAvailable(
recording_buffer_.data(), recording_buffer_.size(), 2,
capturer_->NumChannels(), capturer_->SamplingFrequency(), 0, 0, 0,
false, new_mic_level);
int64_t time_us = rtc::TimeMicros();
bool logged_once = false;
for (;;) {
{
rtc::CritScope cs(&lock_);
if (stop_thread_) {
return;
}
if (!keep_capturing) {
capturing_ = false;
done_capturing_.Set();
if (capturing_) {
// Capture 10ms of audio. 2 bytes per sample.
const bool keep_capturing = capturer_->Capture(&recording_buffer_);
uint32_t new_mic_level = 0;
if (recording_buffer_.size() > 0) {
audio_callback_->RecordedDataIsAvailable(
recording_buffer_.data(), recording_buffer_.size(), 2,
capturer_->NumChannels(), capturer_->SamplingFrequency(), 0, 0,
0, false, new_mic_level);
}
if (!keep_capturing) {
capturing_ = false;
done_capturing_.Set();
}
}
if (rendering_) {
size_t samples_out = 0;
int64_t elapsed_time_ms = -1;
int64_t ntp_time_ms = -1;
const int sampling_frequency = renderer_->SamplingFrequency();
audio_callback_->NeedMorePlayData(
SamplesPerFrame(sampling_frequency), 2, renderer_->NumChannels(),
sampling_frequency, playout_buffer_.data(), samples_out,
&elapsed_time_ms, &ntp_time_ms);
const bool keep_rendering =
renderer_->Render(rtc::ArrayView<const int16_t>(
playout_buffer_.data(), samples_out));
if (!keep_rendering) {
rendering_ = false;
done_rendering_.Set();
}
}
}
if (rendering_) {
size_t samples_out = 0;
int64_t elapsed_time_ms = -1;
int64_t ntp_time_ms = -1;
const int sampling_frequency = renderer_->SamplingFrequency();
audio_callback_->NeedMorePlayData(
SamplesPerFrame(sampling_frequency), 2, renderer_->NumChannels(),
sampling_frequency, playout_buffer_.data(), samples_out,
&elapsed_time_ms, &ntp_time_ms);
const bool keep_rendering = renderer_->Render(
rtc::ArrayView<const int16_t>(playout_buffer_.data(), samples_out));
if (!keep_rendering) {
rendering_ = false;
done_rendering_.Set();
time_us += process_interval_us_;
int64_t time_left_us = time_us - rtc::TimeMicros();
if (time_left_us < 0) {
if (!logged_once) {
RTC_LOG(LS_ERROR) << "ProcessAudio is too slow";
logged_once = true;
}
} else {
while (time_left_us > 1000) {
if (rtc::Thread::SleepMs(time_left_us / 1000))
break;
time_left_us = time_us - rtc::TimeMicros();
}
}
}
tick_->Wait(WEBRTC_EVENT_INFINITE);
}
static bool Run(void* obj) {
static void Run(void* obj) {
static_cast<TestAudioDeviceModuleImpl*>(obj)->ProcessAudio();
return true;
}
const std::unique_ptr<Capturer> capturer_ RTC_GUARDED_BY(lock_);
const std::unique_ptr<Renderer> renderer_ RTC_GUARDED_BY(lock_);
const float speed_;
const int64_t process_interval_us_;
rtc::CriticalSection lock_;
AudioTransport* audio_callback_ RTC_GUARDED_BY(lock_);
@@ -211,8 +235,8 @@ class TestAudioDeviceModuleImpl
std::vector<int16_t> playout_buffer_ RTC_GUARDED_BY(lock_);
rtc::BufferT<int16_t> recording_buffer_ RTC_GUARDED_BY(lock_);
std::unique_ptr<EventTimerWrapper> tick_;
std::unique_ptr<rtc::PlatformThread> thread_;
bool stop_thread_ RTC_GUARDED_BY(lock_);
};
// A fake capturer that generates pulses with random samples between