Remove CaptureManager and related calls in ChannelManager.

Removed unused screencast APIs.

BUG=webrtc:5426

Review URL: https://codereview.webrtc.org/1757843003

Cr-Commit-Position: refs/heads/master@{#11896}
This commit is contained in:
perkj
2016-03-07 17:34:13 -08:00
committed by Commit bot
parent 6ebc4d3f7d
commit c11b184837
12 changed files with 10 additions and 881 deletions

View File

@ -18,8 +18,6 @@
#include "webrtc/api/jsepsessiondescription.h"
#include "webrtc/api/peerconnection.h"
#include "webrtc/api/sctputils.h"
#include "webrtc/api/streamcollection.h"
#include "webrtc/api/test/fakedtlsidentitystore.h"
#include "webrtc/api/videotrack.h"
#include "webrtc/api/webrtcsession.h"
@ -323,7 +321,6 @@ class WebRtcSessionTest
channel_manager_(
new cricket::ChannelManager(media_engine_,
data_engine_,
new cricket::CaptureManager(),
rtc::Thread::Current())),
fake_call_(webrtc::Call::Config()),
media_controller_(

View File

@ -10,290 +10,4 @@
#include "webrtc/media/base/capturemanager.h"
#include <algorithm>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/media/base/videocapturer.h"
namespace cricket {
// CaptureManager helper class: per-capturer bookkeeping. Tracks how many
// times capture was started on one VideoCapturer (start_count_) and which
// resolutions were requested, each with its own ref count, so the manager
// can compute the single "best" format to actually run the capturer at.
class VideoCapturerState {
public:
// Format returned by GetHighestFormat when no resolution has been
// requested: 640x360 @ 30fps, any fourcc.
static const VideoFormatPod kDefaultCaptureFormat;
// Constructing a state counts as one outstanding start (start_count_ == 1).
explicit VideoCapturerState(VideoCapturer* capturer);
~VideoCapturerState() {}
// Adds one reference to |desired_format|, inserting it if new.
void AddCaptureResolution(const VideoFormat& desired_format);
// Drops one reference to |format| and erases it at zero. Returns false if
// |format| was never added.
bool RemoveCaptureResolution(const VideoFormat& format);
// Returns a synthetic format combining the max width/height and the min
// frame interval (max fps) over all requested formats.
VideoFormat GetHighestFormat(VideoCapturer* video_capturer) const;
// Increment/decrement the start ref count; both return the new count.
int IncCaptureStartRef();
int DecCaptureStartRef();
VideoCapturer* GetVideoCapturer() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return video_capturer_;
}
int start_count() const {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return start_count_;
}
private:
// One requested resolution plus how many callers asked for it.
struct CaptureResolutionInfo {
VideoFormat video_format;
int format_ref_count;
};
typedef std::vector<CaptureResolutionInfo> CaptureFormats;
rtc::ThreadChecker thread_checker_;
VideoCapturer* video_capturer_;  // Not owned.
int start_count_;
CaptureFormats capture_formats_;
};
// Fallback capture format: 640x360 @ 30fps, any pixel format.
const VideoFormatPod VideoCapturerState::kDefaultCaptureFormat = {
640, 360, FPS_TO_INTERVAL(30), FOURCC_ANY
};
// A state is only created when capture is first started, so it begins with
// one outstanding start reference.
VideoCapturerState::VideoCapturerState(VideoCapturer* capturer)
: video_capturer_(capturer), start_count_(1) {}
void VideoCapturerState::AddCaptureResolution(
const VideoFormat& desired_format) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
for (CaptureFormats::iterator iter = capture_formats_.begin();
iter != capture_formats_.end(); ++iter) {
if (desired_format == iter->video_format) {
++(iter->format_ref_count);
return;
}
}
CaptureResolutionInfo capture_resolution = { desired_format, 1 };
capture_formats_.push_back(capture_resolution);
}
bool VideoCapturerState::RemoveCaptureResolution(const VideoFormat& format) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
for (CaptureFormats::iterator iter = capture_formats_.begin();
iter != capture_formats_.end(); ++iter) {
if (format == iter->video_format) {
--(iter->format_ref_count);
if (iter->format_ref_count == 0) {
capture_formats_.erase(iter);
}
return true;
}
}
return false;
}
// Combines all requested formats into one: the largest width, the largest
// height and the smallest frame interval (i.e. the highest fps) seen. With
// no requests outstanding, the default capture format is returned.
VideoFormat VideoCapturerState::GetHighestFormat(
    VideoCapturer* video_capturer) const {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (capture_formats_.empty()) {
    return VideoFormat(kDefaultCaptureFormat);
  }
  VideoFormat best(0, 0, VideoFormat::FpsToInterval(1), FOURCC_ANY);
  for (const CaptureResolutionInfo& info : capture_formats_) {
    best.width = std::max(best.width, info.video_format.width);
    best.height = std::max(best.height, info.video_format.height);
    best.interval = std::min(best.interval, info.video_format.interval);
  }
  return best;
}
// Adds one start reference and returns the new count.
int VideoCapturerState::IncCaptureStartRef() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  ++start_count_;
  return start_count_;
}
// Removes one start reference (never going below zero) and returns the new
// count. The count may already be 0 if a capturer was added but never
// started.
int VideoCapturerState::DecCaptureStartRef() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (start_count_ > 0) {
    --start_count_;
  }
  return start_count_;
}
CaptureManager::CaptureManager() {
// Allowing construction of manager in any thread as long as subsequent calls
// are all from the same thread.
thread_checker_.DetachFromThread();
}
CaptureManager::~CaptureManager() {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
// Since we don't own any of the capturers, all capturers should have been
// cleaned up before we get here. In fact, in the normal shutdown sequence,
// all capturers *will* be shut down by now, so trying to stop them here
// will crash. If we're still tracking any, it's a dangling pointer.
// TODO(hbos): RTC_DCHECK instead of RTC_CHECK until we figure out why
// capture_states_ is not always empty here.
RTC_DCHECK(capture_states_.empty());
}
// Starts capture on |video_capturer| for |desired_format|, or just adds a
// reference if the capturer is already running. Returns false for a
// zero-sized format, a null capturer, or if the capturer fails to start.
bool CaptureManager::StartVideoCapture(VideoCapturer* video_capturer,
const VideoFormat& desired_format) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (desired_format.width == 0 || desired_format.height == 0) {
return false;
}
if (!video_capturer) {
return false;
}
VideoCapturerState* capture_state = GetCaptureState(video_capturer);
if (capture_state) {
const int ref_count = capture_state->IncCaptureStartRef();
if (ref_count < 1) {
ASSERT(false);
}
// VideoCapturer has already been started. Don't start listening to
// callbacks since that has already been done.
capture_state->AddCaptureResolution(desired_format);
return true;
}
// First start for this capturer: register it, record the requested format
// and start capturing at the best matching format.
if (!RegisterVideoCapturer(video_capturer)) {
return false;
}
capture_state = GetCaptureState(video_capturer);
ASSERT(capture_state != NULL);
capture_state->AddCaptureResolution(desired_format);
if (!StartWithBestCaptureFormat(capture_state, video_capturer)) {
// Roll back the registration so a failed start leaves no state behind.
UnregisterVideoCapturer(capture_state);
return false;
}
return true;
}
// Releases one start reference on |video_capturer| for |format|. When the
// last reference is gone the capturer is unregistered (and stopped).
// Returns false if the capturer or the format is unknown.
bool CaptureManager::StopVideoCapture(VideoCapturer* video_capturer,
                                      const VideoFormat& format) {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  VideoCapturerState* state = GetCaptureState(video_capturer);
  if (!state || !state->RemoveCaptureResolution(format)) {
    return false;
  }
  if (state->DecCaptureStartRef() == 0) {
    // Unregistering cannot fail as |state| is not NULL.
    UnregisterVideoCapturer(state);
  }
  return true;
}
// Connects |sink| to |video_capturer| so the sink is fed frames directly
// from the capturer. Null arguments are tolerated and silently ignored.
void CaptureManager::AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
// TODO(nisse): Do we really need to tolerate NULL inputs?
if (!video_capturer || !sink) {
return;
}
rtc::VideoSinkWants wants;
// Renderers must be able to apply rotation.
wants.rotation_applied = false;
video_capturer->AddOrUpdateSink(sink, wants);
}
// Detaches |sink| from |video_capturer|. Null arguments are ignored.
void CaptureManager::RemoveVideoSink(
VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
if (!video_capturer || !sink) {
return;
}
video_capturer->RemoveSink(sink);
}
// True if bookkeeping state exists for |video_capturer|.
bool CaptureManager::IsCapturerRegistered(VideoCapturer* video_capturer) const {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
return GetCaptureState(video_capturer) != NULL;
}
// Creates the bookkeeping state for |video_capturer| and repeats its
// state-change signal through SignalCapturerStateChange. Always succeeds.
bool CaptureManager::RegisterVideoCapturer(VideoCapturer* video_capturer) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
VideoCapturerState* capture_state = new VideoCapturerState(video_capturer);
capture_states_[video_capturer] = capture_state;
SignalCapturerStateChange.repeat(video_capturer->SignalStateChange);
return true;
}
// Removes and deletes the bookkeeping state for a capturer, disconnects the
// repeated state-change signal, and stops the capturer if it is still
// running (emitting a final CS_STOPPED from here).
void CaptureManager::UnregisterVideoCapturer(
VideoCapturerState* capture_state) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
VideoCapturer* video_capturer = capture_state->GetVideoCapturer();
capture_states_.erase(video_capturer);
delete capture_state;
// When unregistering a VideoCapturer, the CaptureManager needs to unregister
// from all state change callbacks from the VideoCapturer. E.g. to avoid
// problems with multiple callbacks if registering the same VideoCapturer
// multiple times. The VideoCapturer will update the capturer state. However,
// this is done through Post-calls which means it may happen at any time. If
// the CaptureManager no longer is listening to the VideoCapturer it will not
// receive those callbacks. Here it is made sure that the the callback is
// indeed sent by letting the ChannelManager do the signaling. The downside is
// that the callback may happen before the VideoCapturer is stopped. However,
// for the CaptureManager it doesn't matter as it will no longer receive any
// frames from the VideoCapturer.
SignalCapturerStateChange.stop(video_capturer->SignalStateChange);
if (video_capturer->IsRunning()) {
video_capturer->Stop();
SignalCapturerStateChange(video_capturer, CS_STOPPED);
}
}
// Asks the capturer for its best match to the highest requested format and
// starts capturing at it. On no match, logs the asked-for and supported
// formats and returns false without starting.
bool CaptureManager::StartWithBestCaptureFormat(
VideoCapturerState* capture_state, VideoCapturer* video_capturer) {
RTC_DCHECK(thread_checker_.CalledOnValidThread());
VideoFormat highest_asked_format =
capture_state->GetHighestFormat(video_capturer);
VideoFormat capture_format;
if (!video_capturer->GetBestCaptureFormat(highest_asked_format,
&capture_format)) {
LOG(LS_WARNING) << "Unsupported format:"
<< " width=" << highest_asked_format.width
<< " height=" << highest_asked_format.height
<< ". Supported formats are:";
const std::vector<VideoFormat>* formats =
video_capturer->GetSupportedFormats();
ASSERT(formats != NULL);
for (std::vector<VideoFormat>::const_iterator i = formats->begin();
i != formats->end(); ++i) {
const VideoFormat& format = *i;
LOG(LS_WARNING) << " " << GetFourccName(format.fourcc)
<< ":" << format.width << "x" << format.height << "x"
<< format.framerate();
}
return false;
}
return video_capturer->StartCapturing(capture_format);
}
// Looks up the bookkeeping state for |video_capturer|; NULL if the capturer
// is not registered.
VideoCapturerState* CaptureManager::GetCaptureState(
    VideoCapturer* video_capturer) const {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  CaptureStates::const_iterator found = capture_states_.find(video_capturer);
  return found == capture_states_.end() ? NULL : found->second;
}
} // namespace cricket
// TODO(perkj): Remove this file once Chrome's gyp file doesn't depend on it.

View File

@ -8,74 +8,4 @@
* be found in the AUTHORS file in the root of the source tree.
*/
// The CaptureManager class manages VideoCapturers to make it possible to share
// the same VideoCapturers across multiple instances. E.g. if two instances of
// some class want to listen to same VideoCapturer they can't individually stop
// and start capturing as doing so will affect the other instance.
// The class employs reference counting on starting and stopping of capturing of
// frames such that if anyone is still listening it will not be stopped. The
// class also provides APIs for attaching VideoRenderers to a specific capturer
// such that the VideoRenderers are fed frames directly from the capturer.
// CaptureManager is Thread-unsafe. This means that none of its APIs may be
// called concurrently. Note that callbacks are called by the VideoCapturer's
// thread which is normally a separate unmarshalled thread and thus normally
// require lock protection.
#ifndef WEBRTC_MEDIA_BASE_CAPTUREMANAGER_H_
#define WEBRTC_MEDIA_BASE_CAPTUREMANAGER_H_
#include <map>
#include <vector>
#include "webrtc/base/sigslotrepeater.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/media/base/videocommon.h"
#include "webrtc/media/base/videocapturer.h"
#include "webrtc/media/base/videosinkinterface.h"
namespace cricket {
class VideoFrame;
class VideoCapturerState;
// Shares VideoCapturers between clients via ref-counted start/stop; see the
// file comment above for the full contract. Thread-unsafe: all calls must
// come from one thread.
class CaptureManager : public sigslot::has_slots<> {
public:
enum RestartOptions {
kRequestRestart,
kForceRestart
};
CaptureManager();
virtual ~CaptureManager();
// Ref-counted start/stop of capturing on |video_capturer| for a requested
// format. Stop returns false for a capturer/format that was never started.
virtual bool StartVideoCapture(VideoCapturer* video_capturer,
const VideoFormat& desired_format);
virtual bool StopVideoCapture(VideoCapturer* video_capturer,
const VideoFormat& format);
// Attach/detach a sink that is fed frames directly from the capturer.
virtual void AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
virtual void RemoveVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
// Repeats the state-change signals of all registered capturers.
sigslot::repeater2<VideoCapturer*, CaptureState> SignalCapturerStateChange;
private:
typedef std::map<VideoCapturer*, VideoCapturerState*> CaptureStates;
bool IsCapturerRegistered(VideoCapturer* video_capturer) const;
bool RegisterVideoCapturer(VideoCapturer* video_capturer);
void UnregisterVideoCapturer(VideoCapturerState* capture_state);
bool StartWithBestCaptureFormat(VideoCapturerState* capture_info,
VideoCapturer* video_capturer);
VideoCapturerState* GetCaptureState(VideoCapturer* video_capturer) const;
rtc::ThreadChecker thread_checker_;
CaptureStates capture_states_;  // Values owned; capturers are not owned.
};
} // namespace cricket
#endif // WEBRTC_MEDIA_BASE_CAPTUREMANAGER_H_
// TODO(perkj): Remove this file once Chrome's gyp file doesn't depend on it.

View File

@ -1,162 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/media/base/capturemanager.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/base/gunit.h"
#include "webrtc/base/sigslot.h"
#include "webrtc/media/base/fakevideocapturer.h"
#include "webrtc/media/base/fakevideorenderer.h"
const int kMsCallbackWait = 50;
const int kFps = 30;
const cricket::VideoFormatPod kCameraFormats[] = {
{640, 480, cricket::VideoFormat::FpsToInterval(kFps), cricket::FOURCC_I420},
{320, 240, cricket::VideoFormat::FpsToInterval(kFps), cricket::FOURCC_I420}
};
// Fixture owning a fake capturer, a fake renderer and a real CaptureManager,
// and counting the manager's capturer state-change callbacks.
class CaptureManagerTest : public ::testing::Test, public sigslot::has_slots<> {
public:
CaptureManagerTest()
: capture_manager_(),
callback_count_(0),
format_vga_(kCameraFormats[0]),
format_qvga_(kCameraFormats[1]) {
}
virtual void SetUp() {
PopulateSupportedFormats();
capture_state_ = cricket::CS_STOPPED;
capture_manager_.SignalCapturerStateChange.connect(
this,
&CaptureManagerTest::OnCapturerStateChange);
}
// Teaches the fake capturer to report the two test camera formats.
void PopulateSupportedFormats() {
std::vector<cricket::VideoFormat> formats;
for (uint32_t i = 0; i < arraysize(kCameraFormats); ++i) {
formats.push_back(cricket::VideoFormat(kCameraFormats[i]));
}
video_capturer_.ResetSupportedFormats(formats);
}
int NumFramesRendered() { return video_renderer_.num_rendered_frames(); }
// True if the last rendered frame matched |format|'s dimensions.
bool WasRenderedResolution(cricket::VideoFormat format) {
return format.width == video_renderer_.width() &&
format.height == video_renderer_.height();
}
cricket::CaptureState capture_state() { return capture_state_; }
int callback_count() { return callback_count_; }
// Slot for SignalCapturerStateChange: records the latest state and counts
// invocations.
void OnCapturerStateChange(cricket::VideoCapturer* capturer,
cricket::CaptureState capture_state) {
capture_state_ = capture_state;
++callback_count_;
}
protected:
cricket::FakeVideoCapturer video_capturer_;
cricket::FakeVideoRenderer video_renderer_;
cricket::CaptureManager capture_manager_;
cricket::CaptureState capture_state_;  // Last state seen in the callback.
int callback_count_;
cricket::VideoFormat format_vga_;
cricket::VideoFormat format_qvga_;
};
// Incorrect use cases: stopping before starting must fail, and a NULL sink
// must be tolerated.
TEST_F(CaptureManagerTest, InvalidAddingRemoving) {
EXPECT_FALSE(capture_manager_.StopVideoCapture(&video_capturer_,
cricket::VideoFormat()));
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_vga_));
EXPECT_EQ_WAIT(cricket::CS_RUNNING, capture_state(), kMsCallbackWait);
EXPECT_EQ(1, callback_count());
// NULL argument currently allowed, and does nothing.
capture_manager_.AddVideoSink(&video_capturer_, NULL);
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_, format_vga_));
}
// Valid use cases
// Starting VGA first and then QVGA must keep capturing at the first (higher)
// resolution, and each start must be balanced by exactly one stop.
TEST_F(CaptureManagerTest, KeepFirstResolutionHigh) {
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_vga_));
EXPECT_EQ_WAIT(cricket::CS_RUNNING, capture_state(), kMsCallbackWait);
EXPECT_EQ(1, callback_count());
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());
// Renderer should be fed frames with the resolution of format_vga_.
EXPECT_TRUE(WasRenderedResolution(format_vga_));
// Start again with one more format.
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_qvga_));
// Existing renderers should be fed frames with the resolution of format_vga_.
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_TRUE(WasRenderedResolution(format_vga_));
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_, format_vga_));
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_,
format_qvga_));
// Extra stops beyond the matching starts must fail.
EXPECT_FALSE(capture_manager_.StopVideoCapture(&video_capturer_,
format_vga_));
EXPECT_FALSE(capture_manager_.StopVideoCapture(&video_capturer_,
format_qvga_));
}
// Should pick the lowest resolution as the highest resolution is not chosen
// until after capturing has started. This ensures that no particular resolution
// is favored over others.
TEST_F(CaptureManagerTest, KeepFirstResolutionLow) {
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_qvga_));
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_vga_));
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
EXPECT_EQ_WAIT(1, callback_count(), kMsCallbackWait);
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());
// QVGA was requested first, so frames keep its resolution.
EXPECT_TRUE(WasRenderedResolution(format_qvga_));
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_,
format_qvga_));
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_,
format_vga_));
}
// Ensure that the reference counting is working when multiple start and
// multiple stop calls are made.
TEST_F(CaptureManagerTest, MultipleStartStops) {
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_vga_));
// Add video capturer but with different format.
EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
format_qvga_));
EXPECT_EQ_WAIT(cricket::CS_RUNNING, capture_state(), kMsCallbackWait);
// Only one RUNNING callback despite two starts.
EXPECT_EQ(1, callback_count());
capture_manager_.AddVideoSink(&video_capturer_, &video_renderer_);
// Ensure that a frame can be captured when two start calls have been made.
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(1, NumFramesRendered());
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_, format_vga_));
// Video should still render since there has been two start calls but only
// one stop call.
EXPECT_TRUE(video_capturer_.CaptureFrame());
EXPECT_EQ(2, NumFramesRendered());
EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_,
format_qvga_));
// Second (final) stop triggers the STOPPED callback.
EXPECT_EQ_WAIT(cricket::CS_STOPPED, capture_state(), kMsCallbackWait);
EXPECT_EQ(2, callback_count());
// Last stop call should fail as it is one more than the number of start
// calls.
EXPECT_FALSE(capture_manager_.StopVideoCapture(&video_capturer_,
format_vga_));
}

View File

@ -1,31 +0,0 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MEDIA_BASE_FAKECAPTUREMANAGER_H_
#define WEBRTC_MEDIA_BASE_FAKECAPTUREMANAGER_H_
#include "webrtc/media/base/capturemanager.h"
namespace cricket {
// Test double for CaptureManager that makes sink attachment a no-op, so
// tests can exercise callers without wiring a real capture pipeline.
class FakeCaptureManager : public CaptureManager {
public:
FakeCaptureManager() {}
~FakeCaptureManager() {}
void AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) override {}
void RemoveVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink) override {}
};
} // namespace cricket
#endif // WEBRTC_MEDIA_BASE_FAKECAPTUREMANAGER_H_

View File

@ -44,7 +44,6 @@
'<(DEPTH)/testing/gtest',
],
'sources': [
'base/fakecapturemanager.h',
'base/fakemediaengine.h',
'base/fakenetworkinterface.h',
'base/fakertp.h',
@ -77,7 +76,6 @@
'rtc_unittest_main',
],
'sources': [
'base/capturemanager_unittest.cc',
'base/codec_unittest.cc',
'base/rtpdataengine_unittest.cc',
'base/rtpdump_unittest.cc',

View File

@ -40,7 +40,6 @@ bool SetRawAudioSink_w(VoiceMediaChannel* channel,
enum {
MSG_EARLYMEDIATIMEOUT = 1,
MSG_SCREENCASTWINDOWEVENT,
MSG_RTPPACKET,
MSG_RTCPPACKET,
MSG_CHANNEL_ERROR,
@ -66,13 +65,6 @@ struct PacketMessageData : public rtc::MessageData {
rtc::PacketOptions options;
};
// Carries a screencast window event for a send ssrc to the signaling thread
// (posted in VideoChannel::OnScreencastWindowEvent).
struct ScreencastEventMessageData : public rtc::MessageData {
ScreencastEventMessageData(uint32_t s, rtc::WindowEvent we)
: ssrc(s), event(we) {}
uint32_t ssrc;
rtc::WindowEvent event;
};
struct VoiceChannelErrorMessageData : public rtc::MessageData {
VoiceChannelErrorMessageData(uint32_t in_ssrc,
VoiceMediaChannel::Error in_error)
@ -1632,8 +1624,7 @@ VideoChannel::VideoChannel(rtc::Thread* thread,
media_channel,
transport_controller,
content_name,
rtcp),
previous_we_(rtc::WE_CLOSE) {}
rtcp) {}
bool VideoChannel::Init() {
if (!BaseChannel::Init()) {
@ -1643,17 +1634,6 @@ bool VideoChannel::Init() {
}
VideoChannel::~VideoChannel() {
std::vector<uint32_t> screencast_ssrcs;
ScreencastMap::iterator iter;
while (!screencast_capturers_.empty()) {
if (!RemoveScreencast(screencast_capturers_.begin()->first)) {
LOG(LS_ERROR) << "Unable to delete screencast with ssrc "
<< screencast_capturers_.begin()->first;
ASSERT(false);
break;
}
}
StopMediaMonitor();
// this can't be done in the base class, since it calls a virtual
DisableMedia_w();
@ -1668,24 +1648,11 @@ bool VideoChannel::SetSink(uint32_t ssrc,
return true;
}
// Public screencast API; each call marshals onto the worker thread.
// Takes ownership of |capturer| (deleted in RemoveScreencast_w).
bool VideoChannel::AddScreencast(uint32_t ssrc, VideoCapturer* capturer) {
return worker_thread()->Invoke<bool>(Bind(
&VideoChannel::AddScreencast_w, this, ssrc, capturer));
}
// Forwards the capturer straight to the media channel on the worker thread.
bool VideoChannel::SetCapturer(uint32_t ssrc, VideoCapturer* capturer) {
return InvokeOnWorker(Bind(&VideoMediaChannel::SetCapturer,
media_channel(), ssrc, capturer));
}
bool VideoChannel::RemoveScreencast(uint32_t ssrc) {
return InvokeOnWorker(Bind(&VideoChannel::RemoveScreencast_w, this, ssrc));
}
bool VideoChannel::IsScreencasting() {
return InvokeOnWorker(Bind(&VideoChannel::IsScreencasting_w, this));
}
bool VideoChannel::SetVideoSend(uint32_t ssrc,
bool mute,
const VideoOptions* options) {
@ -1825,45 +1792,8 @@ bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
return true;
}
// Worker-thread implementation: records |capturer| under |ssrc| and hooks
// its state-change signal. Fails if the ssrc already has a screencast.
bool VideoChannel::AddScreencast_w(uint32_t ssrc, VideoCapturer* capturer) {
if (screencast_capturers_.find(ssrc) != screencast_capturers_.end()) {
return false;
}
capturer->SignalStateChange.connect(this, &VideoChannel::OnStateChange);
screencast_capturers_[ssrc] = capturer;
return true;
}
// Worker-thread implementation: deletes the owned capturer for |ssrc|.
// Fails if no screencast was added under that ssrc.
bool VideoChannel::RemoveScreencast_w(uint32_t ssrc) {
ScreencastMap::iterator iter = screencast_capturers_.find(ssrc);
if (iter == screencast_capturers_.end()) {
return false;
}
// Clean up VideoCapturer.
delete iter->second;
screencast_capturers_.erase(iter);
return true;
}
// True if any screencast capturer has been added (started or not).
bool VideoChannel::IsScreencasting_w() const {
return !screencast_capturers_.empty();
}
// Signaling-thread delivery of a window event to external listeners.
void VideoChannel::OnScreencastWindowEvent_s(uint32_t ssrc,
rtc::WindowEvent we) {
ASSERT(signaling_thread() == rtc::Thread::Current());
SignalScreencastWindowEvent(ssrc, we);
}
void VideoChannel::OnMessage(rtc::Message *pmsg) {
switch (pmsg->message_id) {
case MSG_SCREENCASTWINDOWEVENT: {
const ScreencastEventMessageData* data =
static_cast<ScreencastEventMessageData*>(pmsg->pdata);
OnScreencastWindowEvent_s(data->ssrc, data->event);
delete data;
break;
}
case MSG_CHANNEL_ERROR: {
const VideoChannelErrorMessageData* data =
static_cast<VideoChannelErrorMessageData*>(pmsg->pdata);
@ -1889,48 +1819,6 @@ void VideoChannel::OnMediaMonitorUpdate(
SignalMediaMonitor(this, info);
}
// Marshals a screencast window event onto the signaling thread; the posted
// message is handled in OnMessage, which forwards to
// OnScreencastWindowEvent_s and frees the message data.
void VideoChannel::OnScreencastWindowEvent(uint32_t ssrc,
rtc::WindowEvent event) {
ScreencastEventMessageData* pdata =
new ScreencastEventMessageData(ssrc, event);
signaling_thread()->Post(this, MSG_SCREENCASTWINDOWEVENT, pdata);
}
// Maps screencast capture-state changes to window events: CS_STOPPED ->
// WE_CLOSE, CS_PAUSED -> WE_MINIMIZE, and CS_RUNNING -> WE_RESTORE (only
// when the previous event was WE_MINIMIZE). Other transitions are ignored.
void VideoChannel::OnStateChange(VideoCapturer* capturer, CaptureState ev) {
// Map capturer events to window events. In the future we may want to simply
// pass these events up directly.
rtc::WindowEvent we;
if (ev == CS_STOPPED) {
we = rtc::WE_CLOSE;
} else if (ev == CS_PAUSED) {
we = rtc::WE_MINIMIZE;
} else if (ev == CS_RUNNING && previous_we_ == rtc::WE_MINIMIZE) {
we = rtc::WE_RESTORE;
} else {
return;
}
previous_we_ = we;
uint32_t ssrc = 0;
// Only forward events for capturers we are tracking as screencasts.
if (!GetLocalSsrc(capturer, &ssrc)) {
return;
}
OnScreencastWindowEvent(ssrc, we);
}
bool VideoChannel::GetLocalSsrc(const VideoCapturer* capturer, uint32_t* ssrc) {
*ssrc = 0;
for (ScreencastMap::iterator iter = screencast_capturers_.begin();
iter != screencast_capturers_.end(); ++iter) {
if (iter->second == capturer) {
*ssrc = iter->first;
return true;
}
}
return false;
}
// Fills |crypto_suites| with the SRTP crypto suites supported for video.
void VideoChannel::GetSrtpCryptoSuites(std::vector<int>* crypto_suites) const {
GetSupportedVideoCryptoSuites(crypto_suites);
}

View File

@ -8,8 +8,8 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TALK_SESSION_MEDIA_CHANNEL_H_
#define TALK_SESSION_MEDIA_CHANNEL_H_
#ifndef WEBRTC_PC_CHANNEL_H_
#define WEBRTC_PC_CHANNEL_H_
#include <map>
#include <set>
@ -439,16 +439,7 @@ class VideoChannel : public BaseChannel {
}
bool SetSink(uint32_t ssrc, rtc::VideoSinkInterface<VideoFrame>* sink);
// TODO(pthatcher): Refactor to use a "capture id" instead of an
// ssrc here as the "key".
// Passes ownership of the capturer to the channel.
bool AddScreencast(uint32_t ssrc, VideoCapturer* capturer);
bool SetCapturer(uint32_t ssrc, VideoCapturer* capturer);
bool RemoveScreencast(uint32_t ssrc);
// True if we've added a screencast. Doesn't matter if the capturer
// has been started or not.
bool IsScreencasting();
// Get statistics about the current media session.
bool GetStats(VideoMediaInfo* stats);
@ -458,13 +449,10 @@ class VideoChannel : public BaseChannel {
void StartMediaMonitor(int cms);
void StopMediaMonitor();
sigslot::signal2<VideoChannel*, const VideoMediaInfo&> SignalMediaMonitor;
sigslot::signal2<uint32_t, rtc::WindowEvent> SignalScreencastWindowEvent;
bool SetVideoSend(uint32_t ssrc, bool enable, const VideoOptions* options);
private:
typedef std::map<uint32_t, VideoCapturer*> ScreencastMap;
// overrides from BaseChannel
virtual void ChangeState();
virtual const ContentInfo* GetFirstContent(const SessionDescription* sdesc);
@ -474,11 +462,6 @@ class VideoChannel : public BaseChannel {
virtual bool SetRemoteContent_w(const MediaContentDescription* content,
ContentAction action,
std::string* error_desc);
bool AddScreencast_w(uint32_t ssrc, VideoCapturer* capturer);
bool RemoveScreencast_w(uint32_t ssrc);
void OnScreencastWindowEvent_s(uint32_t ssrc, rtc::WindowEvent we);
bool IsScreencasting_w() const;
bool GetStats_w(VideoMediaInfo* stats);
virtual void OnMessage(rtc::Message* pmsg);
@ -487,15 +470,9 @@ class VideoChannel : public BaseChannel {
ConnectionMonitor* monitor, const std::vector<ConnectionInfo>& infos);
virtual void OnMediaMonitorUpdate(
VideoMediaChannel* media_channel, const VideoMediaInfo& info);
virtual void OnScreencastWindowEvent(uint32_t ssrc, rtc::WindowEvent event);
virtual void OnStateChange(VideoCapturer* capturer, CaptureState ev);
bool GetLocalSsrc(const VideoCapturer* capturer, uint32_t* ssrc);
ScreencastMap screencast_capturers_;
rtc::scoped_ptr<VideoMediaMonitor> media_monitor_;
rtc::WindowEvent previous_we_;
// Last VideoSendParameters sent down to the media_channel() via
// SetSendParameters.
VideoSendParameters last_send_params_;
@ -626,4 +603,4 @@ class DataChannel : public BaseChannel {
} // namespace cricket
#endif // TALK_SESSION_MEDIA_CHANNEL_H_
#endif // WEBRTC_PC_CHANNEL_H_

View File

@ -2256,36 +2256,6 @@ TEST_F(VideoChannelTest, TestStreams) {
Base::TestStreams();
}
// Verifies that capturer state changes on an added screencast surface as
// the corresponding window events (paused -> minimize, running -> restore,
// stopped -> close) through SignalScreencastWindowEvent.
TEST_F(VideoChannelTest, TestScreencastEvents) {
const int kTimeoutMs = 500;
TestInit();
cricket::ScreencastEventCatcher catcher;
channel1_->SignalScreencastWindowEvent.connect(
&catcher,
&cricket::ScreencastEventCatcher::OnEvent);
rtc::scoped_ptr<cricket::FakeScreenCapturerFactory>
screen_capturer_factory(new cricket::FakeScreenCapturerFactory());
cricket::VideoCapturer* screen_capturer = screen_capturer_factory->Create(
ScreencastId(WindowId(0)));
ASSERT_TRUE(screen_capturer != NULL);
EXPECT_TRUE(channel1_->AddScreencast(0, screen_capturer));
EXPECT_EQ_WAIT(cricket::CS_STOPPED, screen_capturer_factory->capture_state(),
kTimeoutMs);
screen_capturer->SignalStateChange(screen_capturer, cricket::CS_PAUSED);
EXPECT_EQ_WAIT(rtc::WE_MINIMIZE, catcher.event(), kTimeoutMs);
screen_capturer->SignalStateChange(screen_capturer, cricket::CS_RUNNING);
EXPECT_EQ_WAIT(rtc::WE_RESTORE, catcher.event(), kTimeoutMs);
screen_capturer->SignalStateChange(screen_capturer, cricket::CS_STOPPED);
EXPECT_EQ_WAIT(rtc::WE_CLOSE, catcher.event(), kTimeoutMs);
EXPECT_TRUE(channel1_->RemoveScreencast(0));
}
// Plain delegation to the shared base-class test.
TEST_F(VideoChannelTest, TestUpdateStreamsInLocalContent) {
Base::TestUpdateStreamsInLocalContent();
}

View File

@ -20,15 +20,12 @@
#include "webrtc/base/bind.h"
#include "webrtc/base/common.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/sigslotrepeater.h"
#include "webrtc/base/stringencode.h"
#include "webrtc/base/stringutils.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/media/base/capturemanager.h"
#include "webrtc/media/base/device.h"
#include "webrtc/media/base/hybriddataengine.h"
#include "webrtc/media/base/rtpdataengine.h"
#include "webrtc/media/base/videocapturer.h"
#ifdef HAVE_SCTP
#include "webrtc/media/sctp/sctpdataengine.h"
#endif
@ -36,22 +33,11 @@
namespace cricket {
// Message ids for ChannelManager's OnMessage dispatch.
enum {
MSG_VIDEOCAPTURESTATE = 1,
};
using rtc::Bind;
static const int kNotSetOutputVolume = -1;
// Message payload pairing a capturer with its new capture state.
struct CaptureStateParams : public rtc::MessageData {
CaptureStateParams(cricket::VideoCapturer* c, cricket::CaptureState s)
: capturer(c),
state(s) {}
cricket::VideoCapturer* capturer;
cricket::CaptureState state;
};
static DataEngineInterface* ConstructDataEngine() {
#ifdef HAVE_SCTP
return new HybridDataEngine(new RtpDataEngine(), new SctpDataEngine());
@ -62,35 +48,28 @@ static DataEngineInterface* ConstructDataEngine() {
ChannelManager::ChannelManager(MediaEngineInterface* me,
DataEngineInterface* dme,
CaptureManager* cm,
rtc::Thread* worker_thread) {
Construct(me, dme, cm, worker_thread);
Construct(me, dme, worker_thread);
}
ChannelManager::ChannelManager(MediaEngineInterface* me,
rtc::Thread* worker_thread) {
Construct(me,
ConstructDataEngine(),
new CaptureManager(),
worker_thread);
}
void ChannelManager::Construct(MediaEngineInterface* me,
DataEngineInterface* dme,
CaptureManager* cm,
rtc::Thread* worker_thread) {
media_engine_.reset(me);
data_media_engine_.reset(dme);
capture_manager_.reset(cm);
initialized_ = false;
main_thread_ = rtc::Thread::Current();
worker_thread_ = worker_thread;
audio_output_volume_ = kNotSetOutputVolume;
capturing_ = false;
enable_rtx_ = false;
capture_manager_->SignalCapturerStateChange.connect(
this, &ChannelManager::OnVideoCaptureStateChange);
}
ChannelManager::~ChannelManager() {
@ -103,8 +82,8 @@ ChannelManager::~ChannelManager() {
// shutdown.
ShutdownSrtp();
}
// Some deletes need to be on the worker thread for thread safe destruction,
// this includes the media engine and capture manager.
// The media engine needs to be deleted on the worker thread for thread safe
// destruction,
worker_thread_->Invoke<void>(Bind(
&ChannelManager::DestructorDeletes_w, this));
}
@ -214,7 +193,6 @@ void ChannelManager::Terminate() {
void ChannelManager::DestructorDeletes_w() {
ASSERT(worker_thread_ == rtc::Thread::Current());
media_engine_.reset(NULL);
capture_manager_.reset(NULL);
}
void ChannelManager::Terminate_w() {
@ -427,92 +405,6 @@ bool ChannelManager::SetOutputVolume(int level) {
return ret;
}
std::vector<cricket::VideoFormat> ChannelManager::GetSupportedFormats(
VideoCapturer* capturer) const {
ASSERT(capturer != NULL);
std::vector<VideoFormat> formats;
worker_thread_->Invoke<void>(rtc::Bind(&ChannelManager::GetSupportedFormats_w,
this, capturer, &formats));
return formats;
}
void ChannelManager::GetSupportedFormats_w(
VideoCapturer* capturer,
std::vector<cricket::VideoFormat>* out_formats) const {
const std::vector<VideoFormat>* formats = capturer->GetSupportedFormats();
if (formats != NULL)
*out_formats = *formats;
}
// The following are done in the new "CaptureManager" style that
// all local video capturers, processors, and managers should move
// to.
// TODO(pthatcher): Add more of the CaptureManager interface.
bool ChannelManager::StartVideoCapture(
VideoCapturer* capturer, const VideoFormat& video_format) {
return initialized_ && worker_thread_->Invoke<bool>(
Bind(&CaptureManager::StartVideoCapture,
capture_manager_.get(), capturer, video_format));
}
bool ChannelManager::StopVideoCapture(
VideoCapturer* capturer, const VideoFormat& video_format) {
return initialized_ && worker_thread_->Invoke<bool>(
Bind(&CaptureManager::StopVideoCapture,
capture_manager_.get(), capturer, video_format));
}
void ChannelManager::AddVideoSink(
VideoCapturer* capturer, rtc::VideoSinkInterface<VideoFrame>* sink) {
if (initialized_)
worker_thread_->Invoke<void>(
Bind(&CaptureManager::AddVideoSink,
capture_manager_.get(), capturer, sink));
}
void ChannelManager::RemoveVideoSink(
VideoCapturer* capturer, rtc::VideoSinkInterface<VideoFrame>* sink) {
if (initialized_)
worker_thread_->Invoke<void>(
Bind(&CaptureManager::RemoveVideoSink,
capture_manager_.get(), capturer, sink));
}
bool ChannelManager::IsScreencastRunning() const {
return initialized_ && worker_thread_->Invoke<bool>(
Bind(&ChannelManager::IsScreencastRunning_w, this));
}
bool ChannelManager::IsScreencastRunning_w() const {
VideoChannels::const_iterator it = video_channels_.begin();
for ( ; it != video_channels_.end(); ++it) {
if ((*it) && (*it)->IsScreencasting()) {
return true;
}
}
return false;
}
void ChannelManager::OnVideoCaptureStateChange(VideoCapturer* capturer,
CaptureState result) {
// TODO(whyuan): Check capturer and signal failure only for camera video, not
// screencast.
capturing_ = result == CS_RUNNING;
main_thread_->Post(this, MSG_VIDEOCAPTURESTATE,
new CaptureStateParams(capturer, result));
}
void ChannelManager::OnMessage(rtc::Message* message) {
switch (message->message_id) {
case MSG_VIDEOCAPTURESTATE: {
CaptureStateParams* data =
static_cast<CaptureStateParams*>(message->pdata);
SignalVideoCaptureStateChange(data->capturer, data->state);
delete data;
break;
}
}
}
bool ChannelManager::StartAecDump(rtc::PlatformFile file,
int64_t max_size_bytes) {

View File

@ -14,11 +14,8 @@
#include <string>
#include <vector>
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/fileutils.h"
#include "webrtc/base/sigslotrepeater.h"
#include "webrtc/base/thread.h"
#include "webrtc/media/base/capturemanager.h"
#include "webrtc/media/base/mediaengine.h"
#include "webrtc/pc/voicechannel.h"
@ -37,15 +34,13 @@ class VoiceChannel;
// voice or just video channels.
// ChannelManager also allows the application to discover what devices it has
// using device manager.
class ChannelManager : public rtc::MessageHandler,
public sigslot::has_slots<> {
class ChannelManager {
public:
// For testing purposes. Allows the media engine and data media
// engine and dev manager to be mocks. The ChannelManager takes
// ownership of these objects.
ChannelManager(MediaEngineInterface* me,
DataEngineInterface* dme,
CaptureManager* cm,
rtc::Thread* worker);
// Same as above, but gives an easier default DataEngine.
ChannelManager(MediaEngineInterface* me,
@ -119,29 +114,6 @@ class ChannelManager : public rtc::MessageHandler,
// Starts/stops the local microphone and enables polling of the input level.
bool capturing() const { return capturing_; }
// Gets capturer's supported formats in a thread safe manner
std::vector<cricket::VideoFormat> GetSupportedFormats(
VideoCapturer* capturer) const;
// The following are done in the new "CaptureManager" style that
// all local video capturers, processors, and managers should move to.
// TODO(pthatcher): Make methods nicer by having start return a handle that
// can be used for stop and restart, rather than needing to pass around
// formats a a pseudo-handle.
bool StartVideoCapture(VideoCapturer* video_capturer,
const VideoFormat& video_format);
bool StopVideoCapture(VideoCapturer* video_capturer,
const VideoFormat& video_format);
bool RestartVideoCapture(VideoCapturer* video_capturer,
const VideoFormat& previous_format,
const VideoFormat& desired_format,
CaptureManager::RestartOptions options);
virtual void AddVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
virtual void RemoveVideoSink(VideoCapturer* video_capturer,
rtc::VideoSinkInterface<VideoFrame>* sink);
bool IsScreencastRunning() const;
// The operations below occur on the main thread.
// Starts AEC dump using existing file, with a specified maximum file size in
@ -158,8 +130,6 @@ class ChannelManager : public rtc::MessageHandler,
// Stops logging RtcEventLog.
void StopRtcEventLog();
sigslot::signal2<VideoCapturer*, CaptureState> SignalVideoCaptureStateChange;
private:
typedef std::vector<VoiceChannel*> VoiceChannels;
typedef std::vector<VideoChannel*> VideoChannels;
@ -167,7 +137,6 @@ class ChannelManager : public rtc::MessageHandler,
void Construct(MediaEngineInterface* me,
DataEngineInterface* dme,
CaptureManager* cm,
rtc::Thread* worker_thread);
bool InitMediaEngine_w();
void DestructorDeletes_w();
@ -191,17 +160,9 @@ class ChannelManager : public rtc::MessageHandler,
bool rtcp,
DataChannelType data_channel_type);
void DestroyDataChannel_w(DataChannel* data_channel);
void OnVideoCaptureStateChange(VideoCapturer* capturer,
CaptureState result);
void GetSupportedFormats_w(
VideoCapturer* capturer,
std::vector<cricket::VideoFormat>* out_formats) const;
bool IsScreencastRunning_w() const;
virtual void OnMessage(rtc::Message *message);
rtc::scoped_ptr<MediaEngineInterface> media_engine_;
rtc::scoped_ptr<DataEngineInterface> data_media_engine_;
rtc::scoped_ptr<CaptureManager> capture_manager_;
bool initialized_;
rtc::Thread* main_thread_;
rtc::Thread* worker_thread_;

View File

@ -12,7 +12,6 @@
#include "webrtc/base/gunit.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/thread.h"
#include "webrtc/media/base/fakecapturemanager.h"
#include "webrtc/media/base/fakemediaengine.h"
#include "webrtc/media/base/fakevideocapturer.h"
#include "webrtc/media/base/testutils.h"
@ -38,10 +37,8 @@ class ChannelManagerTest : public testing::Test {
ChannelManagerTest()
: fme_(new cricket::FakeMediaEngine()),
fdme_(new cricket::FakeDataEngine()),
fcm_(new cricket::FakeCaptureManager()),
cm_(new cricket::ChannelManager(fme_,
fdme_,
fcm_,
rtc::Thread::Current())),
fake_call_(webrtc::Call::Config()),
fake_mc_(cm_, &fake_call_),
@ -57,7 +54,6 @@ class ChannelManagerTest : public testing::Test {
delete transport_controller_;
delete cm_;
cm_ = NULL;
fcm_ = NULL;
fdme_ = NULL;
fme_ = NULL;
}
@ -65,7 +61,6 @@ class ChannelManagerTest : public testing::Test {
rtc::Thread worker_;
cricket::FakeMediaEngine* fme_;
cricket::FakeDataEngine* fdme_;
cricket::FakeCaptureManager* fcm_;
cricket::ChannelManager* cm_;
cricket::FakeCall fake_call_;
cricket::FakeMediaController fake_mc_;