Remove cricket::VideoProcessor and AddVideoProcessor() functionality
This functionality is not used internally in WebRTC. It is also not safe: the frame is supposed to be read-only, and it will likely not work for texture frames.

R=pthatcher@webrtc.org

Review URL: https://codereview.webrtc.org/1296113002 .

Cr-Commit-Position: refs/heads/master@{#9753}
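For context, a rough sketch of the usage pattern being deleted, reconstructed from the removed code below (the processor subclass and its registration calls are illustrative, not part of this CL):

    // A processor received every captured frame and could mutate it in place
    // or ask for it to be dropped; the in-place mutation is what is unsafe for
    // read-only and texture-backed frames.
    class ExampleProcessor : public cricket::VideoProcessor {
     public:
      virtual void OnFrame(uint32 ssrc, cricket::VideoFrame* frame,
                           bool* drop_frame) {
        if (!frame) {
          *drop_frame = true;  // Skip frames that cannot be processed in time.
        }
      }
    };

    // Registration was forwarded through CaptureManager (or ChannelManager)
    // to the capturer:
    //   capture_manager->AddVideoProcessor(video_capturer, &processor);
    //   ...
    //   capture_manager->RemoveVideoProcessor(video_capturer, &processor);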
@@ -469,7 +469,6 @@
         'media/base/videoframe.h',
         'media/base/videoframefactory.cc',
         'media/base/videoframefactory.h',
-        'media/base/videoprocessor.h',
         'media/base/videorenderer.h',
         'media/base/voiceprocessor.h',
         'media/base/yuvframegenerator.cc',
@@ -30,7 +30,6 @@
 #include <algorithm>
 
 #include "talk/media/base/videocapturer.h"
-#include "talk/media/base/videoprocessor.h"
 #include "talk/media/base/videorenderer.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/logging.h"
@@ -314,31 +313,6 @@ bool CaptureManager::RemoveVideoRenderer(VideoCapturer* video_capturer,
   return adapter->RemoveRenderer(video_renderer);
 }
-
-bool CaptureManager::AddVideoProcessor(VideoCapturer* video_capturer,
-                                       VideoProcessor* video_processor) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  if (!video_capturer || !video_processor) {
-    return false;
-  }
-  if (!IsCapturerRegistered(video_capturer)) {
-    return false;
-  }
-  video_capturer->AddVideoProcessor(video_processor);
-  return true;
-}
-
-bool CaptureManager::RemoveVideoProcessor(VideoCapturer* video_capturer,
-                                          VideoProcessor* video_processor) {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  if (!video_capturer || !video_processor) {
-    return false;
-  }
-  if (!IsCapturerRegistered(video_capturer)) {
-    return false;
-  }
-  return video_capturer->RemoveVideoProcessor(video_processor);
-}
 
 bool CaptureManager::IsCapturerRegistered(VideoCapturer* video_capturer) const {
   DCHECK(thread_checker_.CalledOnValidThread());
   return GetCaptureState(video_capturer) != NULL;
@@ -32,9 +32,7 @@
 // The class employs reference counting on starting and stopping of capturing of
 // frames such that if anyone is still listening it will not be stopped. The
 // class also provides APIs for attaching VideoRenderers to a specific capturer
-// such that the VideoRenderers are fed frames directly from the capturer. In
-// addition, these frames can be altered before being sent to the capturers by
-// way of VideoProcessors.
+// such that the VideoRenderers are fed frames directly from the capturer.
 // CaptureManager is Thread-unsafe. This means that none of its APIs may be
 // called concurrently. Note that callbacks are called by the VideoCapturer's
 // thread which is normally a separate unmarshalled thread and thus normally
@@ -54,7 +52,6 @@
 namespace cricket {
 
 class VideoCapturer;
-class VideoProcessor;
 class VideoRenderer;
 class VideoCapturerState;
 
@@ -87,11 +84,6 @@ class CaptureManager : public sigslot::has_slots<> {
   virtual bool RemoveVideoRenderer(VideoCapturer* video_capturer,
                                    VideoRenderer* video_renderer);
 
-  virtual bool AddVideoProcessor(VideoCapturer* video_capturer,
-                                 VideoProcessor* video_processor);
-  virtual bool RemoveVideoProcessor(VideoCapturer* video_capturer,
-                                    VideoProcessor* video_processor);
-
   sigslot::repeater2<VideoCapturer*, CaptureState> SignalCapturerStateChange;
 
  private:
@@ -63,7 +63,6 @@ class CaptureManagerTest : public ::testing::Test, public sigslot::has_slots<> {
     }
     video_capturer_.ResetSupportedFormats(formats);
   }
-  int NumFramesProcessed() { return media_processor_.video_frame_count(); }
   int NumFramesRendered() { return video_renderer_.num_rendered_frames(); }
   bool WasRenderedResolution(cricket::VideoFormat format) {
     return format.width == video_renderer_.width() &&
@@ -95,8 +94,6 @@ TEST_F(CaptureManagerTest, InvalidCallOrder) {
   // Capturer must be registered before any of these calls.
   EXPECT_FALSE(capture_manager_.AddVideoRenderer(&video_capturer_,
                                                  &video_renderer_));
-  EXPECT_FALSE(capture_manager_.AddVideoProcessor(&video_capturer_,
-                                                  &media_processor_));
 }
 
 TEST_F(CaptureManagerTest, InvalidAddingRemoving) {
@@ -109,36 +106,10 @@ TEST_F(CaptureManagerTest, InvalidAddingRemoving) {
   EXPECT_FALSE(capture_manager_.AddVideoRenderer(&video_capturer_, NULL));
   EXPECT_FALSE(capture_manager_.RemoveVideoRenderer(&video_capturer_,
                                                     &video_renderer_));
-  EXPECT_FALSE(capture_manager_.AddVideoProcessor(&video_capturer_,
-                                                  NULL));
-  EXPECT_FALSE(capture_manager_.RemoveVideoProcessor(&video_capturer_,
-                                                     &media_processor_));
   EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_, format_vga_));
 }
 
 // Valid use cases
-TEST_F(CaptureManagerTest, ProcessorTest) {
-  EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
-                                                 format_vga_));
-  EXPECT_EQ_WAIT(cricket::CS_RUNNING, capture_state(), kMsCallbackWait);
-  EXPECT_EQ(1, callback_count());
-  EXPECT_TRUE(capture_manager_.AddVideoRenderer(&video_capturer_,
-                                                &video_renderer_));
-  EXPECT_TRUE(capture_manager_.AddVideoProcessor(&video_capturer_,
-                                                 &media_processor_));
-  EXPECT_TRUE(video_capturer_.CaptureFrame());
-  EXPECT_EQ(1, NumFramesProcessed());
-  EXPECT_EQ(1, NumFramesRendered());
-  EXPECT_TRUE(capture_manager_.RemoveVideoProcessor(&video_capturer_,
-                                                    &media_processor_));
-  // Processor has been removed so no more frames should be processed.
-  EXPECT_TRUE(video_capturer_.CaptureFrame());
-  EXPECT_EQ(1, NumFramesProcessed());
-  EXPECT_EQ(2, NumFramesRendered());
-  EXPECT_TRUE(capture_manager_.StopVideoCapture(&video_capturer_, format_vga_));
-  EXPECT_EQ(2, callback_count());
-}
-
 TEST_F(CaptureManagerTest, KeepFirstResolutionHigh) {
   EXPECT_TRUE(capture_manager_.StartVideoCapture(&video_capturer_,
                                                  format_vga_));
@@ -28,7 +28,6 @@
 #include "talk/media/base/capturerenderadapter.h"
 
 #include "talk/media/base/videocapturer.h"
-#include "talk/media/base/videoprocessor.h"
 #include "talk/media/base/videorenderer.h"
 #include "webrtc/base/logging.h"
 
@@ -856,7 +856,7 @@ class FakeVideoEngine : public FakeBaseEngine {
  public:
   FakeVideoEngine() : FakeVideoEngine(nullptr) {}
   explicit FakeVideoEngine(FakeVoiceEngine* voice)
-      : capture_(false), processor_(NULL) {
+      : capture_(false) {
     // Add a fake video codec. Note that the name must not be "" as there are
     // sanity checks against that.
     codecs_.push_back(VideoCodec(0, "fake_video_codec", 0, 0, 0, 0));
@@ -925,7 +925,6 @@ class FakeVideoEngine : public FakeBaseEngine {
   VideoEncoderConfig default_encoder_config_;
   std::string in_device_;
   bool capture_;
-  VideoProcessor* processor_;
   VideoOptions options_;
 
   friend class FakeMediaEngine;
@@ -28,20 +28,16 @@
 #ifndef TALK_MEDIA_BASE_FAKEMEDIAPROCESSOR_H_
 #define TALK_MEDIA_BASE_FAKEMEDIAPROCESSOR_H_
 
-#include "talk/media/base/videoprocessor.h"
 #include "talk/media/base/voiceprocessor.h"
 
 namespace cricket {
 
 class AudioFrame;
 
-class FakeMediaProcessor : public VoiceProcessor, public VideoProcessor {
+class FakeMediaProcessor : public VoiceProcessor {
  public:
   FakeMediaProcessor()
-      : voice_frame_count_(0),
-        video_frame_count_(0),
-        drop_frames_(false),
-        dropped_frame_count_(0) {
+      : voice_frame_count_(0) {
   }
   virtual ~FakeMediaProcessor() {}
 
@@ -50,28 +46,14 @@ class FakeMediaProcessor : public VoiceProcessor, public VideoProcessor {
                        AudioFrame* frame) {
     ++voice_frame_count_;
   }
-  virtual void OnFrame(uint32 ssrc, VideoFrame* frame_ptr, bool* drop_frame) {
-    ++video_frame_count_;
-    if (drop_frames_) {
-      *drop_frame = true;
-      ++dropped_frame_count_;
-    }
-  }
   virtual void OnVoiceMute(uint32 ssrc, bool muted) {}
   virtual void OnVideoMute(uint32 ssrc, bool muted) {}
 
   int voice_frame_count() const { return voice_frame_count_; }
-  int video_frame_count() const { return video_frame_count_; }
-
-  void set_drop_frames(bool b) { drop_frames_ = b; }
-  int dropped_frame_count() const { return dropped_frame_count_; }
 
  private:
   // TODO(janahan): make is a map so that we can multiple ssrcs
   int voice_frame_count_;
-  int video_frame_count_;
-  bool drop_frames_;
-  int dropped_frame_count_;
 };
 
 } // namespace cricket
@@ -42,7 +42,6 @@
 #include "talk/media/base/mediacommon.h"
 #include "talk/media/base/videocapturer.h"
 #include "talk/media/base/videocommon.h"
-#include "talk/media/base/videoprocessor.h"
 #include "talk/media/base/voiceprocessor.h"
 #include "talk/media/devices/devicemanager.h"
 #include "webrtc/base/fileutils.h"
@@ -33,7 +33,6 @@
 
 #include "libyuv/scale_argb.h"
 #include "talk/media/base/videoframefactory.h"
-#include "talk/media/base/videoprocessor.h"
 #include "webrtc/base/common.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/systeminfo.h"
@@ -107,7 +106,6 @@ webrtc::VideoRotation CapturedFrame::GetRotation() const {
 VideoCapturer::VideoCapturer()
     : thread_(rtc::Thread::Current()),
       adapt_frame_drops_data_(kMaxAccumulatorSize),
-      effect_frame_drops_data_(kMaxAccumulatorSize),
       frame_time_data_(kMaxAccumulatorSize),
       apply_rotation_(true) {
   Construct();
@@ -116,7 +114,6 @@ VideoCapturer::VideoCapturer()
 VideoCapturer::VideoCapturer(rtc::Thread* thread)
     : thread_(thread),
       adapt_frame_drops_data_(kMaxAccumulatorSize),
-      effect_frame_drops_data_(kMaxAccumulatorSize),
       frame_time_data_(kMaxAccumulatorSize),
       apply_rotation_(true) {
   Construct();
@@ -135,7 +132,6 @@ void VideoCapturer::Construct() {
   black_frame_count_down_ = kNumBlackFramesOnMute;
   enable_video_adapter_ = true;
   adapt_frame_drops_ = 0;
-  effect_frame_drops_ = 0;
   previous_frame_time_ = 0.0;
 #ifdef HAVE_WEBRTC_VIDEO
   // There are lots of video capturers out there that don't call
@@ -309,24 +305,6 @@ bool VideoCapturer::GetBestCaptureFormat(const VideoFormat& format,
   return true;
 }
-
-void VideoCapturer::AddVideoProcessor(VideoProcessor* video_processor) {
-  rtc::CritScope cs(&crit_);
-  ASSERT(std::find(video_processors_.begin(), video_processors_.end(),
-                   video_processor) == video_processors_.end());
-  video_processors_.push_back(video_processor);
-}
-
-bool VideoCapturer::RemoveVideoProcessor(VideoProcessor* video_processor) {
-  rtc::CritScope cs(&crit_);
-  VideoProcessors::iterator found = std::find(
-      video_processors_.begin(), video_processors_.end(), video_processor);
-  if (found == video_processors_.end()) {
-    return false;
-  }
-  video_processors_.erase(found);
-  return true;
-}
 
 void VideoCapturer::ConstrainSupportedFormats(const VideoFormat& max_format) {
   max_format_.reset(new VideoFormat(max_format));
   LOG(LS_VERBOSE) << " ConstrainSupportedFormats " << max_format.ToString();
@@ -363,12 +341,10 @@ void VideoCapturer::GetStats(VariableInfo<int>* adapt_drops_stats,
                              VideoFormat* last_captured_frame_format) {
   rtc::CritScope cs(&frame_stats_crit_);
   GetVariableSnapshot(adapt_frame_drops_data_, adapt_drops_stats);
-  GetVariableSnapshot(effect_frame_drops_data_, effect_drops_stats);
   GetVariableSnapshot(frame_time_data_, frame_time_stats);
   *last_captured_frame_format = last_captured_frame_format_;
 
   adapt_frame_drops_data_.Reset();
-  effect_frame_drops_data_.Reset();
   frame_time_data_.Reset();
 }
 
@@ -567,11 +543,6 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
     return;
   }
 
-  if (!muted_ && !ApplyProcessors(adapted_frame.get())) {
-    // Processor dropped the frame.
-    ++effect_frame_drops_;
-    return;
-  }
   if (muted_) {
     // TODO(pthatcher): Use frame_factory_->CreateBlackFrame() instead.
     adapted_frame->SetToBlack();
@@ -709,19 +680,6 @@ int64 VideoCapturer::GetFormatDistance(const VideoFormat& desired,
   return distance;
 }
-
-bool VideoCapturer::ApplyProcessors(VideoFrame* video_frame) {
-  bool drop_frame = false;
-  rtc::CritScope cs(&crit_);
-  for (VideoProcessors::iterator iter = video_processors_.begin();
-       iter != video_processors_.end(); ++iter) {
-    (*iter)->OnFrame(kDummyVideoSsrc, video_frame, &drop_frame);
-    if (drop_frame) {
-      return false;
-    }
-  }
-  return true;
-}
 
 void VideoCapturer::UpdateFilteredSupportedFormats() {
   filtered_supported_formats_.clear();
   filtered_supported_formats_ = supported_formats_;
@@ -765,11 +723,9 @@ void VideoCapturer::UpdateStats(const CapturedFrame* captured_frame) {
   double time_now = frame_length_time_reporter_.TimerNow();
   if (previous_frame_time_ != 0.0) {
     adapt_frame_drops_data_.AddSample(adapt_frame_drops_);
-    effect_frame_drops_data_.AddSample(effect_frame_drops_);
     frame_time_data_.AddSample(time_now - previous_frame_time_);
   }
   previous_frame_time_ = time_now;
-  effect_frame_drops_ = 0;
   adapt_frame_drops_ = 0;
 }
 
@@ -51,8 +51,6 @@
 
 namespace cricket {
 
-class VideoProcessor;
-
 // Current state of the capturer.
 // TODO(hellner): CS_NO_DEVICE is an error code not a capture state. Separate
 // error codes and states.
@@ -115,8 +113,8 @@ struct CapturedFrame {
 //
 // The captured frames may need to be adapted (for example, cropping).
 // Video adaptation is built into and enabled by default. After a frame has
-// been captured from the device, it is sent to the video adapter, then video
-// processors, then out to the encoder.
+// been captured from the device, it is sent to the video adapter, then out to
+// the encoder.
 //
 // Programming model:
 // Create an object of a subclass of VideoCapturer
@@ -139,8 +137,6 @@ class VideoCapturer
     : public sigslot::has_slots<>,
       public rtc::MessageHandler {
  public:
-  typedef std::vector<VideoProcessor*> VideoProcessors;
-
   // All signals are marshalled to |thread| or the creating thread if
   // none is provided.
   VideoCapturer();
@@ -233,14 +229,6 @@ class VideoCapturer
   virtual bool SetApplyRotation(bool enable);
   virtual bool GetApplyRotation() { return apply_rotation_; }
 
-  // Adds a video processor that will be applied on VideoFrames returned by
-  // |SignalVideoFrame|. Multiple video processors can be added. The video
-  // processors will be applied in the order they were added.
-  void AddVideoProcessor(VideoProcessor* video_processor);
-  // Removes the |video_processor| from the list of video processors or
-  // returns false.
-  bool RemoveVideoProcessor(VideoProcessor* video_processor);
-
   // Returns true if the capturer is screencasting. This can be used to
   // implement screencast specific behavior.
   virtual bool IsScreencast() const = 0;
@@ -282,8 +270,6 @@ class VideoCapturer
   sigslot::signal2<VideoCapturer*, const VideoFrame*,
                    sigslot::multi_threaded_local> SignalVideoFrame;
 
-  const VideoProcessors& video_processors() const { return video_processors_; }
-
   // If 'screencast_max_pixels' is set greater than zero, screencasts will be
   // scaled to be no larger than this value.
   // If set to zero, the max pixels will be limited to
@@ -361,11 +347,6 @@ class VideoCapturer
   // Convert captured frame to readable string for LOG messages.
   std::string ToString(const CapturedFrame* frame) const;
 
-  // Applies all registered processors. If any of the processors signal that
-  // the frame should be dropped the return value will be false. Note that
-  // this frame should be dropped as it has not applied all processors.
-  bool ApplyProcessors(VideoFrame* video_frame);
-
   // Updates filtered_supported_formats_ so that it contains the formats in
   // supported_formats_ that fulfill all applied restrictions.
   void UpdateFilteredSupportedFormats();
@@ -408,16 +389,11 @@ class VideoCapturer
 
   int adapt_frame_drops_;
   rtc::RollingAccumulator<int> adapt_frame_drops_data_;
-  int effect_frame_drops_;
-  rtc::RollingAccumulator<int> effect_frame_drops_data_;
   double previous_frame_time_;
   rtc::RollingAccumulator<double> frame_time_data_;
   // The captured frame format before potential adapation.
   VideoFormat last_captured_frame_format_;
 
-  rtc::CriticalSection crit_;
-  VideoProcessors video_processors_;
-
   // Whether capturer should apply rotation to the frame before signaling it.
   bool apply_rotation_;
 
@@ -33,7 +33,6 @@
 #include "talk/media/base/fakevideorenderer.h"
 #include "talk/media/base/testutils.h"
 #include "talk/media/base/videocapturer.h"
-#include "talk/media/base/videoprocessor.h"
 #include "webrtc/base/gunit.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/thread.h"
@@ -49,26 +48,6 @@ const uint32 kTimeout = 5000U;
 
 } // namespace
 
-// Sets the elapsed time in the video frame to 0.
-class VideoProcessor0 : public cricket::VideoProcessor {
- public:
-  virtual void OnFrame(uint32 /*ssrc*/, cricket::VideoFrame* frame,
-                       bool* drop_frame) {
-    frame->SetElapsedTime(0u);
-  }
-};
-
-// Adds one to the video frame's elapsed time. Note that VideoProcessor0 and
-// VideoProcessor1 are not commutative.
-class VideoProcessor1 : public cricket::VideoProcessor {
- public:
-  virtual void OnFrame(uint32 /*ssrc*/, cricket::VideoFrame* frame,
-                       bool* drop_frame) {
-    int64 elapsed_time = frame->GetElapsedTime();
-    frame->SetElapsedTime(elapsed_time + 1);
-  }
-};
-
 class VideoCapturerTest
     : public sigslot::has_slots<>,
       public testing::Test {
@@ -806,47 +785,6 @@ TEST_F(VideoCapturerTest, VideoFrame) {
   EXPECT_TRUE(capturer_.CaptureFrame());
   EXPECT_EQ(1, video_frames_received());
 }
-
-TEST_F(VideoCapturerTest, ProcessorChainTest) {
-  VideoProcessor0 processor0;
-  VideoProcessor1 processor1;
-  EXPECT_EQ(cricket::CS_RUNNING, capturer_.Start(cricket::VideoFormat(
-      640,
-      480,
-      cricket::VideoFormat::FpsToInterval(30),
-      cricket::FOURCC_I420)));
-  EXPECT_TRUE(capturer_.IsRunning());
-  EXPECT_EQ(0, video_frames_received());
-  // First processor sets elapsed time to 0.
-  capturer_.AddVideoProcessor(&processor0);
-  // Second processor adds 1 to the elapsed time. I.e. a frames elapsed time
-  // should now always be 1 (and not 0).
-  capturer_.AddVideoProcessor(&processor1);
-  EXPECT_TRUE(capturer_.CaptureFrame());
-  EXPECT_EQ(1, video_frames_received());
-  EXPECT_EQ(1u, last_frame_elapsed_time());
-  capturer_.RemoveVideoProcessor(&processor1);
-  EXPECT_TRUE(capturer_.CaptureFrame());
-  // Since processor1 has been removed the elapsed time should now be 0.
-  EXPECT_EQ(2, video_frames_received());
-  EXPECT_EQ(0u, last_frame_elapsed_time());
-}
-
-TEST_F(VideoCapturerTest, ProcessorDropFrame) {
-  cricket::FakeMediaProcessor dropping_processor_;
-  dropping_processor_.set_drop_frames(true);
-  EXPECT_EQ(cricket::CS_RUNNING, capturer_.Start(cricket::VideoFormat(
-      640,
-      480,
-      cricket::VideoFormat::FpsToInterval(30),
-      cricket::FOURCC_I420)));
-  EXPECT_TRUE(capturer_.IsRunning());
-  EXPECT_EQ(0, video_frames_received());
-  // Install a processor that always drop frames.
-  capturer_.AddVideoProcessor(&dropping_processor_);
-  EXPECT_TRUE(capturer_.CaptureFrame());
-  EXPECT_EQ(0, video_frames_received());
-}
 #endif // HAVE_WEBRTC_VIDEO
 
 bool HdFormatInList(const std::vector<cricket::VideoFormat>& formats) {
@@ -1,50 +0,0 @@
-/*
- * libjingle
- * Copyright 2004 Google Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TALK_MEDIA_BASE_VIDEOPROCESSOR_H_
-#define TALK_MEDIA_BASE_VIDEOPROCESSOR_H_
-
-#include "talk/media/base/videoframe.h"
-#include "webrtc/base/sigslot.h"
-
-namespace cricket {
-
-class VideoProcessor : public sigslot::has_slots<> {
- public:
-  virtual ~VideoProcessor() {}
-  // Contents of frame may be manipulated by the processor.
-  // The processed data is expected to be the same size as the
-  // original data. VideoProcessors may be chained together and may decide
-  // that the current frame should be dropped. If *drop_frame is true,
-  // the current processor should skip processing. If the current processor
-  // decides it cannot process the current frame in a timely manner, it may set
-  // *drop_frame = true and the frame will be dropped.
-  virtual void OnFrame(uint32 ssrc, VideoFrame* frame, bool* drop_frame) = 0;
-};
-
-} // namespace cricket
-#endif // TALK_MEDIA_BASE_VIDEOPROCESSOR_H_
@@ -768,34 +768,6 @@ void ChannelManager::GetSupportedFormats_w(
   *out_formats = *formats;
 }
-
-// TODO(janahan): For now pass this request through the mediaengine to the
-// voice and video engines to do the real work. Once the capturer refactoring
-// is done, we will access the capturer using the ssrc (similar to how the
-// renderer is accessed today) and register with it directly.
-bool ChannelManager::RegisterVideoProcessor(VideoCapturer* capturer,
-                                            VideoProcessor* processor) {
-  return initialized_ && worker_thread_->Invoke<bool>(
-      Bind(&ChannelManager::RegisterVideoProcessor_w, this,
-           capturer, processor));
-}
-
-bool ChannelManager::RegisterVideoProcessor_w(VideoCapturer* capturer,
-                                              VideoProcessor* processor) {
-  return capture_manager_->AddVideoProcessor(capturer, processor);
-}
-
-bool ChannelManager::UnregisterVideoProcessor(VideoCapturer* capturer,
-                                              VideoProcessor* processor) {
-  return initialized_ && worker_thread_->Invoke<bool>(
-      Bind(&ChannelManager::UnregisterVideoProcessor_w, this,
-           capturer, processor));
-}
-
-bool ChannelManager::UnregisterVideoProcessor_w(VideoCapturer* capturer,
-                                                VideoProcessor* processor) {
-  return capture_manager_->RemoveVideoProcessor(capturer, processor);
-}
 
 bool ChannelManager::RegisterVoiceProcessor(
     uint32 ssrc,
     VoiceProcessor* processor,
@@ -44,7 +44,6 @@ namespace cricket {
 
 const int kDefaultAudioDelayOffset = 0;
 
-class VideoProcessor;
 class VoiceChannel;
 class VoiceProcessor;
 
@@ -175,14 +174,7 @@ class ChannelManager : public rtc::MessageHandler,
   // Gets capturer's supported formats in a thread safe manner
   std::vector<cricket::VideoFormat> GetSupportedFormats(
       VideoCapturer* capturer) const;
-  // The channel manager handles the Tx side for Video processing,
-  // as well as Tx and Rx side for Voice processing.
-  // (The Rx Video processing will go throug the simplerenderingmanager,
-  // to be implemented).
-  bool RegisterVideoProcessor(VideoCapturer* capturer,
-                              VideoProcessor* processor);
-  bool UnregisterVideoProcessor(VideoCapturer* capturer,
-                                VideoProcessor* processor);
+  // The channel manager handles the Tx and Rx side for Voice processing.
   bool RegisterVoiceProcessor(uint32 ssrc,
                               VoiceProcessor* processor,
                               MediaProcessorDirection direction);
@@ -279,10 +271,6 @@ class ChannelManager : public rtc::MessageHandler,
   void GetSupportedFormats_w(
       VideoCapturer* capturer,
       std::vector<cricket::VideoFormat>* out_formats) const;
-  bool RegisterVideoProcessor_w(VideoCapturer* capturer,
-                                VideoProcessor* processor);
-  bool UnregisterVideoProcessor_w(VideoCapturer* capturer,
-                                  VideoProcessor* processor);
   bool IsScreencastRunning_w() const;
   virtual void OnMessage(rtc::Message *message);
 