Moving src/webrtc into src/.

In order to eliminate the WebRTC Subtree mirror in Chromium, 
WebRTC is moving the content of the src/webrtc directory up
to the src/ directory.

NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
TBR=tommi@webrtc.org

Bug: chromium:611808
Change-Id: Iac59c5b51b950f174119565bac87955a7994bc38
Reviewed-on: https://webrtc-review.googlesource.com/1560
Commit-Queue: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Henrik Kjellander <kjellander@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#19845}
This commit is contained in:
Mirko Bonadei
2017-09-15 06:15:48 +02:00
committed by Commit Bot
parent 6674846b4a
commit bb547203bf
4576 changed files with 1092 additions and 1196 deletions

730
test/BUILD.gn Normal file
View File

@ -0,0 +1,730 @@
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../webrtc.gni")
import("//build/config/ui.gni")
if (is_android) {
import("//build/config/android/rules.gni")
}
if (rtc_use_memcheck) {
import("//tools_webrtc/valgrind/valgrind-webrtc.gni")
}
# Umbrella group: depending on ":test" builds all WebRTC test utilities below.
group("test") {
testonly = true
deps = [
":field_trial",
":rtp_test_utils",
":test_common",
":test_renderer",
":test_support",
":video_test_common",
]
# Chromium supplies its own test main and test runner targets.
if (!build_with_chromium) {
deps += [
":test_main",
":test_support_unittests",
]
}
}
# Frame generators and fake/file-based video capturers shared by video tests.
rtc_source_set("video_test_common") {
testonly = true
sources = [
"fake_texture_frame.cc",
"fake_texture_frame.h",
"frame_generator.cc",
"frame_generator.h",
"frame_generator_capturer.cc",
"frame_generator_capturer.h",
"frame_utils.cc",
"frame_utils.h",
"vcm_capturer.cc",
"vcm_capturer.h",
"video_capturer.cc",
"video_capturer.h",
"video_codec_settings.h",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
deps = [
"..:webrtc_common",
"../api:optional",
"../api/video_codecs:video_codecs_api",
"../call:video_stream_api",
"../common_video",
"../media:rtc_media_base",
"../modules/video_capture:video_capture_module",
"../rtc_base:rtc_base_approved",
"../rtc_base:rtc_task_queue",
"../system_wrappers",
]
}
# RTCP packet parser plus RTP dump file reader/writer used by tests.
rtc_source_set("rtp_test_utils") {
testonly = true
sources = [
"rtcp_packet_parser.cc",
"rtcp_packet_parser.h",
"rtp_file_reader.cc",
"rtp_file_reader.h",
"rtp_file_writer.cc",
"rtp_file_writer.h",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
deps = [
"..:webrtc_common",
"../api:array_view",
"../modules/rtp_rtcp",
"../rtc_base:rtc_base_approved",
"//testing/gtest",
]
}
# Helper for overriding WebRTC field trials in tests.
rtc_source_set("field_trial") {
testonly = true
sources = [
"field_trial.cc",
"field_trial.h",
]
deps = [
"..:webrtc_common",
"../system_wrappers",
"../system_wrappers:field_trial_default",
]
}
# iOS-only Objective-C++ test support, folded into ":test_support" below.
if (is_ios) {
rtc_source_set("test_support_objc") {
testonly = true
visibility = [ ":test_support" ]
sources = [
"ios/test_support.h",
"ios/test_support.mm",
]
deps = [
"../sdk:common_objc",
]
}
}
# Core test support: gtest/gmock wrappers, perf reporting, packet reader.
rtc_source_set("test_support") {
testonly = true
sources = [
"gmock.h",
"gtest.h",
"testsupport/packet_reader.cc",
"testsupport/packet_reader.h",
"testsupport/perf_test.cc",
"testsupport/perf_test.h",
"testsupport/trace_to_stderr.cc",
"testsupport/trace_to_stderr.h",
"testsupport/unittest_utils.h",
]
public_deps = [
":fileutils",
]
if (is_ios) {
public_deps += [ ":test_support_objc" ]
}
deps = [
"..:webrtc_common",
"../common_video",
"../rtc_base:gtest_prod",
"../rtc_base:rtc_base_approved",
"../system_wrappers",
"//testing/gmock",
"//testing/gtest",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
if (is_android) {
deps += [ "//base:base" ]
}
if (is_ios && !build_with_chromium) {
deps += [ "../sdk:common_objc" ]
}
if (rtc_use_memcheck) {
data = valgrind_webrtc_dependencies
}
}
# Everything in this block is built only in standalone WebRTC checkouts.
if (!build_with_chromium) {
# This target used to depend on //third_party/gflags which Chromium does not
# provide. TODO(oprypin): remove the conditional.
rtc_source_set("test_main") {
testonly = true
sources = [
"test_main.cc",
]
public_deps = [
":test_support",
]
deps = [
":field_trial",
"../rtc_base:rtc_base_approved",
"../system_wrappers:metrics_default",
"//testing/gmock",
"//testing/gtest",
]
}
# Frame file readers/writers and video quality metrics for tests.
rtc_source_set("video_test_support") {
testonly = true
sources = [
"testsupport/frame_reader.h",
"testsupport/frame_writer.h",
"testsupport/metrics/video_metrics.cc",
"testsupport/metrics/video_metrics.h",
"testsupport/mock/mock_frame_reader.h",
"testsupport/mock/mock_frame_writer.h",
"testsupport/y4m_frame_writer.cc",
"testsupport/yuv_frame_reader.cc",
"testsupport/yuv_frame_writer.cc",
]
deps = [
":test_support",
":video_test_common",
"..:webrtc_common",
"../api:video_frame_api",
"../common_video",
"../rtc_base:rtc_base_approved",
"../system_wrappers",
"//testing/gmock",
"//testing/gtest",
]
# JPEG writing uses libjpeg everywhere except on iOS.
if (!is_ios) {
deps += [ "//third_party:jpeg" ]
sources += [ "testsupport/jpeg_frame_writer.cc" ]
} else {
sources += [ "testsupport/jpeg_frame_writer_ios.cc" ]
}
public_deps = [
":fileutils",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
if (is_android) {
deps += [ "//base:base" ]
}
if (is_ios) {
deps += [ "../sdk:common_objc" ]
}
if (rtc_use_memcheck) {
data = valgrind_webrtc_dependencies
}
}
# Helpers for writing test output to a configurable directory.
rtc_source_set("test_support_test_output") {
testonly = true
sources = [
"testsupport/test_output.cc",
"testsupport/test_output.h",
]
deps = [
":fileutils",
"../rtc_base:rtc_base_approved",
]
}
# Media resource files required by test_support_unittests at runtime.
test_support_unittests_resources = [
"../../resources/foreman_cif_short.yuv",
"../../resources/video_coding/frame-ethernet-ii.pcap",
"../../resources/video_coding/frame-loopback.pcap",
"../../resources/video_coding/pltype103.rtp",
"../../resources/video_coding/pltype103_header_only.rtp",
"../../resources/video_coding/ssrcs-2.pcap",
"../../resources/video_coding/ssrcs-3.pcap",
]
if (is_ios) {
# On iOS the resources must be copied into the app bundle.
bundle_data("test_support_unittests_bundle_data") {
testonly = true
sources = test_support_unittests_resources
outputs = [
"{{bundle_resources_dir}}/{{source_file_part}}",
]
}
}
# Unit tests for the test-support code in this directory.
rtc_test("test_support_unittests") {
deps = [
":fake_audio_device",
":rtp_test_utils",
"../api:video_frame_api",
"../call:call_interfaces",
"../common_audio",
"../modules/rtp_rtcp",
"../rtc_base:rtc_base_approved",
"../system_wrappers",
]
sources = [
"fake_audio_device_unittest.cc",
"fake_network_pipe_unittest.cc",
"frame_generator_unittest.cc",
"rtp_file_reader_unittest.cc",
"rtp_file_writer_unittest.cc",
"single_threaded_task_queue_unittest.cc",
"testsupport/always_passing_unittest.cc",
"testsupport/metrics/video_metrics_unittest.cc",
"testsupport/packet_reader_unittest.cc",
"testsupport/perf_test_unittest.cc",
"testsupport/test_output_unittest.cc",
"testsupport/y4m_frame_writer_unittest.cc",
"testsupport/yuv_frame_reader_unittest.cc",
"testsupport/yuv_frame_writer_unittest.cc",
]
# TODO(jschuh): Bug 1348: fix this warning.
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
if (is_win) {
# virtual override w/different const/volatile signature.
cflags = [ "/wd4373" ]
}
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
data = test_support_unittests_resources
if (is_android) {
deps += [ "//testing/android/native_test:native_test_support" ]
shard_timeout = 900
}
if (is_ios) {
deps += [ ":test_support_unittests_bundle_data" ]
}
deps += [
":direct_transport",
":fileutils_unittests",
":test_common",
":test_main",
":test_support_test_output",
":video_test_common",
":video_test_support",
"../modules/video_capture",
"../rtc_base:rtc_base_approved",
"//testing/gmock",
"//testing/gtest",
]
}
}
# iOS-only Objective-C++ file utilities, folded into ":fileutils" below.
if (is_ios) {
rtc_source_set("fileutils_objc") {
visibility = [ ":fileutils" ]
sources = [
"testsupport/iosfileutils.mm",
]
deps = [
"..:webrtc_common",
"../rtc_base:rtc_base_approved",
"../sdk:common_objc",
]
}
}
# File/path helpers for tests; visibility is restricted to known consumers.
rtc_source_set("fileutils") {
testonly = true
visibility = [
":fileutils_unittests",
":test_support_test_output",
":test_support",
":video_test_support",
]
sources = [
"testsupport/fileutils.cc",
"testsupport/fileutils.h",
]
deps = [
"..:webrtc_common",
"../api:optional",
"../rtc_base:rtc_base_approved",
]
if (is_ios) {
deps += [ ":fileutils_objc" ]
}
if (is_win) {
deps += [ "../rtc_base:rtc_base" ]
}
}
# Platform-dispatching wrapper: picks the macOS or generic implementation.
rtc_source_set("run_test") {
testonly = true
if (is_mac) {
public_deps = [
":run_test_objc",
]
} else {
public_deps = [
":run_test_generic",
]
}
}
# Header-only interface shared by both run_test implementations.
rtc_source_set("run_test_interface") {
sources = [
"run_test.h",
]
}
if (is_mac) {
# macOS implementation of run_test.h.
rtc_source_set("run_test_objc") {
testonly = true
visibility = [ ":run_test" ]
sources = [
"mac/run_test.mm",
]
deps = [
":run_test_interface",
]
}
}
# Default (non-macOS) implementation of run_test.h.
rtc_source_set("run_test_generic") {
testonly = true
visibility = [ ":run_test" ]
sources = [
"run_test.cc",
]
deps = [
":run_test_interface",
]
}
rtc_source_set("fileutils_unittests") {
testonly = true
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"testsupport/fileutils_unittest.cc",
]
deps = [
":fileutils",
":test_support",
"../api:optional",
"../rtc_base:rtc_base_approved",
"//testing/gmock",
"//testing/gtest",
]
}
# In-process test transport that routes packets through a fake network pipe.
rtc_source_set("direct_transport") {
testonly = true
sources = [
"direct_transport.cc",
"direct_transport.h",
"fake_network_pipe.cc",
"fake_network_pipe.h",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
deps = [
"..:webrtc_common",
"../api:transport_api",
"../call",
"../modules/rtp_rtcp",
"../rtc_base:rtc_base_approved",
"../rtc_base:sequenced_task_checker",
"../system_wrappers",
]
public_deps = [
":single_threaded_task_queue",
]
}
# Minimal single-threaded task queue; public_dep of ":direct_transport".
rtc_source_set("single_threaded_task_queue") {
testonly = true
sources = [
"single_threaded_task_queue.cc",
"single_threaded_task_queue.h",
]
deps = [
"../rtc_base:rtc_base_approved",
]
}
# Fake audio device module with pluggable capturer/renderer, for call tests.
rtc_source_set("fake_audio_device") {
testonly = true
sources = [
"fake_audio_device.cc",
"fake_audio_device.h",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
deps = [
"..:webrtc_common",
"../api:array_view",
"../common_audio:common_audio",
"../modules/audio_device:audio_device",
"../rtc_base:rtc_base_approved",
"../system_wrappers:system_wrappers",
]
}
# Shared call-test fixture (CallTest) plus fake encoders/decoders, mock
# transports and related helpers used by end-to-end tests.
rtc_source_set("test_common") {
testonly = true
sources = [
"call_test.cc",
"call_test.h",
"configurable_frame_size_encoder.cc",
"configurable_frame_size_encoder.h",
"constants.cc",
"constants.h",
"drifting_clock.cc",
"drifting_clock.h",
"encoder_settings.cc",
"encoder_settings.h",
"fake_decoder.cc",
"fake_decoder.h",
"fake_encoder.cc",
"fake_encoder.h",
"fake_videorenderer.h",
"layer_filtering_transport.cc",
"layer_filtering_transport.h",
"mock_transport.h",
"mock_voe_channel_proxy.h",
"mock_voice_engine.h",
"null_transport.cc",
"null_transport.h",
"rtp_rtcp_observer.h",
"statistics.cc",
"statistics.h",
"win/run_loop_win.cc",
]
# The generic run loop is replaced by win/run_loop_win.cc on Windows.
if (!is_win) {
sources += [
"run_loop.cc",
"run_loop.h",
]
}
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
deps = [
":direct_transport",
":fake_audio_device",
":rtp_test_utils",
":test_support",
":video_test_common",
"..:webrtc_common",
"../api:transport_api",
"../api:video_frame_api",
"../api/audio_codecs:builtin_audio_decoder_factory",
"../api/audio_codecs:builtin_audio_encoder_factory",
"../api/video_codecs:video_codecs_api",
"../audio",
"../call",
"../call:rtp_sender",
"../call:video_stream_api",
"../common_video",
"../logging:rtc_event_log_api",
"../modules/audio_device:mock_audio_device",
"../modules/audio_mixer:audio_mixer_impl",
"../modules/audio_processing",
"../modules/rtp_rtcp",
"../modules/rtp_rtcp:mock_rtp_rtcp",
"../modules/video_coding:webrtc_h264",
"../modules/video_coding:webrtc_vp8",
"../modules/video_coding:webrtc_vp9",
"../rtc_base:rtc_base_approved",
"../rtc_base:rtc_task_queue",
"../rtc_base:sequenced_task_checker",
"../system_wrappers",
"../video",
"../voice_engine",
"//testing/gmock",
"//testing/gtest",
]
if (!is_android && !build_with_chromium) {
deps += [ "../modules/video_capture:video_capture_internal_impl" ]
}
}
# Warning-suppression config exported to users of the renderer targets.
config("test_renderer_exported_config") {
if (is_win && is_clang) {
# GN orders flags on a target before flags from configs. The default config
# adds -Wall, and these flags have to be after -Wall -- so they need to
# come from a config and cannot be on the target directly.
cflags = [
"-Wno-bool-conversion",
"-Wno-comment",
"-Wno-delete-non-virtual-dtor",
]
}
}
# Platform-dispatching wrapper for the on-screen test video renderer.
rtc_source_set("test_renderer") {
public_deps = [
":test_renderer_generic",
]
testonly = true
if (is_mac) {
public_deps += [ ":test_renderer_objc" ]
}
}
if (is_mac) {
# macOS renderer (Cocoa/OpenGL/CoreVideo).
rtc_source_set("test_renderer_objc") {
testonly = true
visibility = [ ":test_renderer" ]
sources = [
"mac/video_renderer_mac.h",
"mac/video_renderer_mac.mm",
]
deps = [
":test_renderer_generic",
"../rtc_base:rtc_base_approved",
]
libs = [
"Cocoa.framework",
"OpenGL.framework",
"CoreVideo.framework",
]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
}
}
# Linux (GLX/X11) and Windows (D3D) renderers, plus a null fallback for
# platforms without a supported renderer.
rtc_source_set("test_renderer_generic") {
testonly = true
visibility = [
":test_renderer",
":test_renderer_objc",
]
libs = []
sources = [
"linux/glx_renderer.cc",
"linux/glx_renderer.h",
"linux/video_renderer_linux.cc",
"video_renderer.cc",
"video_renderer.h",
"win/d3d_renderer.cc",
"win/d3d_renderer.h",
]
deps = [
":test_support",
"..:webrtc_common",
"../common_video",
"../modules/media_file",
"../rtc_base:rtc_base_approved",
"//testing/gtest",
]
if (!is_linux && !is_mac && !is_win) {
sources += [ "null_platform_renderer.cc" ]
}
if (is_linux || is_mac) {
sources += [
"gl/gl_renderer.cc",
"gl/gl_renderer.h",
]
}
if (is_linux) {
libs += [
"Xext",
"X11",
"GL",
]
}
if (is_android) {
libs += [
"GLESv2",
"log",
]
}
public_configs = [ ":test_renderer_exported_config" ]
if (!build_with_chromium && is_clang) {
# Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
suppressed_configs += [ "//build/config/clang:find_bad_constructs" ]
}
}
# Gmock-based mocks for audio encoders/decoders and their factories.
rtc_source_set("audio_codec_mocks") {
testonly = true
sources = [
"mock_audio_decoder.cc",
"mock_audio_decoder.h",
"mock_audio_decoder_factory.h",
"mock_audio_encoder.cc",
"mock_audio_encoder.h",
"mock_audio_encoder_factory.h",
]
deps = [
":test_support",
"../api:array_view",
"../api/audio_codecs:audio_codecs_api",
"../api/audio_codecs:builtin_audio_decoder_factory",
"../rtc_base:rtc_base_approved",
"//testing/gmock",
]
}
if (!build_with_chromium && is_android) {
# Java glue for running WebRTC native tests on Android (standalone only).
android_library("native_test_java") {
testonly = true
java_files = [
"android/org/webrtc/native_test/RTCNativeUnitTest.java",
"android/org/webrtc/native_test/RTCNativeUnitTestActivity.java",
]
deps = [
"../rtc_base:base_java",
"//testing/android/native_test:native_test_java",
]
# TODO(sakal): Fix build hooks crbug.com/webrtc/8148
no_build_hooks = true
}
}

43
test/DEPS Normal file
View File

@ -0,0 +1,43 @@
# Dependency rules for test/, enforced by the checkdeps tool.
# NOTE(review): these rules still use the pre-move "webrtc/" path prefix even
# though the sources moved from src/webrtc/ to src/ -- confirm they match the
# include paths actually used after the move.
include_rules = [
"+third_party/libjpeg",
"+third_party/libjpeg_turbo",
"+webrtc/call",
"+webrtc/common_audio",
"+webrtc/common_video",
"+webrtc/logging/rtc_event_log",
"+webrtc/media/base",
"+webrtc/modules/audio_coding",
"+webrtc/modules/audio_device",
"+webrtc/modules/audio_mixer",
"+webrtc/modules/audio_processing",
"+webrtc/modules/media_file",
"+webrtc/modules/rtp_rtcp",
"+webrtc/modules/video_capture",
"+webrtc/modules/video_coding",
"+webrtc/sdk",
"+webrtc/system_wrappers",
"+webrtc/voice_engine",
]
# Per-file exceptions; keys are regexes matched against file names.
specific_include_rules = {
"gmock\.h": [
"+testing/gmock/include/gmock",
],
"gtest\.h": [
"+testing/gtest/include/gtest",
],
".*congestion_controller_feedback_fuzzer\.cc": [
"+webrtc/modules/congestion_controller/include/receive_side_congestion_controller.h",
"+webrtc/modules/pacing/packet_router.h",
"+webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h",
],
".*pseudotcp_parser_fuzzer\.cc": [
"+webrtc/p2p/base/pseudotcp.h",
],
".*stun_parser_fuzzer\.cc": [
"+webrtc/p2p/base/stun.h",
],
".*stun_validator_fuzzer\.cc": [
"+webrtc/p2p/base/stun.h",
],
}

4
test/OWNERS Normal file
View File

@ -0,0 +1,4 @@
kjellander@webrtc.org
pbos@webrtc.org
phoglund@webrtc.org
stefan@webrtc.org

View File

@ -0,0 +1,46 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Copyright 2017 The WebRTC project authors. All Rights Reserved.
Use of this source code is governed by a BSD-style license
that can be found in the LICENSE file in the root of the source
tree. An additional intellectual property rights grant can be found
in the file PATENTS. All contributing project authors may
be found in the AUTHORS file in the root of the source tree.
-->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.webrtc.native_test"
android:versionCode="1"
android:versionName="1.0">
<uses-sdk android:minSdkVersion="16" android:targetSdkVersion="23" />
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE"/>
<uses-permission android:name="android.permission.BLUETOOTH"/>
<uses-permission android:name="android.permission.BLUETOOTH_ADMIN"/>
<uses-permission android:name="android.permission.CAMERA" />
<uses-permission android:name="android.permission.INTERNET"/>
<uses-permission android:name="android.permission.MODIFY_AUDIO_SETTINGS"/>
<uses-permission android:name="android.permission.RECORD_AUDIO"/>
<uses-permission android:name="android.permission.WAKE_LOCK"/>
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
<application android:label="NativeTests"
android:name="org.chromium.base.BaseChromiumApplication">
<uses-library android:name="android.test.runner"/>
<activity android:name=".RTCNativeUnitTestActivity"
android:label="NativeTest"
android:configChanges="orientation|keyboardHidden"
android:process=":test_process">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
<instrumentation android:name="org.chromium.native_test.NativeTestInstrumentationTestRunner"
android:targetPackage="org.webrtc.native_test"
android:label="Instrumentation entry point for org.webrtc.native_test"/>
</manifest>

View File

@ -0,0 +1,26 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.native_test;
import android.app.Activity;
import org.chromium.native_test.NativeUnitTest;
import org.webrtc.ContextUtils;
/**
 * Native unit test that calls ContextUtils.initialize for WebRTC, so that
 * native WebRTC code has access to the Android application context before
 * any test runs.
 */
public class RTCNativeUnitTest extends NativeUnitTest {
@Override
public void preCreate(Activity activity) {
super.preCreate(activity);
// Hand the application context to WebRTC before the native tests start.
ContextUtils.initialize(activity.getApplicationContext());
}
}

View File

@ -0,0 +1,34 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
package org.webrtc.native_test;
import android.app.Activity;
import android.os.Bundle;
/**
 * Activity that uses RTCNativeUnitTest to run the tests, forwarding its
 * lifecycle callbacks to the test object.
 */
public class RTCNativeUnitTestActivity extends Activity {
private RTCNativeUnitTest mTest = new RTCNativeUnitTest();
@Override
public void onCreate(Bundle savedInstanceState) {
// preCreate must run before the framework's own onCreate work.
mTest.preCreate(this);
super.onCreate(savedInstanceState);
mTest.postCreate(this);
}
@Override
public void onStart() {
super.onStart();
mTest.postStart(this, false);
}
}

612
test/call_test.cc Normal file
View File

@ -0,0 +1,612 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/call_test.h"
#include <algorithm>
#include "webrtc/api/audio_codecs/builtin_audio_decoder_factory.h"
#include "webrtc/api/audio_codecs/builtin_audio_encoder_factory.h"
#include "webrtc/call/rtp_transport_controller_send.h"
#include "webrtc/call/video_config.h"
#include "webrtc/modules/audio_mixer/audio_mixer_impl.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/event.h"
#include "webrtc/rtc_base/ptr_util.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/voice_engine/include/voe_base.h"
namespace webrtc {
namespace test {
namespace {
// RTP header extension ID used for the video-rotation extension below.
const int kVideoRotationRtpExtensionId = 4;
}
// Initializes the fixture defaults: real-time clock, null event log, one
// video stream and no audio/FlexFEC streams, builtin audio codec factories,
// and a dedicated task queue on which setup/teardown work runs.
CallTest::CallTest()
    : clock_(Clock::GetRealTimeClock()),
      event_log_(RtcEventLog::CreateNull()),
      sender_call_transport_controller_(nullptr),
      video_send_config_(nullptr),
      video_send_stream_(nullptr),
      audio_send_config_(nullptr),
      audio_send_stream_(nullptr),
      fake_encoder_(clock_),
      num_video_streams_(1),
      num_audio_streams_(0),
      num_flexfec_streams_(0),
      decoder_factory_(CreateBuiltinAudioDecoderFactory()),
      encoder_factory_(CreateBuiltinAudioEncoderFactory()),
      task_queue_("CallTestTaskQueue"),
      fake_send_audio_device_(nullptr),
      fake_recv_audio_device_(nullptr) {}
// Releases, on |task_queue_|, the objects that RunBaseTest() created there
// (fake audio devices and the frame generator capturer).
CallTest::~CallTest() {
  task_queue_.SendTask([this]() {
    fake_send_audio_device_.reset();
    fake_recv_audio_device_.reset();
    frame_generator_capturer_.reset();
  });
}
// Runs |test| against a full sender/receiver call pair: builds calls,
// transports, configs and streams on |task_queue_| (invoking the test's
// hooks at each step), runs PerformTest() on the calling thread, then tears
// everything down again on |task_queue_|.
void CallTest::RunBaseTest(BaseTest* test) {
  task_queue_.SendTask([this, test]() {
    num_video_streams_ = test->GetNumVideoStreams();
    num_audio_streams_ = test->GetNumAudioStreams();
    num_flexfec_streams_ = test->GetNumFlexfecStreams();
    RTC_DCHECK(num_video_streams_ > 0 || num_audio_streams_ > 0);
    Call::Config send_config(test->GetSenderCallConfig());
    // Audio requires fake devices, APMs and voice engines before the sender
    // call can be created.
    if (num_audio_streams_ > 0) {
      CreateFakeAudioDevices(test->CreateCapturer(), test->CreateRenderer());
      test->OnFakeAudioDevicesCreated(fake_send_audio_device_.get(),
                                      fake_recv_audio_device_.get());
      apm_send_ = AudioProcessing::Create();
      apm_recv_ = AudioProcessing::Create();
      CreateVoiceEngines();
      AudioState::Config audio_state_config;
      audio_state_config.voice_engine = voe_send_.voice_engine;
      audio_state_config.audio_mixer = AudioMixerImpl::Create();
      audio_state_config.audio_processing = apm_send_;
      send_config.audio_state = AudioState::Create(audio_state_config);
    }
    CreateSenderCall(send_config);
    if (sender_call_transport_controller_ != nullptr) {
      test->OnRtpTransportControllerSendCreated(
          sender_call_transport_controller_);
    }
    if (test->ShouldCreateReceivers()) {
      Call::Config recv_config(test->GetReceiverCallConfig());
      if (num_audio_streams_ > 0) {
        AudioState::Config audio_state_config;
        audio_state_config.voice_engine = voe_recv_.voice_engine;
        audio_state_config.audio_mixer = AudioMixerImpl::Create();
        audio_state_config.audio_processing = apm_recv_;
        recv_config.audio_state = AudioState::Create(audio_state_config);
      }
      CreateReceiverCall(recv_config);
    }
    test->OnCallsCreated(sender_call_.get(), receiver_call_.get());
    // Wire the two calls together through the test-provided transports.
    receive_transport_.reset(test->CreateReceiveTransport(&task_queue_));
    send_transport_.reset(
        test->CreateSendTransport(&task_queue_, sender_call_.get()));
    if (test->ShouldCreateReceivers()) {
      send_transport_->SetReceiver(receiver_call_->Receiver());
      receive_transport_->SetReceiver(sender_call_->Receiver());
      if (num_video_streams_ > 0)
        receiver_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
      if (num_audio_streams_ > 0)
        receiver_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp);
    } else {
      // Sender-only call delivers to itself.
      send_transport_->SetReceiver(sender_call_->Receiver());
      receive_transport_->SetReceiver(nullptr);
    }
    // Build default configs, then let the test mutate them before any
    // streams are created from them.
    CreateSendConfig(num_video_streams_, num_audio_streams_,
                     num_flexfec_streams_, send_transport_.get());
    if (test->ShouldCreateReceivers()) {
      CreateMatchingReceiveConfigs(receive_transport_.get());
    }
    if (num_video_streams_ > 0) {
      test->ModifyVideoConfigs(&video_send_config_, &video_receive_configs_,
                               &video_encoder_config_);
    }
    if (num_audio_streams_ > 0) {
      test->ModifyAudioConfigs(&audio_send_config_, &audio_receive_configs_);
    }
    if (num_flexfec_streams_ > 0) {
      test->ModifyFlexfecConfigs(&flexfec_receive_configs_);
    }
    // FlexFEC receive streams are created before the video streams.
    if (num_flexfec_streams_ > 0) {
      CreateFlexfecStreams();
      test->OnFlexfecStreamsCreated(flexfec_receive_streams_);
    }
    if (num_video_streams_ > 0) {
      CreateVideoStreams();
      test->OnVideoStreamsCreated(video_send_stream_, video_receive_streams_);
    }
    if (num_audio_streams_ > 0) {
      CreateAudioStreams();
      test->OnAudioStreamsCreated(audio_send_stream_, audio_receive_streams_);
    }
    if (num_video_streams_ > 0) {
      // The test may override the default capture resolution and frame rate.
      int width = kDefaultWidth;
      int height = kDefaultHeight;
      int frame_rate = kDefaultFramerate;
      test->ModifyVideoCaptureStartResolution(&width, &height, &frame_rate);
      CreateFrameGeneratorCapturer(frame_rate, width, height);
      test->OnFrameGeneratorCapturerCreated(frame_generator_capturer_.get());
    }
    Start();
  });
  // The actual test body runs on the caller's thread.
  test->PerformTest();
  // Teardown mirrors setup and also runs on the task queue.
  task_queue_.SendTask([this, test]() {
    Stop();
    test->OnStreamsStopped();
    DestroyStreams();
    send_transport_.reset();
    receive_transport_.reset();
    DestroyCalls();
    if (num_audio_streams_ > 0)
      DestroyVoiceEngines();
  });
}
// Convenience helper creating both calls from explicit configs, for tests
// that do not go through RunBaseTest().
void CallTest::CreateCalls(const Call::Config& sender_config,
                           const Call::Config& receiver_config) {
  CreateSenderCall(sender_config);
  CreateReceiverCall(receiver_config);
}
// Creates the sender call with an explicit RtpTransportControllerSend.
// Ownership of the controller passes to the Call; a raw pointer is kept in
// |sender_call_transport_controller_| so RunBaseTest() can expose it to the
// test via OnRtpTransportControllerSendCreated().
void CallTest::CreateSenderCall(const Call::Config& config) {
  sender_call_transport_controller_ = new RtpTransportControllerSend(
      Clock::GetRealTimeClock(), config.event_log);
  sender_call_.reset(
      Call::Create(config, std::unique_ptr<RtpTransportControllerSend>(
                               sender_call_transport_controller_)));
}
// Creates the receiver call with Call's default transport controller.
void CallTest::CreateReceiverCall(const Call::Config& config) {
  receiver_call_.reset(Call::Create(config));
}
// Releases both calls (RunBaseTest() destroys the streams before this).
void CallTest::DestroyCalls() {
  sender_call_.reset();
  receiver_call_.reset();
}
// Builds default send-side configs for up to kNumSsrcs video streams, at
// most one audio stream and at most one FlexFEC stream, all sending over
// |send_transport|. The results are stored in the corresponding members
// (video_send_config_, audio_send_config_, ...); no streams are created.
void CallTest::CreateSendConfig(size_t num_video_streams,
                                size_t num_audio_streams,
                                size_t num_flexfec_streams,
                                Transport* send_transport) {
  // Use the _LE form consistently (cf. the checks below) so a failure
  // prints both compared values.
  RTC_DCHECK_LE(num_video_streams, kNumSsrcs);
  RTC_DCHECK_LE(num_audio_streams, 1);
  RTC_DCHECK_LE(num_flexfec_streams, 1);
  // Audio sending requires a valid VoE send channel.
  RTC_DCHECK(num_audio_streams == 0 || voe_send_.channel_id >= 0);
  if (num_video_streams > 0) {
    video_send_config_ = VideoSendStream::Config(send_transport);
    video_send_config_.encoder_settings.encoder = &fake_encoder_;
    video_send_config_.encoder_settings.payload_name = "FAKE";
    video_send_config_.encoder_settings.payload_type =
        kFakeVideoSendPayloadType;
    video_send_config_.rtp.extensions.push_back(
        RtpExtension(RtpExtension::kTransportSequenceNumberUri,
                     kTransportSequenceNumberExtensionId));
    video_send_config_.rtp.extensions.push_back(RtpExtension(
        RtpExtension::kVideoContentTypeUri, kVideoContentTypeExtensionId));
    FillEncoderConfiguration(num_video_streams, &video_encoder_config_);
    for (size_t i = 0; i < num_video_streams; ++i)
      video_send_config_.rtp.ssrcs.push_back(kVideoSendSsrcs[i]);
    video_send_config_.rtp.extensions.push_back(RtpExtension(
        RtpExtension::kVideoRotationUri, kVideoRotationRtpExtensionId));
  }
  if (num_audio_streams > 0) {
    audio_send_config_ = AudioSendStream::Config(send_transport);
    audio_send_config_.voe_channel_id = voe_send_.channel_id;
    audio_send_config_.rtp.ssrc = kAudioSendSsrc;
    audio_send_config_.send_codec_spec =
        rtc::Optional<AudioSendStream::Config::SendCodecSpec>(
            {kAudioSendPayloadType, {"opus", 48000, 2, {{"stereo", "1"}}}});
    audio_send_config_.encoder_factory = encoder_factory_;
  }
  // TODO(brandtr): Update this when we support multistream protection.
  if (num_flexfec_streams > 0) {
    video_send_config_.rtp.flexfec.payload_type = kFlexfecPayloadType;
    video_send_config_.rtp.flexfec.ssrc = kFlexfecSendSsrc;
    video_send_config_.rtp.flexfec.protected_media_ssrcs = {kVideoSendSsrcs[0]};
  }
}
// Builds receive-side configs mirroring the send configs produced by
// CreateSendConfig(): one video receive config per video send SSRC, at most
// one audio and one FlexFEC receive config. RTCP is sent back over
// |rtcp_send_transport|. No streams are created here.
void CallTest::CreateMatchingReceiveConfigs(Transport* rtcp_send_transport) {
  video_receive_configs_.clear();
  allocated_decoders_.clear();
  if (num_video_streams_ > 0) {
    RTC_DCHECK(!video_send_config_.rtp.ssrcs.empty());
    VideoReceiveStream::Config video_config(rtcp_send_transport);
    video_config.rtp.remb = false;
    video_config.rtp.transport_cc = true;
    video_config.rtp.local_ssrc = kReceiverLocalVideoSsrc;
    for (const RtpExtension& extension : video_send_config_.rtp.extensions)
      video_config.rtp.extensions.push_back(extension);
    video_config.renderer = &fake_renderer_;
    for (size_t i = 0; i < video_send_config_.rtp.ssrcs.size(); ++i) {
      // Each receive config gets its own decoder; ownership of the decoder
      // instances is retained in |allocated_decoders_|.
      VideoReceiveStream::Decoder decoder =
          test::CreateMatchingDecoder(video_send_config_.encoder_settings);
      allocated_decoders_.push_back(
          std::unique_ptr<VideoDecoder>(decoder.decoder));
      video_config.decoders.clear();
      video_config.decoders.push_back(decoder);
      video_config.rtp.remote_ssrc = video_send_config_.rtp.ssrcs[i];
      video_receive_configs_.push_back(video_config.Copy());
    }
  }
  // Use the _LE form with consistent argument order (value, limit), matching
  // the checks in CreateSendConfig(), so a failure prints both values.
  RTC_DCHECK_LE(num_audio_streams_, 1);
  if (num_audio_streams_ == 1) {
    RTC_DCHECK_LE(0, voe_send_.channel_id);
    AudioReceiveStream::Config audio_config;
    audio_config.rtp.local_ssrc = kReceiverLocalAudioSsrc;
    audio_config.rtcp_send_transport = rtcp_send_transport;
    audio_config.voe_channel_id = voe_recv_.channel_id;
    audio_config.rtp.remote_ssrc = audio_send_config_.rtp.ssrc;
    audio_config.decoder_factory = decoder_factory_;
    audio_config.decoder_map = {{kAudioSendPayloadType, {"opus", 48000, 2}}};
    audio_receive_configs_.push_back(audio_config);
  }
  // TODO(brandtr): Update this when we support multistream protection.
  RTC_DCHECK_LE(num_flexfec_streams_, 1);
  if (num_flexfec_streams_ == 1) {
    FlexfecReceiveStream::Config config(rtcp_send_transport);
    config.payload_type = kFlexfecPayloadType;
    config.remote_ssrc = kFlexfecSendSsrc;
    config.protected_media_ssrcs = {kVideoSendSsrcs[0]};
    config.local_ssrc = kReceiverLocalVideoSsrc;
    for (const RtpExtension& extension : video_send_config_.rtp.extensions)
      config.rtp_header_extensions.push_back(extension);
    flexfec_receive_configs_.push_back(config);
  }
}
// Creates a frame generator capturer driven by |clock| with an effective
// frame rate of framerate * speed (per the method name, |clock| is intended
// to be a drifting clock) and attaches it as the video send stream's source.
void CallTest::CreateFrameGeneratorCapturerWithDrift(Clock* clock,
                                                     float speed,
                                                     int framerate,
                                                     int width,
                                                     int height) {
  frame_generator_capturer_.reset(test::FrameGeneratorCapturer::Create(
      width, height, framerate * speed, clock));
  video_send_stream_->SetSource(
      frame_generator_capturer_.get(),
      VideoSendStream::DegradationPreference::kMaintainFramerate);
}
// Creates a frame generator capturer driven by the fixture's real-time
// |clock_| and attaches it as the video send stream's source.
void CallTest::CreateFrameGeneratorCapturer(int framerate,
                                            int width,
                                            int height) {
  frame_generator_capturer_.reset(
      test::FrameGeneratorCapturer::Create(width, height, framerate, clock_));
  video_send_stream_->SetSource(
      frame_generator_capturer_.get(),
      VideoSendStream::DegradationPreference::kMaintainFramerate);
}
// Creates a capture-only fake device for the send side and a render-only
// fake device for the receive side.
void CallTest::CreateFakeAudioDevices(
    std::unique_ptr<FakeAudioDevice::Capturer> capturer,
    std::unique_ptr<FakeAudioDevice::Renderer> renderer) {
  // NOTE(review): 1.f is presumably a real-time speed factor; confirm
  // against the FakeAudioDevice constructor.
  fake_send_audio_device_.reset(new FakeAudioDevice(
      std::move(capturer), nullptr, 1.f));
  fake_recv_audio_device_.reset(new FakeAudioDevice(
      nullptr, std::move(renderer), 1.f));
}
void CallTest::CreateVideoStreams() {
RTC_DCHECK(video_send_stream_ == nullptr);
RTC_DCHECK(video_receive_streams_.empty());
RTC_DCHECK(audio_send_stream_ == nullptr);
RTC_DCHECK(audio_receive_streams_.empty());
video_send_stream_ = sender_call_->CreateVideoSendStream(
video_send_config_.Copy(), video_encoder_config_.Copy());
for (size_t i = 0; i < video_receive_configs_.size(); ++i) {
video_receive_streams_.push_back(receiver_call_->CreateVideoReceiveStream(
video_receive_configs_[i].Copy()));
}
AssociateFlexfecStreamsWithVideoStreams();
}
void CallTest::CreateAudioStreams() {
audio_send_stream_ = sender_call_->CreateAudioSendStream(audio_send_config_);
for (size_t i = 0; i < audio_receive_configs_.size(); ++i) {
audio_receive_streams_.push_back(
receiver_call_->CreateAudioReceiveStream(audio_receive_configs_[i]));
}
}
void CallTest::CreateFlexfecStreams() {
for (size_t i = 0; i < flexfec_receive_configs_.size(); ++i) {
flexfec_receive_streams_.push_back(
receiver_call_->CreateFlexfecReceiveStream(
flexfec_receive_configs_[i]));
}
AssociateFlexfecStreamsWithVideoStreams();
}
// Registers every FlexFEC receive stream as a secondary sink on every video
// receive stream.
void CallTest::AssociateFlexfecStreamsWithVideoStreams() {
  // All FlexFEC streams protect all of the video streams.
  for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_) {
    for (VideoReceiveStream* video_recv_stream : video_receive_streams_) {
      video_recv_stream->AddSecondarySink(flexfec_recv_stream);
    }
  }
}

// Reverts AssociateFlexfecStreamsWithVideoStreams(): removes every FlexFEC
// stream from every video receive stream's secondary sinks.
void CallTest::DissociateFlexfecStreamsFromVideoStreams() {
  for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_) {
    for (VideoReceiveStream* video_recv_stream : video_receive_streams_) {
      video_recv_stream->RemoveSecondarySink(flexfec_recv_stream);
    }
  }
}
// Starts every created stream (video first, then audio) and finally the
// frame generator, if one was created.
void CallTest::Start() {
  if (video_send_stream_)
    video_send_stream_->Start();
  for (VideoReceiveStream* video_recv_stream : video_receive_streams_)
    video_recv_stream->Start();
  if (audio_send_stream_) {
    audio_send_stream_->Start();
  }
  for (AudioReceiveStream* audio_recv_stream : audio_receive_streams_)
    audio_recv_stream->Start();
  // Idiomatic null check of the unique_ptr itself, consistent with the
  // other checks in this function (was `.get() != NULL`).
  if (frame_generator_capturer_)
    frame_generator_capturer_->Start();
}
// Stops everything started by Start(): the frame generator first so no new
// frames are produced, then audio streams, then video streams.
void CallTest::Stop() {
  // Idiomatic null check of the unique_ptr itself, consistent with the
  // other checks in this function (was `.get() != NULL`).
  if (frame_generator_capturer_)
    frame_generator_capturer_->Stop();
  for (AudioReceiveStream* audio_recv_stream : audio_receive_streams_)
    audio_recv_stream->Stop();
  if (audio_send_stream_) {
    audio_send_stream_->Stop();
  }
  for (VideoReceiveStream* video_recv_stream : video_receive_streams_)
    video_recv_stream->Stop();
  if (video_send_stream_)
    video_send_stream_->Stop();
}
void CallTest::DestroyStreams() {
DissociateFlexfecStreamsFromVideoStreams();
if (audio_send_stream_)
sender_call_->DestroyAudioSendStream(audio_send_stream_);
audio_send_stream_ = nullptr;
for (AudioReceiveStream* audio_recv_stream : audio_receive_streams_)
receiver_call_->DestroyAudioReceiveStream(audio_recv_stream);
if (video_send_stream_)
sender_call_->DestroyVideoSendStream(video_send_stream_);
video_send_stream_ = nullptr;
for (VideoReceiveStream* video_recv_stream : video_receive_streams_)
receiver_call_->DestroyVideoReceiveStream(video_recv_stream);
for (FlexfecReceiveStream* flexfec_recv_stream : flexfec_receive_streams_)
receiver_call_->DestroyFlexfecReceiveStream(flexfec_recv_stream);
video_receive_streams_.clear();
allocated_decoders_.clear();
}
// Forwards |rotation| to the frame generator capturer, which must exist.
void CallTest::SetFakeVideoCaptureRotation(VideoRotation rotation) {
  frame_generator_capturer_->SetFakeRotation(rotation);
}

// Creates and initializes the legacy send- and receive-side VoiceEngines with
// their fake audio devices and APMs, and creates one channel on each. The
// send channel is created with voice pacing enabled.
void CallTest::CreateVoiceEngines() {
  voe_send_.voice_engine = VoiceEngine::Create();
  voe_send_.base = VoEBase::GetInterface(voe_send_.voice_engine);
  EXPECT_EQ(0, voe_send_.base->Init(fake_send_audio_device_.get(),
                                    apm_send_.get(), decoder_factory_));
  VoEBase::ChannelConfig config;
  config.enable_voice_pacing = true;
  voe_send_.channel_id = voe_send_.base->CreateChannel(config);
  EXPECT_GE(voe_send_.channel_id, 0);

  voe_recv_.voice_engine = VoiceEngine::Create();
  voe_recv_.base = VoEBase::GetInterface(voe_recv_.voice_engine);
  EXPECT_EQ(0, voe_recv_.base->Init(fake_recv_audio_device_.get(),
                                    apm_recv_.get(), decoder_factory_));
  voe_recv_.channel_id = voe_recv_.base->CreateChannel();
  EXPECT_GE(voe_recv_.channel_id, 0);
}
// Tears down the legacy VoiceEngines: channels are deleted and the VoEBase
// interfaces released before the engines themselves are deleted.
void CallTest::DestroyVoiceEngines() {
  voe_recv_.base->DeleteChannel(voe_recv_.channel_id);
  voe_recv_.channel_id = -1;
  voe_recv_.base->Release();
  voe_recv_.base = nullptr;

  voe_send_.base->DeleteChannel(voe_send_.channel_id);
  voe_send_.channel_id = -1;
  voe_send_.base->Release();
  voe_send_.base = nullptr;

  VoiceEngine::Delete(voe_send_.voice_engine);
  voe_send_.voice_engine = nullptr;
  VoiceEngine::Delete(voe_recv_.voice_engine);
  voe_recv_.voice_engine = nullptr;
}
// Out-of-line definitions for the in-class-initialized integral constants
// (required pre-C++17 when the constants are odr-used).
const int CallTest::kDefaultWidth;
const int CallTest::kDefaultHeight;
const int CallTest::kDefaultFramerate;
// Test timeouts.
const int CallTest::kDefaultTimeoutMs = 30 * 1000;
const int CallTest::kLongTimeoutMs = 120 * 1000;
// RTP payload types used by the test streams.
const uint8_t CallTest::kVideoSendPayloadType = 100;
const uint8_t CallTest::kFakeVideoSendPayloadType = 125;
const uint8_t CallTest::kSendRtxPayloadType = 98;
const uint8_t CallTest::kRedPayloadType = 118;
const uint8_t CallTest::kRtxRedPayloadType = 99;
const uint8_t CallTest::kUlpfecPayloadType = 119;
const uint8_t CallTest::kFlexfecPayloadType = 120;
const uint8_t CallTest::kAudioSendPayloadType = 103;
const uint8_t CallTest::kPayloadTypeH264 = 122;
const uint8_t CallTest::kPayloadTypeVP8 = 123;
const uint8_t CallTest::kPayloadTypeVP9 = 124;
// SSRCs for the send and receive sides.
const uint32_t CallTest::kSendRtxSsrcs[kNumSsrcs] = {0xBADCAFD, 0xBADCAFE,
                                                     0xBADCAFF};
const uint32_t CallTest::kVideoSendSsrcs[kNumSsrcs] = {0xC0FFED, 0xC0FFEE,
                                                       0xC0FFEF};
const uint32_t CallTest::kAudioSendSsrc = 0xDEADBEEF;
const uint32_t CallTest::kFlexfecSendSsrc = 0xBADBEEF;
const uint32_t CallTest::kReceiverLocalVideoSsrc = 0x123456;
const uint32_t CallTest::kReceiverLocalAudioSsrc = 0x1234567;
const int CallTest::kNackRtpHistoryMs = 1000;
const uint8_t CallTest::kDefaultKeepalivePayloadType =
    RtpKeepAliveConfig().payload_type;
// Maps each payload type above to its media type, used by the fake transports
// to demux packets.
const std::map<uint8_t, MediaType> CallTest::payload_type_map_ = {
    {CallTest::kVideoSendPayloadType, MediaType::VIDEO},
    {CallTest::kFakeVideoSendPayloadType, MediaType::VIDEO},
    {CallTest::kSendRtxPayloadType, MediaType::VIDEO},
    {CallTest::kRedPayloadType, MediaType::VIDEO},
    {CallTest::kRtxRedPayloadType, MediaType::VIDEO},
    {CallTest::kUlpfecPayloadType, MediaType::VIDEO},
    {CallTest::kFlexfecPayloadType, MediaType::VIDEO},
    {CallTest::kAudioSendPayloadType, MediaType::AUDIO},
    {CallTest::kDefaultKeepalivePayloadType, MediaType::ANY}};
BaseTest::BaseTest() : event_log_(RtcEventLog::CreateNull()) {}

BaseTest::BaseTest(unsigned int timeout_ms)
    : RtpRtcpObserver(timeout_ms), event_log_(RtcEventLog::CreateNull()) {}

BaseTest::~BaseTest() {
}

// Default capturer: 48 kHz pulsed noise (first argument presumably the noise
// amplitude — see FakeAudioDevice::CreatePulsedNoiseCapturer).
std::unique_ptr<FakeAudioDevice::Capturer> BaseTest::CreateCapturer() {
  return FakeAudioDevice::CreatePulsedNoiseCapturer(256, 48000);
}

// Default renderer: discards all rendered audio (48 kHz).
std::unique_ptr<FakeAudioDevice::Renderer> BaseTest::CreateRenderer() {
  return FakeAudioDevice::CreateDiscardRenderer(48000);
}

// Default no-op hook; override to inspect the fake audio devices.
void BaseTest::OnFakeAudioDevicesCreated(FakeAudioDevice* send_audio_device,
                                         FakeAudioDevice* recv_audio_device) {
}

// Default call configs: both sides use a null (no-op) event log.
Call::Config BaseTest::GetSenderCallConfig() {
  return Call::Config(event_log_.get());
}

Call::Config BaseTest::GetReceiverCallConfig() {
  return Call::Config(event_log_.get());
}

// Default no-op hooks; override to observe object creation.
void BaseTest::OnRtpTransportControllerSendCreated(
    RtpTransportControllerSend* controller) {}

void BaseTest::OnCallsCreated(Call* sender_call, Call* receiver_call) {
}
// Default send transport: a PacketTransport with a default (impairment-free)
// FakeNetworkPipe config, attached to |sender_call|.
test::PacketTransport* BaseTest::CreateSendTransport(
    SingleThreadedTaskQueueForTesting* task_queue,
    Call* sender_call) {
  return new PacketTransport(
      task_queue, sender_call, this, test::PacketTransport::kSender,
      CallTest::payload_type_map_, FakeNetworkPipe::Config());
}

// Default receive transport: like the send transport but with no associated
// Call.
test::PacketTransport* BaseTest::CreateReceiveTransport(
    SingleThreadedTaskQueueForTesting* task_queue) {
  return new PacketTransport(
      task_queue, nullptr, this, test::PacketTransport::kReceiver,
      CallTest::payload_type_map_, FakeNetworkPipe::Config());
}

// Defaults: one video stream, no audio, no FlexFEC. Override to change.
size_t BaseTest::GetNumVideoStreams() const {
  return 1;
}

size_t BaseTest::GetNumAudioStreams() const {
  return 0;
}

size_t BaseTest::GetNumFlexfecStreams() const {
  return 0;
}
// Default no-op: override to adjust video configs before stream creation.
void BaseTest::ModifyVideoConfigs(
    VideoSendStream::Config* send_config,
    std::vector<VideoReceiveStream::Config>* receive_configs,
    VideoEncoderConfig* encoder_config) {}
// Default no-op: override to adjust the capture start resolution/frame rate.
// Fixes the misspelled parameter name (was `heigt`).
void BaseTest::ModifyVideoCaptureStartResolution(int* width,
                                                 int* height,
                                                 int* frame_rate) {}
// Default no-op hooks and config modifiers; subclasses override the ones they
// need.
void BaseTest::OnVideoStreamsCreated(
    VideoSendStream* send_stream,
    const std::vector<VideoReceiveStream*>& receive_streams) {}

void BaseTest::ModifyAudioConfigs(
    AudioSendStream::Config* send_config,
    std::vector<AudioReceiveStream::Config>* receive_configs) {}

void BaseTest::OnAudioStreamsCreated(
    AudioSendStream* send_stream,
    const std::vector<AudioReceiveStream*>& receive_streams) {}

void BaseTest::ModifyFlexfecConfigs(
    std::vector<FlexfecReceiveStream::Config>* receive_configs) {}

void BaseTest::OnFlexfecStreamsCreated(
    const std::vector<FlexfecReceiveStream*>& receive_streams) {}

void BaseTest::OnFrameGeneratorCapturerCreated(
    FrameGeneratorCapturer* frame_generator_capturer) {
}

void BaseTest::OnStreamsStopped() {
}
SendTest::SendTest(unsigned int timeout_ms) : BaseTest(timeout_ms) {
}

// SendTest exercises only the sender side.
bool SendTest::ShouldCreateReceivers() const {
  return false;
}

EndToEndTest::EndToEndTest() {}

EndToEndTest::EndToEndTest(unsigned int timeout_ms) : BaseTest(timeout_ms) {
}

// EndToEndTest exercises both the sender and receiver sides.
bool EndToEndTest::ShouldCreateReceivers() const {
  return true;
}
} // namespace test
} // namespace webrtc

249
test/call_test.h Normal file
View File

@ -0,0 +1,249 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_CALL_TEST_H_
#define WEBRTC_TEST_CALL_TEST_H_
#include <memory>
#include <vector>
#include "webrtc/call/call.h"
#include "webrtc/call/rtp_transport_controller_send.h"
#include "webrtc/logging/rtc_event_log/rtc_event_log.h"
#include "webrtc/test/encoder_settings.h"
#include "webrtc/test/fake_audio_device.h"
#include "webrtc/test/fake_decoder.h"
#include "webrtc/test/fake_encoder.h"
#include "webrtc/test/fake_videorenderer.h"
#include "webrtc/test/frame_generator_capturer.h"
#include "webrtc/test/rtp_rtcp_observer.h"
#include "webrtc/test/single_threaded_task_queue.h"
namespace webrtc {
class VoEBase;
namespace test {
class BaseTest;
// googletest fixture that owns a sender Call and a receiver Call together
// with their streams, transports, fake audio devices and legacy VoiceEngine
// state. Subclasses drive a test via RunBaseTest() or use the
// Create*/Start/Stop helpers directly.
class CallTest : public ::testing::Test {
 public:
  CallTest();
  virtual ~CallTest();

  // Maximum number of simulcast streams the fixture supports.
  static const size_t kNumSsrcs = 3;

  static const int kDefaultWidth = 320;
  static const int kDefaultHeight = 180;
  static const int kDefaultFramerate = 30;
  // Timeouts; values are defined in call_test.cc.
  static const int kDefaultTimeoutMs;
  static const int kLongTimeoutMs;
  // RTP payload types used by the test streams.
  static const uint8_t kVideoSendPayloadType;
  static const uint8_t kSendRtxPayloadType;
  static const uint8_t kFakeVideoSendPayloadType;
  static const uint8_t kRedPayloadType;
  static const uint8_t kRtxRedPayloadType;
  static const uint8_t kUlpfecPayloadType;
  static const uint8_t kFlexfecPayloadType;
  static const uint8_t kAudioSendPayloadType;
  static const uint8_t kPayloadTypeH264;
  static const uint8_t kPayloadTypeVP8;
  static const uint8_t kPayloadTypeVP9;
  // SSRCs for the send and receive sides.
  static const uint32_t kSendRtxSsrcs[kNumSsrcs];
  static const uint32_t kVideoSendSsrcs[kNumSsrcs];
  static const uint32_t kAudioSendSsrc;
  static const uint32_t kFlexfecSendSsrc;
  static const uint32_t kReceiverLocalVideoSsrc;
  static const uint32_t kReceiverLocalAudioSsrc;
  static const int kNackRtpHistoryMs;
  static const uint8_t kDefaultKeepalivePayloadType;
  // Maps payload types to media types for the fake transports' demuxers.
  static const std::map<uint8_t, MediaType> payload_type_map_;

 protected:
  // RunBaseTest overwrites the audio_state and the voice_engine of the send and
  // receive Call configs to simplify test code and avoid having old VoiceEngine
  // APIs in the tests.
  void RunBaseTest(BaseTest* test);

  // Call creation/destruction helpers.
  void CreateCalls(const Call::Config& sender_config,
                   const Call::Config& receiver_config);
  void CreateSenderCall(const Call::Config& config);
  void CreateReceiverCall(const Call::Config& config);
  void DestroyCalls();

  // Config creation helpers; must run before the matching Create*Streams().
  void CreateSendConfig(size_t num_video_streams,
                        size_t num_audio_streams,
                        size_t num_flexfec_streams,
                        Transport* send_transport);
  void CreateMatchingReceiveConfigs(Transport* rtcp_send_transport);

  // Creates a frame-generator capturer timed by |drift_clock| running at
  // |speed| times real time and attaches it to the video send stream.
  void CreateFrameGeneratorCapturerWithDrift(Clock* drift_clock,
                                             float speed,
                                             int framerate,
                                             int width,
                                             int height);
  void CreateFrameGeneratorCapturer(int framerate, int width, int height);
  void CreateFakeAudioDevices(
      std::unique_ptr<FakeAudioDevice::Capturer> capturer,
      std::unique_ptr<FakeAudioDevice::Renderer> renderer);

  // Stream creation from the previously prepared configs.
  void CreateVideoStreams();
  void CreateAudioStreams();
  void CreateFlexfecStreams();

  void AssociateFlexfecStreamsWithVideoStreams();
  void DissociateFlexfecStreamsFromVideoStreams();

  void Start();
  void Stop();
  void DestroyStreams();
  void SetFakeVideoCaptureRotation(VideoRotation rotation);

  Clock* const clock_;

  std::unique_ptr<webrtc::RtcEventLog> event_log_;

  // Sender-side state.
  std::unique_ptr<Call> sender_call_;
  RtpTransportControllerSend* sender_call_transport_controller_;
  std::unique_ptr<PacketTransport> send_transport_;
  VideoSendStream::Config video_send_config_;
  VideoEncoderConfig video_encoder_config_;
  VideoSendStream* video_send_stream_;
  AudioSendStream::Config audio_send_config_;
  AudioSendStream* audio_send_stream_;

  // Receiver-side state.
  std::unique_ptr<Call> receiver_call_;
  std::unique_ptr<PacketTransport> receive_transport_;
  std::vector<VideoReceiveStream::Config> video_receive_configs_;
  std::vector<VideoReceiveStream*> video_receive_streams_;
  std::vector<AudioReceiveStream::Config> audio_receive_configs_;
  std::vector<AudioReceiveStream*> audio_receive_streams_;
  std::vector<FlexfecReceiveStream::Config> flexfec_receive_configs_;
  std::vector<FlexfecReceiveStream*> flexfec_receive_streams_;

  std::unique_ptr<test::FrameGeneratorCapturer> frame_generator_capturer_;
  test::FakeEncoder fake_encoder_;
  std::vector<std::unique_ptr<VideoDecoder>> allocated_decoders_;
  size_t num_video_streams_;
  size_t num_audio_streams_;
  size_t num_flexfec_streams_;
  rtc::scoped_refptr<AudioDecoderFactory> decoder_factory_;
  rtc::scoped_refptr<AudioEncoderFactory> encoder_factory_;
  test::FakeVideoRenderer fake_renderer_;

  SingleThreadedTaskQueueForTesting task_queue_;

 private:
  // TODO(holmer): Remove once VoiceEngine is fully refactored to the new API.
  // These methods are used to set up legacy voice engines and channels which is
  // necessary while voice engine is being refactored to the new stream API.
  struct VoiceEngineState {
    VoiceEngineState()
        : voice_engine(nullptr),
          base(nullptr),
          channel_id(-1) {}

    VoiceEngine* voice_engine;
    VoEBase* base;
    int channel_id;
  };

  void CreateVoiceEngines();
  void DestroyVoiceEngines();

  VoiceEngineState voe_send_;
  VoiceEngineState voe_recv_;
  rtc::scoped_refptr<AudioProcessing> apm_send_;
  rtc::scoped_refptr<AudioProcessing> apm_recv_;

  // The audio devices must outlive the voice engines.
  std::unique_ptr<test::FakeAudioDevice> fake_send_audio_device_;
  std::unique_ptr<test::FakeAudioDevice> fake_recv_audio_device_;
};
// A test case to run with CallTest::RunBaseTest(). Subclasses implement
// PerformTest() and override the virtual hooks to customize configs and
// observe the objects the fixture creates.
class BaseTest : public RtpRtcpObserver {
 public:
  BaseTest();
  explicit BaseTest(unsigned int timeout_ms);
  virtual ~BaseTest();

  // The test body.
  virtual void PerformTest() = 0;
  // Whether the receiver-side call and streams should be created.
  virtual bool ShouldCreateReceivers() const = 0;

  // Stream counts; defaults are one video, no audio, no FlexFEC.
  virtual size_t GetNumVideoStreams() const;
  virtual size_t GetNumAudioStreams() const;
  virtual size_t GetNumFlexfecStreams() const;

  // Fake audio device factories and creation hook.
  virtual std::unique_ptr<FakeAudioDevice::Capturer> CreateCapturer();
  virtual std::unique_ptr<FakeAudioDevice::Renderer> CreateRenderer();
  virtual void OnFakeAudioDevicesCreated(FakeAudioDevice* send_audio_device,
                                         FakeAudioDevice* recv_audio_device);

  virtual Call::Config GetSenderCallConfig();
  virtual Call::Config GetReceiverCallConfig();
  virtual void OnRtpTransportControllerSendCreated(
      RtpTransportControllerSend* controller);
  virtual void OnCallsCreated(Call* sender_call, Call* receiver_call);

  // Transport factories; defaults create PacketTransports without simulated
  // network impairments.
  virtual test::PacketTransport* CreateSendTransport(
      SingleThreadedTaskQueueForTesting* task_queue,
      Call* sender_call);
  virtual test::PacketTransport* CreateReceiveTransport(
      SingleThreadedTaskQueueForTesting* task_queue);

  // Config modifiers and creation hooks; defaults are no-ops.
  virtual void ModifyVideoConfigs(
      VideoSendStream::Config* send_config,
      std::vector<VideoReceiveStream::Config>* receive_configs,
      VideoEncoderConfig* encoder_config);
  virtual void ModifyVideoCaptureStartResolution(int* width,
                                                 int* heigt,
                                                 int* frame_rate);
  virtual void OnVideoStreamsCreated(
      VideoSendStream* send_stream,
      const std::vector<VideoReceiveStream*>& receive_streams);

  virtual void ModifyAudioConfigs(
      AudioSendStream::Config* send_config,
      std::vector<AudioReceiveStream::Config>* receive_configs);
  virtual void OnAudioStreamsCreated(
      AudioSendStream* send_stream,
      const std::vector<AudioReceiveStream*>& receive_streams);

  virtual void ModifyFlexfecConfigs(
      std::vector<FlexfecReceiveStream::Config>* receive_configs);
  virtual void OnFlexfecStreamsCreated(
      const std::vector<FlexfecReceiveStream*>& receive_streams);

  virtual void OnFrameGeneratorCapturerCreated(
      FrameGeneratorCapturer* frame_generator_capturer);

  virtual void OnStreamsStopped();

  std::unique_ptr<webrtc::RtcEventLog> event_log_;
};
// A BaseTest that exercises only the sender side (no receivers created).
class SendTest : public BaseTest {
 public:
  explicit SendTest(unsigned int timeout_ms);

  bool ShouldCreateReceivers() const override;
};

// A BaseTest that exercises both sender and receiver sides.
class EndToEndTest : public BaseTest {
 public:
  EndToEndTest();
  explicit EndToEndTest(unsigned int timeout_ms);

  bool ShouldCreateReceivers() const override;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_CALL_TEST_H_

View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/configurable_frame_size_encoder.h"
#include <string.h>
#include "webrtc/common_video/include/video_frame.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
// Allocates a zero-filled buffer of |max_frame_size| bytes that backs every
// emitted frame; frames initially use the full buffer size.
ConfigurableFrameSizeEncoder::ConfigurableFrameSizeEncoder(
    size_t max_frame_size)
    : callback_(NULL),
      max_frame_size_(max_frame_size),
      current_frame_size_(max_frame_size),
      buffer_(new uint8_t[max_frame_size]) {
  memset(buffer_.get(), 0, max_frame_size);
}

ConfigurableFrameSizeEncoder::~ConfigurableFrameSizeEncoder() {}

// No-op: this fake encoder needs no codec-specific initialization.
int32_t ConfigurableFrameSizeEncoder::InitEncode(
    const VideoCodec* codec_settings,
    int32_t number_of_cores,
    size_t max_payload_size) {
  return WEBRTC_VIDEO_CODEC_OK;
}
// Ignores the input frame's pixel data and delivers a zero-filled key frame
// of current_frame_size_ bytes to the registered callback, copying the input
// frame's dimensions and timestamps.
int32_t ConfigurableFrameSizeEncoder::Encode(
    const VideoFrame& inputImage,
    const CodecSpecificInfo* codecSpecificInfo,
    const std::vector<FrameType>* frame_types) {
  // A callback must have been set via RegisterEncodeCompleteCallback() before
  // encoding; previously a missing callback was a plain null dereference.
  RTC_DCHECK(callback_);
  EncodedImage encodedImage(
      buffer_.get(), current_frame_size_, max_frame_size_);
  encodedImage._completeFrame = true;
  encodedImage._encodedHeight = inputImage.height();
  encodedImage._encodedWidth = inputImage.width();
  encodedImage._frameType = kVideoFrameKey;
  encodedImage._timeStamp = inputImage.timestamp();
  encodedImage.capture_time_ms_ = inputImage.render_time_ms();
  RTPFragmentationHeader* fragmentation = NULL;
  CodecSpecificInfo specific;
  memset(&specific, 0, sizeof(specific));
  callback_->OnEncodedImage(encodedImage, &specific, fragmentation);
  return WEBRTC_VIDEO_CODEC_OK;
}
int32_t ConfigurableFrameSizeEncoder::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

// The remaining VideoEncoder overrides are no-ops for this fake encoder.
int32_t ConfigurableFrameSizeEncoder::Release() {
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t ConfigurableFrameSizeEncoder::SetChannelParameters(uint32_t packet_loss,
                                                           int64_t rtt) {
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t ConfigurableFrameSizeEncoder::SetRateAllocation(
    const BitrateAllocation& allocation,
    uint32_t framerate) {
  return WEBRTC_VIDEO_CODEC_OK;
}

int32_t ConfigurableFrameSizeEncoder::SetPeriodicKeyFrames(bool enable) {
  return WEBRTC_VIDEO_CODEC_OK;
}

// Sets the size of subsequently emitted frames; |size| must not exceed the
// buffer capacity chosen at construction.
int32_t ConfigurableFrameSizeEncoder::SetFrameSize(size_t size) {
  RTC_DCHECK_LE(size, max_frame_size_);
  current_frame_size_ = size;
  return WEBRTC_VIDEO_CODEC_OK;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_CONFIGURABLE_FRAME_SIZE_ENCODER_H_
#define WEBRTC_TEST_CONFIGURABLE_FRAME_SIZE_ENCODER_H_
#include <memory>
#include <vector>
#include "webrtc/api/video_codecs/video_encoder.h"
namespace webrtc {
namespace test {
// Fake VideoEncoder that, for every input frame, emits a single zero-filled
// key frame whose byte size is controlled via SetFrameSize().
class ConfigurableFrameSizeEncoder : public VideoEncoder {
 public:
  explicit ConfigurableFrameSizeEncoder(size_t max_frame_size);
  virtual ~ConfigurableFrameSizeEncoder();

  int32_t InitEncode(const VideoCodec* codec_settings,
                     int32_t number_of_cores,
                     size_t max_payload_size) override;

  int32_t Encode(const VideoFrame& input_image,
                 const CodecSpecificInfo* codec_specific_info,
                 const std::vector<FrameType>* frame_types) override;

  int32_t RegisterEncodeCompleteCallback(
      EncodedImageCallback* callback) override;

  int32_t Release() override;

  int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;

  int32_t SetRateAllocation(const BitrateAllocation& allocation,
                            uint32_t framerate) override;

  int32_t SetPeriodicKeyFrames(bool enable) override;

  // Sets the size of subsequently emitted frames; must not exceed the
  // |max_frame_size| given at construction.
  int32_t SetFrameSize(size_t size);

 private:
  EncodedImageCallback* callback_;  // Not owned.
  const size_t max_frame_size_;
  size_t current_frame_size_;
  std::unique_ptr<uint8_t[]> buffer_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_CONFIGURABLE_FRAME_SIZE_ENCODER_H_

24
test/constants.cc Normal file
View File

@ -0,0 +1,24 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/constants.h"
namespace webrtc {
namespace test {
// RTP header extension IDs shared across tests; declared in constants.h.
const int kTOffsetExtensionId = 6;
const int kAbsSendTimeExtensionId = 7;
const int kTransportSequenceNumberExtensionId = 8;
const int kVideoRotationExtensionId = 9;
const int kVideoContentTypeExtensionId = 10;
const int kVideoTimingExtensionId = 11;
} // namespace test
} // namespace webrtc

21
test/constants.h Normal file
View File

@ -0,0 +1,21 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_CONSTANTS_H_
#define WEBRTC_TEST_CONSTANTS_H_

// RTP header extension IDs shared across tests; values are defined in
// constants.cc. An include guard was added to match every sibling header.
namespace webrtc {
namespace test {

extern const int kTOffsetExtensionId;
extern const int kAbsSendTimeExtensionId;
extern const int kTransportSequenceNumberExtensionId;
extern const int kVideoRotationExtensionId;
extern const int kVideoContentTypeExtensionId;
extern const int kVideoTimingExtensionId;

}  // namespace test
}  // namespace webrtc

#endif  // WEBRTC_TEST_CONSTANTS_H_

111
test/direct_transport.cc Normal file
View File

@ -0,0 +1,111 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/direct_transport.h"
#include "webrtc/call/call.h"
#include "webrtc/rtc_base/ptr_util.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/single_threaded_task_queue.h"
namespace webrtc {
namespace test {
// Delegates to the config-taking constructor with a default
// (impairment-free) FakeNetworkPipe config.
DirectTransport::DirectTransport(
    SingleThreadedTaskQueueForTesting* task_queue,
    Call* send_call,
    const std::map<uint8_t, MediaType>& payload_type_map)
    : DirectTransport(task_queue,
                      FakeNetworkPipe::Config(),
                      send_call,
                      payload_type_map) {
}

// Delegates to the demuxer-taking constructor with a payload-type-based
// demuxer built from |payload_type_map|.
DirectTransport::DirectTransport(
    SingleThreadedTaskQueueForTesting* task_queue,
    const FakeNetworkPipe::Config& config,
    Call* send_call,
    const std::map<uint8_t, MediaType>& payload_type_map)
    : DirectTransport(
          task_queue,
          config,
          send_call,
          std::unique_ptr<Demuxer>(new DemuxerImpl(payload_type_map))) {
}

// Signals network-up for audio and video on |send_call| (if non-null) and
// starts the periodic SendPackets() processing loop on |task_queue|.
DirectTransport::DirectTransport(SingleThreadedTaskQueueForTesting* task_queue,
                                 const FakeNetworkPipe::Config& config,
                                 Call* send_call,
                                 std::unique_ptr<Demuxer> demuxer)
    : send_call_(send_call),
      clock_(Clock::GetRealTimeClock()),
      task_queue_(task_queue),
      fake_network_(clock_, config, std::move(demuxer)) {
  RTC_DCHECK(task_queue);

  if (send_call_) {
    send_call_->SignalChannelNetworkState(MediaType::AUDIO, kNetworkUp);
    send_call_->SignalChannelNetworkState(MediaType::VIDEO, kNetworkUp);
  }
  SendPackets();
}
DirectTransport::~DirectTransport() {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  // Constructor updates |next_scheduled_task_|, so it's guaranteed to
  // be initialized.
  task_queue_->CancelTask(next_scheduled_task_);
}

// Updates the simulated network parameters of the underlying pipe.
void DirectTransport::SetConfig(const FakeNetworkPipe::Config& config) {
  fake_network_.SetConfig(config);
}

// Deprecated (see header): cancels the pending SendPackets() task; the
// destructor performs the same cancellation.
void DirectTransport::StopSending() {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  task_queue_->CancelTask(next_scheduled_task_);
}

// Sets the receiver that packets leave the fake pipe into.
void DirectTransport::SetReceiver(PacketReceiver* receiver) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  fake_network_.SetReceiver(receiver);
}
// Reports the packet as sent to |send_call_| (for send-time bookkeeping) and
// feeds it into the fake network pipe. Always reports success.
bool DirectTransport::SendRtp(const uint8_t* data,
                              size_t length,
                              const PacketOptions& options) {
  if (send_call_) {
    rtc::SentPacket sent_packet(options.packet_id,
                                clock_->TimeInMilliseconds());
    send_call_->OnSentPacket(sent_packet);
  }
  fake_network_.SendPacket(data, length);
  return true;
}

// RTCP packets go straight into the pipe without sent-packet bookkeeping.
bool DirectTransport::SendRtcp(const uint8_t* data, size_t length) {
  fake_network_.SendPacket(data, length);
  return true;
}

// Average delay currently induced by the fake network pipe, in milliseconds.
int DirectTransport::GetAverageDelayMs() {
  return fake_network_.AverageDelay();
}

// Processes due packets in the pipe, then reschedules itself to run when the
// pipe next needs processing.
void DirectTransport::SendPackets() {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);

  fake_network_.Process();

  int64_t delay_ms = fake_network_.TimeUntilNextProcess();
  next_scheduled_task_ = task_queue_->PostDelayedTask([this]() {
    SendPackets();
  }, delay_ms);
}
} // namespace test
} // namespace webrtc

82
test/direct_transport.h Normal file
View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_DIRECT_TRANSPORT_H_
#define WEBRTC_TEST_DIRECT_TRANSPORT_H_
#include <assert.h>
#include <memory>
#include "webrtc/api/call/transport.h"
#include "webrtc/call/call.h"
#include "webrtc/rtc_base/sequenced_task_checker.h"
#include "webrtc/rtc_base/thread_annotations.h"
#include "webrtc/test/fake_network_pipe.h"
#include "webrtc/test/single_threaded_task_queue.h"
namespace webrtc {
class Clock;
class PacketReceiver;
namespace test {
// Objects of this class are expected to be allocated and destroyed on the
// same task-queue - the one that's passed in via the constructor.
//
// A Transport that delivers sent packets to a PacketReceiver through a
// FakeNetworkPipe, optionally simulating network impairments via the pipe's
// config.
class DirectTransport : public Transport {
 public:
  DirectTransport(SingleThreadedTaskQueueForTesting* task_queue,
                  Call* send_call,
                  const std::map<uint8_t, MediaType>& payload_type_map);

  DirectTransport(SingleThreadedTaskQueueForTesting* task_queue,
                  const FakeNetworkPipe::Config& config,
                  Call* send_call,
                  const std::map<uint8_t, MediaType>& payload_type_map);

  DirectTransport(SingleThreadedTaskQueueForTesting* task_queue,
                  const FakeNetworkPipe::Config& config,
                  Call* send_call,
                  std::unique_ptr<Demuxer> demuxer);

  ~DirectTransport() override;

  // Updates the simulated network parameters of the underlying pipe.
  void SetConfig(const FakeNetworkPipe::Config& config);

  RTC_DEPRECATED void StopSending();

  // TODO(holmer): Look into moving this to the constructor.
  virtual void SetReceiver(PacketReceiver* receiver);

  bool SendRtp(const uint8_t* data,
               size_t length,
               const PacketOptions& options) override;
  bool SendRtcp(const uint8_t* data, size_t length) override;

  // Average delay currently induced by the pipe, in milliseconds.
  int GetAverageDelayMs();

 private:
  // Periodically processes the pipe and reschedules itself.
  void SendPackets();

  Call* const send_call_;
  Clock* const clock_;

  SingleThreadedTaskQueueForTesting* const task_queue_;
  SingleThreadedTaskQueueForTesting::TaskId next_scheduled_task_
      RTC_GUARDED_BY(&sequence_checker_);

  FakeNetworkPipe fake_network_;

  rtc::SequencedTaskChecker sequence_checker_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_DIRECT_TRANSPORT_H_

56
test/drifting_clock.cc Normal file
View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/drifting_clock.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
const float DriftingClock::kDoubleSpeed = 2.0f;
const float DriftingClock::kNoDrift = 1.0f;
const float DriftingClock::kHalfSpeed = 0.5f;

// Wraps |clock| so that reported time advances at |speed| times its rate,
// measured from the moment of construction.
DriftingClock::DriftingClock(Clock* clock, float speed)
    : clock_(clock),
      drift_(speed - 1.0f),
      start_time_(clock_->TimeInMicroseconds()) {
  RTC_CHECK(clock);
  RTC_CHECK_GT(speed, 0.0f);
}

// Accumulated drift in microseconds: time elapsed since construction
// multiplied by (speed - 1).
float DriftingClock::Drift() const {
  int64_t now = clock_->TimeInMicroseconds();
  RTC_DCHECK_GE(now, start_time_);
  return (now - start_time_) * drift_;
}
// Each getter adds the accumulated drift, scaled to the getter's unit, to the
// wrapped clock's reading.
int64_t DriftingClock::TimeInMilliseconds() const {
  return clock_->TimeInMilliseconds() + Drift() / 1000.;
}

int64_t DriftingClock::TimeInMicroseconds() const {
  return clock_->TimeInMicroseconds() + Drift();
}

NtpTime DriftingClock::CurrentNtpTime() const {
  // NTP precision is 1/2^32 seconds, i.e. 2^32 ntp fractions = 1 second.
  const double kNtpFracPerMicroSecond = 4294.967296;  // = 2^32 / 10^6

  NtpTime ntp = clock_->CurrentNtpTime();
  uint64_t total_fractions = static_cast<uint64_t>(ntp);
  total_fractions += Drift() * kNtpFracPerMicroSecond;
  return NtpTime(total_fractions);
}

int64_t DriftingClock::CurrentNtpInMilliseconds() const {
  return clock_->CurrentNtpInMilliseconds() + Drift() / 1000.;
}
} // namespace test
} // namespace webrtc

45
test/drifting_clock.h Normal file
View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_DRIFTING_CLOCK_H_
#define WEBRTC_TEST_DRIFTING_CLOCK_H_
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
namespace test {
// A Clock decorator whose reported time advances at |speed| times the rate of
// the wrapped clock, measured from construction time.
class DriftingClock : public Clock {
 public:
  // TODO(danilchap): Make this constants constexpr when it would be supported.
  static const float kDoubleSpeed;  // 2.0f;
  static const float kNoDrift;      // 1.0f;
  static const float kHalfSpeed;    // 0.5f;

  DriftingClock(Clock* clock, float speed);

  // TODO(danilchap): Make this functions constexpr when it would be supported.
  // Convert a percentage into a |speed| factor, e.g. PercentsFaster(10) ->
  // 1.1f.
  static float PercentsFaster(float percent) { return 1.0f + percent / 100.0f; }
  static float PercentsSlower(float percent) { return 1.0f - percent / 100.0f; }

  int64_t TimeInMilliseconds() const override;
  int64_t TimeInMicroseconds() const override;
  NtpTime CurrentNtpTime() const override;
  int64_t CurrentNtpInMilliseconds() const override;

 private:
  // Accumulated drift in microseconds since construction.
  float Drift() const;

  Clock* const clock_;  // Not owned.
  const float drift_;   // speed - 1.
  const int64_t start_time_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_DRIFTING_CLOCK_H_

101
test/encoder_settings.cc Normal file
View File

@ -0,0 +1,101 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/encoder_settings.h"
#include <algorithm>
#include <string>
#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
#include "webrtc/test/fake_decoder.h"
namespace webrtc {
namespace test {
// Out-of-line definitions for the constants declared in encoder_settings.h.
const size_t DefaultVideoStreamFactory::kMaxNumberOfStreams;
const int DefaultVideoStreamFactory::kMaxBitratePerStream[] = {150000, 450000,
                                                               1500000};
const int DefaultVideoStreamFactory::kDefaultMinBitratePerStream[] = {
    30000, 200000, 700000};
// Creates |encoder_config.number_of_streams| VideoStreams. Resolutions scale
// up linearly per stream; |encoder_config.max_bitrate_bps| is split across
// streams, capped per stream, with the remainder given to the last stream.
std::vector<VideoStream> CreateVideoStreams(
    int width,
    int height,
    const webrtc::VideoEncoderConfig& encoder_config) {
  RTC_DCHECK(encoder_config.number_of_streams <=
             DefaultVideoStreamFactory::kMaxNumberOfStreams);

  const size_t num_streams = encoder_config.number_of_streams;
  std::vector<VideoStream> streams(num_streams);
  int remaining_bitrate_bps = encoder_config.max_bitrate_bps;
  for (size_t i = 0; i < num_streams; ++i) {
    VideoStream& stream = streams[i];
    // Successive streams get proportionally larger resolutions; the last
    // stream is full size.
    stream.width = (i + 1) * width / num_streams;
    stream.height = (i + 1) * height / num_streams;
    stream.max_framerate = 30;
    stream.min_bitrate_bps =
        DefaultVideoStreamFactory::kDefaultMinBitratePerStream[i];
    stream.target_bitrate_bps = stream.max_bitrate_bps =
        std::min(remaining_bitrate_bps,
                 DefaultVideoStreamFactory::kMaxBitratePerStream[i]);
    stream.max_qp = 56;
    remaining_bitrate_bps -= stream.target_bitrate_bps;
  }
  // Whatever bitrate is left over goes to the highest stream.
  streams[num_streams - 1].max_bitrate_bps += remaining_bitrate_bps;
  return streams;
}
DefaultVideoStreamFactory::DefaultVideoStreamFactory() {}

// VideoStreamFactoryInterface implementation: delegates to the free function
// CreateVideoStreams() above.
std::vector<VideoStream> DefaultVideoStreamFactory::CreateEncoderStreams(
    int width,
    int height,
    const webrtc::VideoEncoderConfig& encoder_config) {
  return CreateVideoStreams(width, height, encoder_config);
}
// Configures |configuration| for |num_streams| simulcast streams using the
// DefaultVideoStreamFactory; the total max bitrate is the sum of the
// per-stream caps.
void FillEncoderConfiguration(size_t num_streams,
                              VideoEncoderConfig* configuration) {
  RTC_DCHECK_LE(num_streams, DefaultVideoStreamFactory::kMaxNumberOfStreams);

  configuration->number_of_streams = num_streams;
  configuration->video_stream_factory =
      new rtc::RefCountedObject<DefaultVideoStreamFactory>();

  int total_max_bitrate_bps = 0;
  for (size_t i = 0; i < num_streams; ++i) {
    total_max_bitrate_bps += DefaultVideoStreamFactory::kMaxBitratePerStream[i];
  }
  configuration->max_bitrate_bps = total_max_bitrate_bps;
}
// Builds a VideoReceiveStream::Decoder matching |encoder_settings|: same
// payload type/name, with a real software decoder for VP8/VP9/H264 and a
// FakeDecoder for anything else.
VideoReceiveStream::Decoder CreateMatchingDecoder(
    const VideoSendStream::Config::EncoderSettings& encoder_settings) {
  VideoReceiveStream::Decoder decoder;
  decoder.payload_type = encoder_settings.payload_type;
  decoder.payload_name = encoder_settings.payload_name;

  const std::string& codec_name = encoder_settings.payload_name;
  if (codec_name == "VP8") {
    decoder.decoder = VP8Decoder::Create();
  } else if (codec_name == "VP9") {
    decoder.decoder = VP9Decoder::Create();
  } else if (codec_name == "H264") {
    decoder.decoder = H264Decoder::Create();
  } else {
    decoder.decoder = new FakeDecoder();
  }
  return decoder;
}
} // namespace test
} // namespace webrtc

56
test/encoder_settings.h Normal file
View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_ENCODER_SETTINGS_H_
#define WEBRTC_TEST_ENCODER_SETTINGS_H_
#include <vector>
#include "webrtc/call/video_receive_stream.h"
#include "webrtc/call/video_send_stream.h"
namespace webrtc {
namespace test {
// Factory producing up to kMaxNumberOfStreams simulcast VideoStreams with
// fixed per-stream bitrate caps and minimums (see encoder_settings.cc).
class DefaultVideoStreamFactory
    : public VideoEncoderConfig::VideoStreamFactoryInterface {
 public:
  DefaultVideoStreamFactory();

  static const size_t kMaxNumberOfStreams = 3;
  // Defined as {150000, 450000, 1500000};
  static const int kMaxBitratePerStream[];
  // Defined as {30000, 200000, 700000};
  static const int kDefaultMinBitratePerStream[];

 private:
  std::vector<VideoStream> CreateEncoderStreams(
      int width,
      int height,
      const VideoEncoderConfig& encoder_config) override;
};
// Creates |encoder_config.number_of_streams| VideoStreams where index
// |encoder_config.number_of_streams -1| have width = |width|, height =
// |height|. The total max bitrate of all VideoStreams is
// |encoder_config.max_bitrate_bps|.
std::vector<VideoStream> CreateVideoStreams(
int width,
int height,
const webrtc::VideoEncoderConfig& encoder_config);
void FillEncoderConfiguration(size_t num_streams,
VideoEncoderConfig* configuration);
VideoReceiveStream::Decoder CreateMatchingDecoder(
const VideoSendStream::Config::EncoderSettings& encoder_settings);
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_ENCODER_SETTINGS_H_

380
test/fake_audio_device.cc Normal file
View File

@ -0,0 +1,380 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/fake_audio_device.h"
#include <algorithm>
#include <utility>
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/random.h"
#include "webrtc/system_wrappers/include/event_wrapper.h"
namespace webrtc {
namespace {
// All capturers/renderers below work in 10 ms audio frames.
constexpr int kFrameLengthMs = 10;
constexpr int kFramesPerSecond = 1000 / kFrameLengthMs;

// Assuming 10ms audio packets..

// Capturer that alternates between a 10 ms frame of silence and a 10 ms frame
// of random noise with amplitudes in [-max_amplitude, max_amplitude].
class PulsedNoiseCapturer final : public test::FakeAudioDevice::Capturer {
 public:
  PulsedNoiseCapturer(int16_t max_amplitude, int sampling_frequency_in_hz)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        fill_with_zero_(false),
        random_generator_(1),  // Fixed seed: output is reproducible.
        max_amplitude_(max_amplitude) {
    RTC_DCHECK_GT(max_amplitude, 0);
  }

  int SamplingFrequency() const override {
    return sampling_frequency_in_hz_;
  }

  bool Capture(rtc::BufferT<int16_t>* buffer) override {
    // Toggle between a silent frame and a noise frame on every call.
    fill_with_zero_ = !fill_with_zero_;
    buffer->SetData(
        test::FakeAudioDevice::SamplesPerFrame(sampling_frequency_in_hz_),
        [&](rtc::ArrayView<int16_t> data) {
          if (fill_with_zero_) {
            std::fill(data.begin(), data.end(), 0);
          } else {
            std::generate(data.begin(), data.end(), [&]() {
              return random_generator_.Rand(-max_amplitude_, max_amplitude_);
            });
          }
          return data.size();
        });
    return true;  // Never runs out of data.
  }

 private:
  int sampling_frequency_in_hz_;
  bool fill_with_zero_;
  Random random_generator_;
  const int16_t max_amplitude_;
};
// Capturer that reads 10 ms frames of mono audio from a WAV file.
class WavFileReader final : public test::FakeAudioDevice::Capturer {
 public:
  WavFileReader(std::string filename, int sampling_frequency_in_hz)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        wav_reader_(filename) {
    // The file must match the requested sample rate and be mono.
    RTC_CHECK_EQ(wav_reader_.sample_rate(), sampling_frequency_in_hz);
    RTC_CHECK_EQ(wav_reader_.num_channels(), 1);
  }

  int SamplingFrequency() const override {
    return sampling_frequency_in_hz_;
  }

  bool Capture(rtc::BufferT<int16_t>* buffer) override {
    buffer->SetData(
        test::FakeAudioDevice::SamplesPerFrame(sampling_frequency_in_hz_),
        [&](rtc::ArrayView<int16_t> data) {
          return wav_reader_.ReadSamples(data.size(), data.data());
        });
    // An empty read means end-of-file: stop capturing.
    return buffer->size() > 0;
  }

 private:
  int sampling_frequency_in_hz_;
  WavReader wav_reader_;
};
// Renderer that writes every received frame verbatim to a mono WAV file.
class WavFileWriter final : public test::FakeAudioDevice::Renderer {
 public:
  WavFileWriter(std::string filename, int sampling_frequency_in_hz)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        wav_writer_(filename, sampling_frequency_in_hz, 1) {}

  int SamplingFrequency() const override {
    return sampling_frequency_in_hz_;
  }

  bool Render(rtc::ArrayView<const int16_t> data) override {
    wav_writer_.WriteSamples(data.data(), data.size());
    return true;  // Always keeps accepting data.
  }

 private:
  int sampling_frequency_in_hz_;
  WavWriter wav_writer_;
};
// Renderer that writes audio to a mono WAV file while trimming silence at
// both ends: leading samples are skipped until one exceeds
// kAmplitudeThreshold in magnitude, and trailing exact-zero samples are
// withheld until later non-zero audio proves they were not final.
class BoundedWavFileWriter : public test::FakeAudioDevice::Renderer {
 public:
  BoundedWavFileWriter(std::string filename, int sampling_frequency_in_hz)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz),
        wav_writer_(filename, sampling_frequency_in_hz, 1),
        silent_audio_(test::FakeAudioDevice::SamplesPerFrame(
            sampling_frequency_in_hz), 0),
        started_writing_(false),
        trailing_zeros_(0) {}

  int SamplingFrequency() const override {
    return sampling_frequency_in_hz_;
  }

  bool Render(rtc::ArrayView<const int16_t> data) override {
    const int16_t kAmplitudeThreshold = 5;

    const int16_t* begin = data.begin();
    const int16_t* end = data.end();
    if (!started_writing_) {
      // Cut off silence at the beginning.
      while (begin < end) {
        if (std::abs(*begin) > kAmplitudeThreshold) {
          started_writing_ = true;
          break;
        }
        ++begin;
      }
    }
    if (started_writing_) {
      // Cut off silence at the end.
      while (begin < end) {
        if (*(end - 1) != 0) {
          break;
        }
        --end;
      }
      if (begin < end) {
        // If it turns out that the silence was not final, need to write all the
        // skipped zeros and continue writing audio.
        while (trailing_zeros_ > 0) {
          const size_t zeros_to_write = std::min(trailing_zeros_,
                                                 silent_audio_.size());
          wav_writer_.WriteSamples(silent_audio_.data(), zeros_to_write);
          trailing_zeros_ -= zeros_to_write;
        }
        wav_writer_.WriteSamples(begin, end - begin);
      }
      // Save the number of zeros we skipped in case this needs to be restored.
      trailing_zeros_ += data.end() - end;
    }
    return true;
  }

 private:
  int sampling_frequency_in_hz_;
  WavWriter wav_writer_;
  std::vector<int16_t> silent_audio_;  // One frame of zeros, reused as needed.
  bool started_writing_;               // True once non-silence has been seen.
  size_t trailing_zeros_;              // Zeros withheld pending more audio.
};
// Renderer that silently drops all audio it receives.
class DiscardRenderer final : public test::FakeAudioDevice::Renderer {
 public:
  explicit DiscardRenderer(int sampling_frequency_in_hz)
      : sampling_frequency_in_hz_(sampling_frequency_in_hz) {}

  int SamplingFrequency() const override {
    return sampling_frequency_in_hz_;
  }

  bool Render(rtc::ArrayView<const int16_t> data) override {
    return true;  // Always keeps accepting data.
  }

 private:
  int sampling_frequency_in_hz_;
};
} // namespace
namespace test {
// Number of samples in one 10 ms frame at the given rate. The rate must be
// divisible by kFramesPerSecond (checked).
size_t FakeAudioDevice::SamplesPerFrame(int sampling_frequency_in_hz) {
  return rtc::CheckedDivExact(sampling_frequency_in_hz, kFramesPerSecond);
}

// Factory wrappers around the file-local capturer/renderer classes above.

std::unique_ptr<FakeAudioDevice::Capturer>
FakeAudioDevice::CreatePulsedNoiseCapturer(
    int16_t max_amplitude, int sampling_frequency_in_hz) {
  return std::unique_ptr<FakeAudioDevice::Capturer>(
      new PulsedNoiseCapturer(max_amplitude, sampling_frequency_in_hz));
}

std::unique_ptr<FakeAudioDevice::Capturer> FakeAudioDevice::CreateWavFileReader(
    std::string filename, int sampling_frequency_in_hz) {
  return std::unique_ptr<FakeAudioDevice::Capturer>(
      new WavFileReader(filename, sampling_frequency_in_hz));
}

// Overload that auto-detects the sample rate from the WAV file header.
std::unique_ptr<FakeAudioDevice::Capturer> FakeAudioDevice::CreateWavFileReader(
    std::string filename) {
  int sampling_frequency_in_hz = WavReader(filename).sample_rate();
  return std::unique_ptr<FakeAudioDevice::Capturer>(
      new WavFileReader(filename, sampling_frequency_in_hz));
}

std::unique_ptr<FakeAudioDevice::Renderer> FakeAudioDevice::CreateWavFileWriter(
    std::string filename, int sampling_frequency_in_hz) {
  return std::unique_ptr<FakeAudioDevice::Renderer>(
      new WavFileWriter(filename, sampling_frequency_in_hz));
}

std::unique_ptr<FakeAudioDevice::Renderer>
FakeAudioDevice::CreateBoundedWavFileWriter(
    std::string filename, int sampling_frequency_in_hz) {
  return std::unique_ptr<FakeAudioDevice::Renderer>(
      new BoundedWavFileWriter(filename, sampling_frequency_in_hz));
}

std::unique_ptr<FakeAudioDevice::Renderer>
FakeAudioDevice::CreateDiscardRenderer(int sampling_frequency_in_hz) {
  return std::unique_ptr<FakeAudioDevice::Renderer>(
      new DiscardRenderer(sampling_frequency_in_hz));
}
// Either |capturer| or |renderer| may be null if the device is only used in
// one direction. |speed| scales how fast frames are processed relative to
// real time (see Init()).
FakeAudioDevice::FakeAudioDevice(std::unique_ptr<Capturer> capturer,
                                 std::unique_ptr<Renderer> renderer,
                                 float speed)
    : capturer_(std::move(capturer)),
      renderer_(std::move(renderer)),
      speed_(speed),
      audio_callback_(nullptr),
      rendering_(false),
      capturing_(false),
      done_rendering_(true, true),
      done_capturing_(true, true),
      tick_(EventTimerWrapper::Create()),
      thread_(FakeAudioDevice::Run, this, "FakeAudioDevice") {
  // Only the sample rates used by WebRTC audio processing are supported.
  auto good_sample_rate = [](int sr) {
    return sr == 8000 || sr == 16000 || sr == 32000
        || sr == 44100 || sr == 48000;
  };

  if (renderer_) {
    const int sample_rate = renderer_->SamplingFrequency();
    playout_buffer_.resize(SamplesPerFrame(sample_rate), 0);
    RTC_CHECK(good_sample_rate(sample_rate));
  }
  if (capturer_) {
    RTC_CHECK(good_sample_rate(capturer_->SamplingFrequency()));
  }
}

FakeAudioDevice::~FakeAudioDevice() {
  StopPlayout();
  StopRecording();
  // Join the processing thread before members are destroyed.
  thread_.Stop();
}
// Starts playout; requires a renderer. Resets the "done rendering" event so
// WaitForPlayoutEnd() blocks until rendering finishes.
int32_t FakeAudioDevice::StartPlayout() {
  rtc::CritScope cs(&lock_);
  RTC_CHECK(renderer_);
  rendering_ = true;
  done_rendering_.Reset();
  return 0;
}

int32_t FakeAudioDevice::StopPlayout() {
  rtc::CritScope cs(&lock_);
  rendering_ = false;
  done_rendering_.Set();
  return 0;
}

// Starts recording; requires a capturer. Resets the "done capturing" event so
// WaitForRecordingEnd() blocks until capturing finishes.
int32_t FakeAudioDevice::StartRecording() {
  rtc::CritScope cs(&lock_);
  RTC_CHECK(capturer_);
  capturing_ = true;
  done_capturing_.Reset();
  return 0;
}

int32_t FakeAudioDevice::StopRecording() {
  rtc::CritScope cs(&lock_);
  capturing_ = false;
  done_capturing_.Set();
  return 0;
}

// Starts the periodic timer (one tick per frame, scaled by |speed_|) and the
// audio processing thread.
int32_t FakeAudioDevice::Init() {
  RTC_CHECK(tick_->StartTimer(true, kFrameLengthMs / speed_));
  thread_.Start();
  thread_.SetPriority(rtc::kHighPriority);
  return 0;
}

// Registers the transport that receives captured audio and supplies playout
// audio. Passing null is only allowed to clear a previously set callback.
int32_t FakeAudioDevice::RegisterAudioCallback(AudioTransport* callback) {
  rtc::CritScope cs(&lock_);
  RTC_DCHECK(callback || audio_callback_);
  audio_callback_ = callback;
  return 0;
}

bool FakeAudioDevice::Playing() const {
  rtc::CritScope cs(&lock_);
  return rendering_;
}

bool FakeAudioDevice::Recording() const {
  rtc::CritScope cs(&lock_);
  return capturing_;
}

bool FakeAudioDevice::WaitForPlayoutEnd(int timeout_ms) {
  return done_rendering_.Wait(timeout_ms);
}

bool FakeAudioDevice::WaitForRecordingEnd(int timeout_ms) {
  return done_capturing_.Wait(timeout_ms);
}

// Thread entry point: trampoline into ProcessAudio().
bool FakeAudioDevice::Run(void* obj) {
  static_cast<FakeAudioDevice*>(obj)->ProcessAudio();
  return true;
}
// One iteration of the audio thread: push one captured frame to the
// registered callback (if capturing), pull one playout frame from it (if
// rendering), then sleep until the next timer tick.
// NOTE(review): assumes RegisterAudioCallback() was called before capturing
// or rendering starts; audio_callback_ is not null-checked here.
void FakeAudioDevice::ProcessAudio() {
  {
    rtc::CritScope cs(&lock_);
    if (capturing_) {
      // Capture 10ms of audio. 2 bytes per sample.
      const bool keep_capturing = capturer_->Capture(&recording_buffer_);
      // Initialize the out-parameter so it holds a defined value even if the
      // callback does not write it.
      uint32_t new_mic_level = 0;
      if (recording_buffer_.size() > 0) {
        audio_callback_->RecordedDataIsAvailable(
            recording_buffer_.data(), recording_buffer_.size(), 2, 1,
            capturer_->SamplingFrequency(), 0, 0, 0, false, new_mic_level);
      }
      if (!keep_capturing) {
        capturing_ = false;
        done_capturing_.Set();
      }
    }
    if (rendering_) {
      // Initialize out-parameters defensively; the callback is expected to
      // overwrite them, but must not be able to leave them undefined.
      size_t samples_out = 0;
      int64_t elapsed_time_ms = -1;
      int64_t ntp_time_ms = -1;
      const int sampling_frequency = renderer_->SamplingFrequency();
      audio_callback_->NeedMorePlayData(
          SamplesPerFrame(sampling_frequency), 2, 1, sampling_frequency,
          playout_buffer_.data(), samples_out, &elapsed_time_ms, &ntp_time_ms);
      const bool keep_rendering = renderer_->Render(
          rtc::ArrayView<const int16_t>(playout_buffer_.data(), samples_out));
      if (!keep_rendering) {
        rendering_ = false;
        done_rendering_.Set();
      }
    }
  }
  // Block until the next frame tick (10 ms / speed_) fires.
  tick_->Wait(WEBRTC_EVENT_INFINITE);
}
} // namespace test
} // namespace webrtc

144
test/fake_audio_device.h Normal file
View File

@ -0,0 +1,144 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FAKE_AUDIO_DEVICE_H_
#define WEBRTC_TEST_FAKE_AUDIO_DEVICE_H_
#include <memory>
#include <string>
#include <vector>
#include "webrtc/api/array_view.h"
#include "webrtc/modules/audio_device/include/fake_audio_device.h"
#include "webrtc/rtc_base/buffer.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/rtc_base/event.h"
#include "webrtc/rtc_base/platform_thread.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class EventTimerWrapper;
namespace test {
// FakeAudioDevice implements an AudioDevice module that can act both as a
// capturer and a renderer. It will use 10ms audio frames.
// FakeAudioDevice implements an AudioDevice module that can act both as a
// capturer and a renderer. It will use 10ms audio frames.
class FakeAudioDevice : public FakeAudioDeviceModule {
 public:
  // Returns the number of samples that Capturers and Renderers with this
  // sampling frequency will work with every time Capture or Render is called.
  static size_t SamplesPerFrame(int sampling_frequency_in_hz);

  // Source of captured audio frames.
  class Capturer {
   public:
    virtual ~Capturer() {}
    // Returns the sampling frequency in Hz of the audio data that this
    // capturer produces.
    virtual int SamplingFrequency() const = 0;
    // Replaces the contents of |buffer| with 10ms of captured audio data
    // (see FakeAudioDevice::SamplesPerFrame). Returns true if the capturer can
    // keep producing data, or false when the capture finishes.
    virtual bool Capture(rtc::BufferT<int16_t>* buffer) = 0;
  };

  // Sink for played-out audio frames.
  class Renderer {
   public:
    virtual ~Renderer() {}
    // Returns the sampling frequency in Hz of the audio data that this
    // renderer receives.
    virtual int SamplingFrequency() const = 0;
    // Renders the passed audio data and returns true if the renderer wants
    // to keep receiving data, or false otherwise.
    virtual bool Render(rtc::ArrayView<const int16_t> data) = 0;
  };

  // Creates a new FakeAudioDevice. When capturing or playing, 10 ms audio
  // frames will be processed every 10ms / |speed|.
  // |capturer| is an object that produces audio data. Can be nullptr if this
  // device is never used for recording.
  // |renderer| is an object that receives audio data that would have been
  // played out. Can be nullptr if this device is never used for playing.
  // Use one of the Create... functions to get these instances.
  FakeAudioDevice(std::unique_ptr<Capturer> capturer,
                  std::unique_ptr<Renderer> renderer,
                  float speed = 1);
  ~FakeAudioDevice() override;

  // Returns a Capturer instance that generates a signal where every second
  // frame is zero and every second frame is evenly distributed random noise
  // with max amplitude |max_amplitude|.
  static std::unique_ptr<Capturer> CreatePulsedNoiseCapturer(
      int16_t max_amplitude, int sampling_frequency_in_hz);

  // Returns a Capturer instance that gets its data from a file.
  static std::unique_ptr<Capturer> CreateWavFileReader(
      std::string filename, int sampling_frequency_in_hz);

  // Returns a Capturer instance that gets its data from a file.
  // Automatically detects sample rate.
  static std::unique_ptr<Capturer> CreateWavFileReader(std::string filename);

  // Returns a Renderer instance that writes its data to a file.
  static std::unique_ptr<Renderer> CreateWavFileWriter(
      std::string filename, int sampling_frequency_in_hz);

  // Returns a Renderer instance that writes its data to a WAV file, cutting
  // off silence at the beginning (not necessarily perfect silence, see
  // kAmplitudeThreshold) and at the end (only actual 0 samples in this case).
  static std::unique_ptr<Renderer> CreateBoundedWavFileWriter(
      std::string filename, int sampling_frequency_in_hz);

  // Returns a Renderer instance that does nothing with the audio data.
  static std::unique_ptr<Renderer> CreateDiscardRenderer(
      int sampling_frequency_in_hz);

  // FakeAudioDeviceModule overrides.
  int32_t Init() override;
  int32_t RegisterAudioCallback(AudioTransport* callback) override;

  int32_t StartPlayout() override;
  int32_t StopPlayout() override;
  int32_t StartRecording() override;
  int32_t StopRecording() override;

  bool Playing() const override;
  bool Recording() const override;

  // Blocks until the Renderer refuses to receive data.
  // Returns false if |timeout_ms| passes before that happens.
  bool WaitForPlayoutEnd(int timeout_ms = rtc::Event::kForever);
  // Blocks until the Recorder stops producing data.
  // Returns false if |timeout_ms| passes before that happens.
  bool WaitForRecordingEnd(int timeout_ms = rtc::Event::kForever);

 private:
  // Thread entry point; forwards to ProcessAudio().
  static bool Run(void* obj);
  // Processes one captured and/or one playout frame, then waits for the
  // next timer tick.
  void ProcessAudio();

  const std::unique_ptr<Capturer> capturer_ RTC_GUARDED_BY(lock_);
  const std::unique_ptr<Renderer> renderer_ RTC_GUARDED_BY(lock_);
  const float speed_;

  rtc::CriticalSection lock_;
  AudioTransport* audio_callback_ RTC_GUARDED_BY(lock_);
  bool rendering_ RTC_GUARDED_BY(lock_);
  bool capturing_ RTC_GUARDED_BY(lock_);
  // Signaled while the device is NOT rendering/capturing.
  rtc::Event done_rendering_;
  rtc::Event done_capturing_;

  std::vector<int16_t> playout_buffer_ RTC_GUARDED_BY(lock_);
  rtc::BufferT<int16_t> recording_buffer_ RTC_GUARDED_BY(lock_);

  std::unique_ptr<EventTimerWrapper> tick_;
  rtc::PlatformThread thread_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FAKE_AUDIO_DEVICE_H_

View File

@ -0,0 +1,131 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <algorithm>
#include <array>
#include "webrtc/common_audio/wav_file.h"
#include "webrtc/common_audio/wav_header.h"
#include "webrtc/test/fake_audio_device.h"
#include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
namespace {
// Feeds |input_samples| through a BoundedWavFileWriter in frames of
// |samples_per_frame| samples and verifies the resulting WAV file contains
// exactly |expected_samples|.
// Fix: |samples_per_frame| was previously ignored in favor of hard-coded
// constants (8 samples, 800 Hz). Frames are 10 ms, so the sample rate is
// derived as samples_per_frame * 100; behavior is unchanged for all existing
// callers (which pass 8).
void RunTest(const std::vector<int16_t>& input_samples,
             const std::vector<int16_t>& expected_samples,
             size_t samples_per_frame) {
  const ::testing::TestInfo* const test_info =
      ::testing::UnitTest::GetInstance()->current_test_info();
  const std::string output_filename = test::OutputPath() +
      "BoundedWavFileWriterTest_" + test_info->name() + ".wav";

  const int sample_rate = static_cast<int>(samples_per_frame) * 100;
  EXPECT_EQ(FakeAudioDevice::SamplesPerFrame(sample_rate), samples_per_frame);

  {
    std::unique_ptr<FakeAudioDevice::Renderer> writer =
        FakeAudioDevice::CreateBoundedWavFileWriter(output_filename,
                                                    sample_rate);
    for (size_t i = 0; i < input_samples.size(); i += samples_per_frame) {
      EXPECT_TRUE(writer->Render(rtc::ArrayView<const int16_t>(
          &input_samples[i],
          std::min(samples_per_frame, input_samples.size() - i))));
    }
  }

  {
    WavReader reader(output_filename);
    std::vector<int16_t> read_samples(expected_samples.size());
    EXPECT_EQ(expected_samples.size(),
              reader.ReadSamples(read_samples.size(), read_samples.data()));
    EXPECT_EQ(expected_samples, read_samples);
    // The file must contain nothing beyond the expected samples.
    EXPECT_EQ(0u, reader.ReadSamples(read_samples.size(), read_samples.data()));
  }

  remove(output_filename.c_str());
}
} // namespace
// No silence at either end: every sample is written through unchanged.
TEST(BoundedWavFileWriterTest, NoSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 3, 88,
      1222, -1213, -13222, -7, -3525, 5787, -25247, 8
  };
  static const std::vector<int16_t> kExpectedSamples = kInputSamples;
  RunTest(kInputSamples, kExpectedSamples, 8);
}

// Leading samples below the amplitude threshold (including small non-zero
// ones) are trimmed; writing starts at the first loud sample.
TEST(BoundedWavFileWriterTest, SomeStartSilence) {
  static const std::vector<int16_t> kInputSamples = {
      0, 0, 0, 0, 3, 0, 0, 0,
      0, 3, -13222, -7, -3525, 5787, -25247, 8
  };
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 10,
                                                     kInputSamples.end());
  RunTest(kInputSamples, kExpectedSamples, 8);
}

// The threshold comparison uses the absolute value, so quiet negative samples
// also count as leading silence.
TEST(BoundedWavFileWriterTest, NegativeStartSilence) {
  static const std::vector<int16_t> kInputSamples = {
      0, -4, -6, 0, 3, 0, 0, 0,
      0, 3, -13222, -7, -3525, 5787, -25247, 8
  };
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 2,
                                                     kInputSamples.end());
  RunTest(kInputSamples, kExpectedSamples, 8);
}

// Trailing exact-zero samples are cut from the end of the file.
TEST(BoundedWavFileWriterTest, SomeEndSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 1, 0,
      0, 0, 0, 0, 0, 0, 0, 0
  };
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 9);
  RunTest(kInputSamples, kExpectedSamples, 8);
}

// Mid-stream zeros followed by more audio are restored; only the final run of
// zeros is trimmed.
TEST(BoundedWavFileWriterTest, DoubleEndSilence) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 0, 0,
      0, -1213, -13222, -7, -3525, 5787, 0, 0
  };
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 2);
  RunTest(kInputSamples, kExpectedSamples, 8);
}

// Silence is trimmed at the start and the end simultaneously.
TEST(BoundedWavFileWriterTest, DoubleSilence) {
  static const std::vector<int16_t> kInputSamples = {
      0, -1213, -13222, -7, -3525, 5787, 0, 0
  };
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin() + 1,
                                                     kInputSamples.end() - 2);
  RunTest(kInputSamples, kExpectedSamples, 8);
}

// Trailing silence spanning a partial final frame is still trimmed.
TEST(BoundedWavFileWriterTest, EndSilenceCutoff) {
  static const std::vector<int16_t> kInputSamples = {
      75, 1234, 243, -1231, -22222, 0, 1, 0,
      0, 0, 0
  };
  static const std::vector<int16_t> kExpectedSamples(kInputSamples.begin(),
                                                     kInputSamples.end() - 4);
  RunTest(kInputSamples, kExpectedSamples, 8);
}
} // namespace test
} // namespace webrtc

86
test/fake_decoder.cc Normal file
View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/fake_decoder.h"
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/rtc_base/timeutils.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
// No callback registered yet; use nullptr (not NULL) for consistency with
// the rest of the test utilities.
FakeDecoder::FakeDecoder() : callback_(nullptr) {}

// Stores the codec configuration; only width/height are used later (when
// producing blank output frames in Decode()).
int32_t FakeDecoder::InitDecode(const VideoCodec* config,
                                int32_t number_of_cores) {
  config_ = *config;
  return WEBRTC_VIDEO_CODEC_OK;
}
// Ignores the bitstream and emits a blank I420 frame of the configured size,
// carrying over the input's RTP timestamp and NTP time.
// NOTE(review): callback_ is not null-checked — assumes
// RegisterDecodeCompleteCallback() was called first.
int32_t FakeDecoder::Decode(const EncodedImage& input,
                            bool missing_frames,
                            const RTPFragmentationHeader* fragmentation,
                            const CodecSpecificInfo* codec_specific_info,
                            int64_t render_time_ms) {
  VideoFrame frame(I420Buffer::Create(config_.width, config_.height),
                   webrtc::kVideoRotation_0,
                   render_time_ms * rtc::kNumMicrosecsPerMillisec);
  frame.set_timestamp(input._timeStamp);
  frame.set_ntp_time_ms(input.ntp_time_ms_);

  callback_->Decoded(frame);

  return WEBRTC_VIDEO_CODEC_OK;
}
int32_t FakeDecoder::RegisterDecodeCompleteCallback(
    DecodedImageCallback* callback) {
  callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

// Nothing to release; always succeeds.
int32_t FakeDecoder::Release() {
  return WEBRTC_VIDEO_CODEC_OK;
}

const char* FakeDecoder::kImplementationName = "fake_decoder";
const char* FakeDecoder::ImplementationName() const {
  return kImplementationName;
}
// Verifies that the payload is the counting pattern produced by the matching
// fake H264 encoder (bytes 0, 1, 2, ... with 4-byte start codes and NAL
// headers interleaved), then delegates to FakeDecoder::Decode().
// Fix: the original bound check `i < input._length - sizeof(kStartCode)`
// underflows (size_t) when _length < 4, causing an out-of-bounds memcmp;
// rewritten with addition on the left side. Also guards against |i| jumping
// past the end of the buffer after skipping a trailing start code.
int32_t FakeH264Decoder::Decode(const EncodedImage& input,
                                bool missing_frames,
                                const RTPFragmentationHeader* fragmentation,
                                const CodecSpecificInfo* codec_specific_info,
                                int64_t render_time_ms) {
  static const uint8_t kStartCode[] = {0, 0, 0, 1};
  uint8_t value = 0;
  for (size_t i = 0; i < input._length; ++i) {
    if (i + sizeof(kStartCode) < input._length &&
        !memcmp(&input._buffer[i], kStartCode, sizeof(kStartCode))) {
      i += sizeof(kStartCode) + 1;  // Skip start code and NAL header.
      if (i >= input._length)
        break;
    }
    if (input._buffer[i] != value) {
      EXPECT_EQ(value, input._buffer[i])
          << "Bitstream mismatch between sender and receiver.";
      return -1;
    }
    ++value;
  }
  return FakeDecoder::Decode(input,
                             missing_frames,
                             fragmentation,
                             codec_specific_info,
                             render_time_ms);
}
} // namespace test
} // namespace webrtc

76
test/fake_decoder.h Normal file
View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FAKE_DECODER_H_
#define WEBRTC_TEST_FAKE_DECODER_H_
#include <vector>
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
namespace test {
// VideoDecoder that ignores the incoming bitstream and delivers blank frames
// of the configured size to the registered callback.
class FakeDecoder : public VideoDecoder {
 public:
  FakeDecoder();
  virtual ~FakeDecoder() {}

  int32_t InitDecode(const VideoCodec* config,
                     int32_t number_of_cores) override;

  int32_t Decode(const EncodedImage& input,
                 bool missing_frames,
                 const RTPFragmentationHeader* fragmentation,
                 const CodecSpecificInfo* codec_specific_info,
                 int64_t render_time_ms) override;

  int32_t RegisterDecodeCompleteCallback(
      DecodedImageCallback* callback) override;

  int32_t Release() override;

  const char* ImplementationName() const override;

  // "fake_decoder".
  static const char* kImplementationName;

 private:
  VideoCodec config_;
  DecodedImageCallback* callback_;
};
// FakeDecoder variant that additionally validates the fake-H264 counting
// bitstream pattern before producing a blank frame.
class FakeH264Decoder : public FakeDecoder {
 public:
  virtual ~FakeH264Decoder() {}

  int32_t Decode(const EncodedImage& input,
                 bool missing_frames,
                 const RTPFragmentationHeader* fragmentation,
                 const CodecSpecificInfo* codec_specific_info,
                 int64_t render_time_ms) override;
};

// FakeDecoder variant that drops every frame without decoding or delivering
// anything.
class FakeNullDecoder : public FakeDecoder {
 public:
  virtual ~FakeNullDecoder() {}

  int32_t Decode(const EncodedImage& input,
                 bool missing_frames,
                 const RTPFragmentationHeader* fragmentation,
                 const CodecSpecificInfo* codec_specific_info,
                 int64_t render_time_ms) override {
    return 0;
  }
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FAKE_DECODER_H_

377
test/fake_encoder.cc Normal file
View File

@ -0,0 +1,377 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/fake_encoder.h"
#include <string.h>
#include <algorithm>
#include <memory>
#include "webrtc/common_types.h"
#include "webrtc/modules/video_coding/include/video_codec_interface.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/system_wrappers/include/sleep.h"
#include "webrtc/test/gtest.h"
namespace webrtc {
namespace test {
// Key frames are produced this many times larger than delta frames; the
// overshoot is paid back over subsequent frames (see debt_bytes_).
const int kKeyframeSizeFactor = 10;

FakeEncoder::FakeEncoder(Clock* clock)
    : clock_(clock),
      callback_(nullptr),
      configured_input_framerate_(-1),
      max_target_bitrate_kbps_(-1),  // -1 means "no cap".
      pending_keyframe_(true),
      debt_bytes_(0) {
  // Generate some arbitrary not-all-zero data
  for (size_t i = 0; i < sizeof(encoded_buffer_); ++i) {
    encoded_buffer_[i] = static_cast<uint8_t>(i);
  }
}
void FakeEncoder::SetMaxBitrate(int max_kbps) {
RTC_DCHECK_GE(max_kbps, -1); // max_kbps == -1 disables it.
rtc::CritScope cs(&crit_sect_);
max_target_bitrate_kbps_ = max_kbps;
}
// Stores the codec configuration and resets the encoder state: the target
// bitrate starts at the configured start bitrate and the next frame will be
// a key frame.
int32_t FakeEncoder::InitEncode(const VideoCodec* config,
                                int32_t number_of_cores,
                                size_t max_payload_size) {
  rtc::CritScope cs(&crit_sect_);
  config_ = *config;
  target_bitrate_.SetBitrate(0, 0, config_.startBitrate * 1000);
  configured_input_framerate_ = config_.maxFramerate;
  pending_keyframe_ = true;
  return 0;
}
// Fabricates one EncodedImage per active simulcast stream, sized so that the
// average output rate tracks the configured bitrate allocation. No real
// encoding happens; payload bytes come from |encoded_buffer_|.
int32_t FakeEncoder::Encode(const VideoFrame& input_image,
                            const CodecSpecificInfo* codec_specific_info,
                            const std::vector<FrameType>* frame_types) {
  // Snapshot of shared state, taken under the lock so the encode loop
  // below can run without holding |crit_sect_|.
  unsigned char max_framerate;
  unsigned char num_simulcast_streams;
  SimulcastStream simulcast_streams[kMaxSimulcastStreams];
  EncodedImageCallback* callback;
  uint32_t target_bitrate_sum_kbps;
  int max_target_bitrate_kbps;
  size_t num_encoded_bytes;
  int framerate;
  VideoCodecMode mode;
  bool keyframe;
  {
    rtc::CritScope cs(&crit_sect_);
    max_framerate = config_.maxFramerate;
    num_simulcast_streams = config_.numberOfSimulcastStreams;
    for (int i = 0; i < num_simulcast_streams; ++i) {
      simulcast_streams[i] = config_.simulcastStream[i];
    }
    callback = callback_;
    target_bitrate_sum_kbps = target_bitrate_.get_sum_kbps();
    max_target_bitrate_kbps = max_target_bitrate_kbps_;
    num_encoded_bytes = sizeof(encoded_buffer_);
    mode = config_.mode;
    // Prefer the framerate from SetRateAllocation()/InitEncode(); fall back
    // to the codec maximum if none was configured.
    if (configured_input_framerate_ > 0) {
      framerate = configured_input_framerate_;
    } else {
      framerate = max_framerate;
    }
    // The first frame after InitEncode() is forced to be a keyframe.
    keyframe = pending_keyframe_;
    pending_keyframe_ = false;
  }
  // A keyframe request for any stream upgrades the whole frame.
  for (FrameType frame_type : *frame_types) {
    if (frame_type == kVideoFrameKey) {
      keyframe = true;
      break;
    }
  }
  RTC_DCHECK_GT(max_framerate, 0);
  // Total per-frame bit budget: at least the lowest stream's minimum,
  // optionally capped by SetMaxBitrate().
  size_t bitrate =
      std::max(target_bitrate_sum_kbps, simulcast_streams[0].minBitrate);
  if (max_target_bitrate_kbps > 0)
    bitrate = std::min(bitrate, static_cast<size_t>(max_target_bitrate_kbps));
  size_t bits_available = bitrate * 1000 / framerate;
  RTC_DCHECK_GT(num_simulcast_streams, 0);
  for (unsigned char i = 0; i < num_simulcast_streams; ++i) {
    CodecSpecificInfo specifics;
    memset(&specifics, 0, sizeof(specifics));
    specifics.codecType = kVideoCodecGeneric;
    specifics.codecSpecific.generic.simulcast_idx = i;
    // Per-frame bit bounds for this stream at the current framerate.
    size_t min_stream_bits = static_cast<size_t>(
        (simulcast_streams[i].minBitrate * 1000) / framerate);
    size_t max_stream_bits = static_cast<size_t>(
        (simulcast_streams[i].maxBitrate * 1000) / framerate);
    size_t stream_bits = (bits_available > max_stream_bits) ? max_stream_bits :
                         bits_available;
    size_t stream_bytes = (stream_bits + 7) / 8;
    if (keyframe) {
      // The first frame is a key frame and should be larger.
      // Store the overshoot bytes and distribute them over the coming frames,
      // so that we on average meet the bitrate target.
      debt_bytes_ += (kKeyframeSizeFactor - 1) * stream_bytes;
      stream_bytes *= kKeyframeSizeFactor;
    } else {
      if (debt_bytes_ > 0) {
        // Pay at most half of the frame size for old debts.
        size_t payment_size = std::min(stream_bytes / 2, debt_bytes_);
        debt_bytes_ -= payment_size;
        stream_bytes -= payment_size;
      }
    }
    // Never emit more payload than the scratch buffer holds.
    if (stream_bytes > num_encoded_bytes)
      stream_bytes = num_encoded_bytes;
    // Always encode something on the first frame.
    if (min_stream_bits > bits_available && i > 0)
      continue;
    std::unique_ptr<uint8_t[]> encoded_buffer(new uint8_t[num_encoded_bytes]);
    memcpy(encoded_buffer.get(), encoded_buffer_, num_encoded_bytes);
    EncodedImage encoded(encoded_buffer.get(), stream_bytes, num_encoded_bytes);
    encoded._timeStamp = input_image.timestamp();
    encoded.capture_time_ms_ = input_image.render_time_ms();
    // NOTE(review): indexes |frame_types| by simulcast stream index; assumes
    // one entry per stream — confirm with callers.
    encoded._frameType = (*frame_types)[i];
    encoded._encodedWidth = simulcast_streams[i].width;
    encoded._encodedHeight = simulcast_streams[i].height;
    encoded.rotation_ = input_image.rotation();
    encoded.content_type_ = (mode == kScreensharing)
                                ? VideoContentType::SCREENSHARE
                                : VideoContentType::UNSPECIFIED;
    specifics.codec_name = ImplementationName();
    specifics.codecSpecific.generic.simulcast_idx = i;
    RTC_DCHECK(callback);
    if (callback->OnEncodedImage(encoded, &specifics, nullptr).error !=
        EncodedImageCallback::Result::OK) {
      return -1;
    }
    // Consume this stream's share of the frame budget.
    bits_available -= std::min(encoded._length * 8, bits_available);
  }
  return 0;
}
// Registers the sink that receives the fabricated EncodedImages.
int32_t FakeEncoder::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  rtc::CritScope cs(&crit_sect_);
  callback_ = callback;
  return 0;
}
// Nothing to tear down; the fake encoder owns no external resources.
int32_t FakeEncoder::Release() {
  return 0;
}
// Channel feedback is ignored by the fake encoder.
int32_t FakeEncoder::SetChannelParameters(uint32_t packet_loss, int64_t rtt) {
  return 0;
}
// Adopts a new bitrate allocation and framerate; both feed the per-frame
// bit budget computed in Encode().
int32_t FakeEncoder::SetRateAllocation(const BitrateAllocation& rate_allocation,
                                       uint32_t framerate) {
  rtc::CritScope cs(&crit_sect_);
  configured_input_framerate_ = framerate;
  target_bitrate_ = rate_allocation;
  return 0;
}
// Name reported to stats/codec-specific info.
const char* FakeEncoder::kImplementationName = "fake_encoder";
const char* FakeEncoder::ImplementationName() const {
  return kImplementationName;
}
// Returns the framerate last set by InitEncode()/SetRateAllocation(),
// or -1 if none has been configured yet.
int FakeEncoder::GetConfiguredInputFramerate() const {
  rtc::CritScope cs(&crit_sect_);
  const int framerate = configured_input_framerate_;
  return framerate;
}
// Inserts itself between the base encoder and the real callback so that
// OnEncodedImage() can rewrite payloads into fake H264 NAL units.
FakeH264Encoder::FakeH264Encoder(Clock* clock)
    : FakeEncoder(clock), callback_(nullptr), idr_counter_(0) {
  FakeEncoder::RegisterEncodeCompleteCallback(this);
}
// Stores the downstream sink; the base class keeps |this| as its callback
// so frames are rewritten before being forwarded.
int32_t FakeH264Encoder::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  rtc::CritScope cs(&local_crit_sect_);
  callback_ = callback;
  return 0;
}
// Rewrites the fake payload so it parses as H264: every kIdrFrequency-th
// frame becomes SPS+PPS+IDR (three fragments); all others become a single
// non-IDR slice. The rest of the payload is filled with a counter pattern.
EncodedImageCallback::Result FakeH264Encoder::OnEncodedImage(
    const EncodedImage& encoded_image,
    const CodecSpecificInfo* codec_specific_info,
    const RTPFragmentationHeader* fragments) {
  const size_t kSpsSize = 8;
  const size_t kPpsSize = 11;
  const int kIdrFrequency = 10;
  EncodedImageCallback* callback;
  int current_idr_counter;
  {
    rtc::CritScope cs(&local_crit_sect_);
    callback = callback_;
    current_idr_counter = idr_counter_;
    ++idr_counter_;
  }
  RTPFragmentationHeader fragmentation;
  if (current_idr_counter % kIdrFrequency == 0 &&
      encoded_image._length > kSpsSize + kPpsSize + 1) {
    // IDR frame: SPS + PPS + IDR slice.
    const size_t kNumSlices = 3;
    fragmentation.VerifyAndAllocateFragmentationHeader(kNumSlices);
    fragmentation.fragmentationOffset[0] = 0;
    fragmentation.fragmentationLength[0] = kSpsSize;
    fragmentation.fragmentationOffset[1] = kSpsSize;
    fragmentation.fragmentationLength[1] = kPpsSize;
    fragmentation.fragmentationOffset[2] = kSpsSize + kPpsSize;
    fragmentation.fragmentationLength[2] =
        encoded_image._length - (kSpsSize + kPpsSize);
    // NAL header bytes (nal_unit_type 7, 8 and 5 respectively).
    const size_t kSpsNalHeader = 0x67;
    const size_t kPpsNalHeader = 0x68;
    const size_t kIdrNalHeader = 0x65;
    encoded_image._buffer[fragmentation.fragmentationOffset[0]] = kSpsNalHeader;
    encoded_image._buffer[fragmentation.fragmentationOffset[1]] = kPpsNalHeader;
    encoded_image._buffer[fragmentation.fragmentationOffset[2]] = kIdrNalHeader;
  } else {
    // Delta frame: one non-IDR slice covering the whole payload.
    const size_t kNumSlices = 1;
    fragmentation.VerifyAndAllocateFragmentationHeader(kNumSlices);
    fragmentation.fragmentationOffset[0] = 0;
    fragmentation.fragmentationLength[0] = encoded_image._length;
    const size_t kNalHeader = 0x41;
    encoded_image._buffer[fragmentation.fragmentationOffset[0]] = kNalHeader;
  }
  // Fill every non-header byte with an incrementing pattern; bytes at a
  // fragment offset are the NAL headers written above and are skipped.
  uint8_t value = 0;
  int fragment_counter = 0;
  for (size_t i = 0; i < encoded_image._length; ++i) {
    if (fragment_counter == fragmentation.fragmentationVectorSize ||
        i != fragmentation.fragmentationOffset[fragment_counter]) {
      encoded_image._buffer[i] = value++;
    } else {
      ++fragment_counter;
    }
  }
  CodecSpecificInfo specifics;
  memset(&specifics, 0, sizeof(specifics));
  specifics.codecType = kVideoCodecH264;
  specifics.codecSpecific.H264.packetization_mode =
      H264PacketizationMode::NonInterleaved;
  RTC_DCHECK(callback);
  return callback->OnEncodedImage(encoded_image, &specifics, &fragmentation);
}
// Fake encoder that blocks for a configurable delay on every Encode() call.
DelayedEncoder::DelayedEncoder(Clock* clock, int delay_ms)
    : test::FakeEncoder(clock), delay_ms_(delay_ms) {
  // The encoder could be created on a different thread than
  // it is being used on.
  sequence_checker_.Detach();
}
// Updates the per-frame delay. Must be called on the encode sequence.
void DelayedEncoder::SetDelay(int delay_ms) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  delay_ms_ = delay_ms;
}
// Simulates a slow encoder: sleep for the configured delay, then defer to
// the base-class implementation.
int32_t DelayedEncoder::Encode(const VideoFrame& input_image,
                               const CodecSpecificInfo* codec_specific_info,
                               const std::vector<FrameType>* frame_types) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  const int sleep_ms = delay_ms_;
  SleepMs(sleep_ms);
  return FakeEncoder::Encode(input_image, codec_specific_info, frame_types);
}
// Fake H264 encoder that dispatches Encode() calls to two task queues in
// round-robin fashion; queues are created lazily in InitEncode().
MultithreadedFakeH264Encoder::MultithreadedFakeH264Encoder(Clock* clock)
    : test::FakeH264Encoder(clock),
      current_queue_(0),
      queue1_(nullptr),
      queue2_(nullptr) {
  // The encoder could be created on a different thread than
  // it is being used on.
  sequence_checker_.Detach();
}
int32_t MultithreadedFakeH264Encoder::InitEncode(const VideoCodec* config,
int32_t number_of_cores,
size_t max_payload_size) {
RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
queue1_.reset(new rtc::TaskQueue("Queue 1"));
queue2_.reset(new rtc::TaskQueue("Queue 2"));
return FakeH264Encoder::InitEncode(config, number_of_cores, max_payload_size);
}
// Task posted to a worker queue. Copies the frame, codec info and frame
// types by value because the originals may not outlive the asynchronous
// encode.
class MultithreadedFakeH264Encoder::EncodeTask : public rtc::QueuedTask {
 public:
  EncodeTask(MultithreadedFakeH264Encoder* encoder,
             const VideoFrame& input_image,
             const CodecSpecificInfo* codec_specific_info,
             const std::vector<FrameType>* frame_types)
      : encoder_(encoder),
        input_image_(input_image),
        codec_specific_info_(),
        frame_types_(*frame_types) {
    // |codec_specific_info| may be null; keep the default-constructed copy.
    if (codec_specific_info)
      codec_specific_info_ = *codec_specific_info;
  }

 private:
  // Runs on the worker queue; returning true lets the queue delete the task.
  bool Run() override {
    encoder_->EncodeCallback(input_image_, &codec_specific_info_,
                             &frame_types_);
    return true;
  }
  MultithreadedFakeH264Encoder* const encoder_;
  VideoFrame input_image_;
  CodecSpecificInfo codec_specific_info_;
  std::vector<FrameType> frame_types_;
};
// Posts the encode as a task, alternating between the two worker queues.
int32_t MultithreadedFakeH264Encoder::Encode(
    const VideoFrame& input_image,
    const CodecSpecificInfo* codec_specific_info,
    const std::vector<FrameType>* frame_types) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  const bool use_first_queue = (current_queue_++ % 2 == 0);
  std::unique_ptr<rtc::TaskQueue>& queue = use_first_queue ? queue1_ : queue2_;
  // Queues only exist between InitEncode() and Release().
  if (!queue) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  queue->PostTask(std::unique_ptr<rtc::QueuedTask>(
      new EncodeTask(this, input_image, codec_specific_info, frame_types)));
  return WEBRTC_VIDEO_CODEC_OK;
}
// Runs on a worker queue (invoked by EncodeTask); performs the actual
// fake encode via the base class.
int32_t MultithreadedFakeH264Encoder::EncodeCallback(
    const VideoFrame& input_image,
    const CodecSpecificInfo* codec_specific_info,
    const std::vector<FrameType>* frame_types) {
  return FakeH264Encoder::Encode(input_image, codec_specific_info, frame_types);
}
// Tears down the worker queues (joining any in-flight tasks) before
// releasing the base encoder.
int32_t MultithreadedFakeH264Encoder::Release() {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&sequence_checker_);
  queue1_ = nullptr;
  queue2_ = nullptr;
  return FakeH264Encoder::Release();
}
} // namespace test
} // namespace webrtc

137
test/fake_encoder.h Normal file
View File

@ -0,0 +1,137 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FAKE_ENCODER_H_
#define WEBRTC_TEST_FAKE_ENCODER_H_
#include <vector>
#include <memory>
#include "webrtc/api/video_codecs/video_encoder.h"
#include "webrtc/common_types.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/rtc_base/sequenced_task_checker.h"
#include "webrtc/rtc_base/task_queue.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
namespace test {
// Fake VideoEncoder for tests: produces deterministic dummy payloads whose
// sizes track the configured bitrate allocation instead of encoding video.
class FakeEncoder : public VideoEncoder {
 public:
  explicit FakeEncoder(Clock* clock);
  virtual ~FakeEncoder() = default;

  // Sets max bitrate. Not thread-safe, call before registering the encoder.
  void SetMaxBitrate(int max_kbps);

  int32_t InitEncode(const VideoCodec* config,
                     int32_t number_of_cores,
                     size_t max_payload_size) override;
  int32_t Encode(const VideoFrame& input_image,
                 const CodecSpecificInfo* codec_specific_info,
                 const std::vector<FrameType>* frame_types) override;
  int32_t RegisterEncodeCompleteCallback(
      EncodedImageCallback* callback) override;
  int32_t Release() override;
  int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
  int32_t SetRateAllocation(const BitrateAllocation& rate_allocation,
                            uint32_t framerate) override;
  const char* ImplementationName() const override;
  // Returns the framerate set via InitEncode()/SetRateAllocation(), or -1
  // if none has been configured yet.
  int GetConfiguredInputFramerate() const;

  static const char* kImplementationName;

 protected:
  Clock* const clock_;
  VideoCodec config_ RTC_GUARDED_BY(crit_sect_);
  EncodedImageCallback* callback_ RTC_GUARDED_BY(crit_sect_);
  BitrateAllocation target_bitrate_ RTC_GUARDED_BY(crit_sect_);
  int configured_input_framerate_ RTC_GUARDED_BY(crit_sect_);
  int max_target_bitrate_kbps_ RTC_GUARDED_BY(crit_sect_);
  bool pending_keyframe_ RTC_GUARDED_BY(crit_sect_);
  rtc::CriticalSection crit_sect_;
  // Scratch payload copied into every produced EncodedImage.
  uint8_t encoded_buffer_[100000];

  // Current byte debt to be paid over a number of frames.
  // The debt is acquired by keyframes overshooting the bitrate target.
  size_t debt_bytes_;
};
// Fake encoder whose output is rewritten to parse as H264 NAL units
// (periodic SPS/PPS/IDR, otherwise single non-IDR slices). It registers
// itself as the base class's callback and forwards rewritten frames.
class FakeH264Encoder : public FakeEncoder, public EncodedImageCallback {
 public:
  explicit FakeH264Encoder(Clock* clock);
  virtual ~FakeH264Encoder() = default;

  int32_t RegisterEncodeCompleteCallback(
      EncodedImageCallback* callback) override;

  Result OnEncodedImage(const EncodedImage& encodedImage,
                        const CodecSpecificInfo* codecSpecificInfo,
                        const RTPFragmentationHeader* fragments) override;

 private:
  EncodedImageCallback* callback_ RTC_GUARDED_BY(local_crit_sect_);
  // Counts frames to decide when to emit an IDR (every 10th frame).
  int idr_counter_ RTC_GUARDED_BY(local_crit_sect_);
  rtc::CriticalSection local_crit_sect_;
};
// Fake encoder that sleeps for a configurable delay on every Encode() call,
// simulating a slow encoder.
class DelayedEncoder : public test::FakeEncoder {
 public:
  DelayedEncoder(Clock* clock, int delay_ms);
  virtual ~DelayedEncoder() = default;

  void SetDelay(int delay_ms);
  int32_t Encode(const VideoFrame& input_image,
                 const CodecSpecificInfo* codec_specific_info,
                 const std::vector<FrameType>* frame_types) override;

 private:
  int delay_ms_ RTC_ACCESS_ON(sequence_checker_);
  rtc::SequencedTaskChecker sequence_checker_;
};
// This class implements a multi-threaded fake encoder by posting
// FakeH264Encoder::Encode(.) tasks to |queue1_| and |queue2_|, in an
// alternating fashion. The class itself does not need to be thread safe,
// as it is called from the task queue in VideoStreamEncoder.
class MultithreadedFakeH264Encoder : public test::FakeH264Encoder {
 public:
  explicit MultithreadedFakeH264Encoder(Clock* clock);
  virtual ~MultithreadedFakeH264Encoder() = default;

  // Creates the two worker queues; must be called before Encode().
  int32_t InitEncode(const VideoCodec* config,
                     int32_t number_of_cores,
                     size_t max_payload_size) override;

  // Posts the encode to one of the worker queues and returns immediately.
  int32_t Encode(const VideoFrame& input_image,
                 const CodecSpecificInfo* codec_specific_info,
                 const std::vector<FrameType>* frame_types) override;

  // Invoked on a worker queue; runs the actual (base-class) encode.
  int32_t EncodeCallback(const VideoFrame& input_image,
                         const CodecSpecificInfo* codec_specific_info,
                         const std::vector<FrameType>* frame_types);

  int32_t Release() override;

 protected:
  class EncodeTask;

  int current_queue_ RTC_ACCESS_ON(sequence_checker_);
  std::unique_ptr<rtc::TaskQueue> queue1_ RTC_ACCESS_ON(sequence_checker_);
  std::unique_ptr<rtc::TaskQueue> queue2_ RTC_ACCESS_ON(sequence_checker_);
  rtc::SequencedTaskChecker sequence_checker_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FAKE_ENCODER_H_

255
test/fake_network_pipe.cc Normal file
View File

@ -0,0 +1,255 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/fake_network_pipe.h"
#include <assert.h>
#include <math.h>
#include <string.h>
#include <algorithm>
#include <cmath>
#include "webrtc/call/call.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
namespace {
// Polling interval used by Process() when the delay queue is empty.
constexpr int64_t kDefaultProcessIntervalMs = 5;
}
// Routes packets to a PacketReceiver, classifying RTP packets by payload
// type using |payload_type_map|. No receiver is attached until SetReceiver().
DemuxerImpl::DemuxerImpl(const std::map<uint8_t, MediaType>& payload_type_map)
    : packet_receiver_(nullptr), payload_type_map_(payload_type_map) {}
// Attaches (or detaches, with nullptr) the downstream packet receiver.
void DemuxerImpl::SetReceiver(PacketReceiver* receiver) {
  packet_receiver_ = receiver;
}
void DemuxerImpl::DeliverPacket(const NetworkPacket* packet,
const PacketTime& packet_time) {
// No packet receiver means that this demuxer will terminate the flow of
// packets.
if (!packet_receiver_)
return;
const uint8_t* const packet_data = packet->data();
const size_t packet_length = packet->data_length();
MediaType media_type = MediaType::ANY;
if (!RtpHeaderParser::IsRtcp(packet_data, packet_length)) {
RTC_CHECK_GE(packet_length, 2);
const uint8_t payload_type = packet_data[1] & 0x7f;
std::map<uint8_t, MediaType>::const_iterator it =
payload_type_map_.find(payload_type);
RTC_CHECK(it != payload_type_map_.end())
<< "payload type " << static_cast<int>(payload_type) << " unknown.";
media_type = it->second;
}
packet_receiver_->DeliverPacket(media_type, packet_data, packet_length,
packet_time);
}
// Convenience constructor with a fixed RNG seed of 1.
FakeNetworkPipe::FakeNetworkPipe(Clock* clock,
                                 const FakeNetworkPipe::Config& config,
                                 std::unique_ptr<Demuxer> demuxer)
    : FakeNetworkPipe(clock, config, std::move(demuxer), 1) {}
// Full constructor. |seed| makes the loss/jitter randomness reproducible.
// SetConfig() also derives the burst-loss probabilities from |config|.
FakeNetworkPipe::FakeNetworkPipe(Clock* clock,
                                 const FakeNetworkPipe::Config& config,
                                 std::unique_ptr<Demuxer> demuxer,
                                 uint64_t seed)
    : clock_(clock),
      demuxer_(std::move(demuxer)),
      random_(seed),
      config_(),
      dropped_packets_(0),
      sent_packets_(0),
      total_packet_delay_(0),
      bursting_(false),
      next_process_time_(clock_->TimeInMilliseconds()),
      last_log_time_(clock_->TimeInMilliseconds()) {
  SetConfig(config);
}
// Reclaims any packets still queued on either link.
FakeNetworkPipe::~FakeNetworkPipe() {
  while (!capacity_link_.empty()) {
    NetworkPacket* packet = capacity_link_.front();
    capacity_link_.pop();
    delete packet;
  }
  for (NetworkPacket* packet : delay_link_)
    delete packet;
  delay_link_.clear();
}
// Forwards the receiver to the demuxer, which terminates the pipe.
void FakeNetworkPipe::SetReceiver(PacketReceiver* receiver) {
  RTC_CHECK(demuxer_);
  demuxer_->SetReceiver(receiver);
}
// Adopts a new link configuration and derives the loss-model probabilities:
// uniform loss when avg_burst_loss_length is unset, otherwise a two-state
// Gilbert-Elliot model.
void FakeNetworkPipe::SetConfig(const FakeNetworkPipe::Config& config) {
  rtc::CritScope crit(&lock_);
  config_ = config;  // Shallow copy of the struct.
  double prob_loss = config.loss_percent / 100.0;
  if (config_.avg_burst_loss_length == -1) {
    // Uniform loss
    prob_loss_bursting_ = prob_loss;
    prob_start_bursting_ = prob_loss;
  } else {
    // Lose packets according to a gilbert-elliot model.
    int avg_burst_loss_length = config.avg_burst_loss_length;
    // The model cannot realize the requested loss rate with a shorter
    // average burst than this.
    int min_avg_burst_loss_length = std::ceil(prob_loss / (1 - prob_loss));

    RTC_CHECK_GT(avg_burst_loss_length, min_avg_burst_loss_length)
        << "For a total packet loss of " << config.loss_percent << "%% then"
        << " avg_burst_loss_length must be " << min_avg_burst_loss_length + 1
        << " or higher.";

    // P(stay in burst) and P(enter burst), chosen so the stationary loss
    // rate equals |prob_loss| with the requested mean burst length.
    prob_loss_bursting_ = (1.0 - 1.0 / avg_burst_loss_length);
    prob_start_bursting_ = prob_loss / (1 - prob_loss) / avg_burst_loss_length;
  }
}
// Enqueues a packet onto the capacity link, computing when it would finish
// transmission at the configured link rate. Drops the packet if the queue
// is full.
void FakeNetworkPipe::SendPacket(const uint8_t* data, size_t data_length) {
  RTC_CHECK(demuxer_);
  rtc::CritScope crit(&lock_);
  if (config_.queue_length_packets > 0 &&
      capacity_link_.size() >= config_.queue_length_packets) {
    // Too many packet on the link, drop this one.
    ++dropped_packets_;
    return;
  }

  int64_t time_now = clock_->TimeInMilliseconds();

  // Delay introduced by the link capacity.
  int64_t capacity_delay_ms = 0;
  if (config_.link_capacity_kbps > 0) {
    const int bytes_per_millisecond = config_.link_capacity_kbps / 8;
    // To round to the closest millisecond we add half a milliseconds worth of
    // bytes to the delay calculation.
    capacity_delay_ms = (data_length + capacity_delay_error_bytes_ +
                         bytes_per_millisecond / 2) /
                        bytes_per_millisecond;
    // Track the rounding error so it cancels out over subsequent packets.
    capacity_delay_error_bytes_ +=
        data_length - capacity_delay_ms * bytes_per_millisecond;
  }
  int64_t network_start_time = time_now;

  // Check if there already are packets on the link and change network start
  // time forward if there is.
  if (!capacity_link_.empty() &&
      network_start_time < capacity_link_.back()->arrival_time())
    network_start_time = capacity_link_.back()->arrival_time();

  int64_t arrival_time = network_start_time + capacity_delay_ms;
  NetworkPacket* packet = new NetworkPacket(data, data_length, time_now,
                                            arrival_time);
  capacity_link_.push(packet);
}
// Returns the observed loss as a fraction in [0, 1] (0 until at least one
// packet has been delivered).
float FakeNetworkPipe::PercentageLoss() {
  rtc::CritScope crit(&lock_);
  if (sent_packets_ == 0)
    return 0;

  const float dropped = static_cast<float>(dropped_packets_);
  return dropped / (sent_packets_ + dropped_packets_);
}
int FakeNetworkPipe::AverageDelay() {
rtc::CritScope crit(&lock_);
if (sent_packets_ == 0)
return 0;
return static_cast<int>(total_packet_delay_ /
static_cast<int64_t>(sent_packets_));
}
// Moves due packets from the capacity link (applying loss and jitter) into
// the delay link, then delivers packets whose arrival time has passed.
// Delivery happens outside the lock so the receiver can call back into the
// pipe without deadlocking.
void FakeNetworkPipe::Process() {
  int64_t time_now = clock_->TimeInMilliseconds();
  std::queue<NetworkPacket*> packets_to_deliver;
  {
    rtc::CritScope crit(&lock_);
    if (time_now - last_log_time_ > 5000) {
      int64_t queueing_delay_ms = 0;
      if (!capacity_link_.empty()) {
        queueing_delay_ms = time_now - capacity_link_.front()->send_time();
      }
      LOG(LS_INFO) << "Network queue: " << queueing_delay_ms << " ms.";
      last_log_time_ = time_now;
    }
    // Check the capacity link first.
    while (!capacity_link_.empty() &&
           time_now >= capacity_link_.front()->arrival_time()) {
      // Time to get this packet.
      NetworkPacket* packet = capacity_link_.front();
      capacity_link_.pop();

      // Drop packets at an average rate of |config_.loss_percent| with
      // and average loss burst length of |config_.avg_burst_loss_length|.
      if ((bursting_ && random_.Rand<double>() < prob_loss_bursting_) ||
          (!bursting_ && random_.Rand<double>() < prob_start_bursting_)) {
        bursting_ = true;
        delete packet;
        continue;
      } else {
        bursting_ = false;
      }

      int arrival_time_jitter = random_.Gaussian(
          config_.queue_delay_ms, config_.delay_standard_deviation_ms);

      // If reordering is not allowed then adjust arrival_time_jitter
      // to make sure all packets are sent in order.
      if (!config_.allow_reordering && !delay_link_.empty() &&
          packet->arrival_time() + arrival_time_jitter <
              (*delay_link_.rbegin())->arrival_time()) {
        arrival_time_jitter =
            (*delay_link_.rbegin())->arrival_time() - packet->arrival_time();
      }
      packet->IncrementArrivalTime(arrival_time_jitter);
      delay_link_.insert(packet);
    }

    // Check the extra delay queue.
    while (!delay_link_.empty() &&
           time_now >= (*delay_link_.begin())->arrival_time()) {
      // Deliver this packet.
      NetworkPacket* packet = *delay_link_.begin();
      packets_to_deliver.push(packet);
      delay_link_.erase(delay_link_.begin());
      // |time_now| might be later than when the packet should have arrived, due
      // to NetworkProcess being called too late. For stats, use the time it
      // should have been on the link.
      total_packet_delay_ += packet->arrival_time() - packet->send_time();
    }
    sent_packets_ += packets_to_deliver.size();

    // BUGFIX: compute the next process time while still holding |lock_|.
    // Previously |delay_link_| was read and |next_process_time_| was written
    // after the critical section ended, racing with SendPacket()/SetConfig()
    // and with TimeUntilNextProcess(), which reads |next_process_time_|
    // under the lock.
    next_process_time_ = !delay_link_.empty()
                             ? (*delay_link_.begin())->arrival_time()
                             : time_now + kDefaultProcessIntervalMs;
  }

  // Deliver the packets without holding |lock_|.
  while (!packets_to_deliver.empty()) {
    NetworkPacket* packet = packets_to_deliver.front();
    packets_to_deliver.pop();
    demuxer_->DeliverPacket(packet, PacketTime());
    delete packet;
  }
}
// Milliseconds until Process() should next run; never negative.
int64_t FakeNetworkPipe::TimeUntilNextProcess() const {
  rtc::CritScope crit(&lock_);
  const int64_t delta = next_process_time_ - clock_->TimeInMilliseconds();
  return delta > 0 ? delta : 0;
}
} // namespace webrtc

186
test/fake_network_pipe.h Normal file
View File

@ -0,0 +1,186 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FAKE_NETWORK_PIPE_H_
#define WEBRTC_TEST_FAKE_NETWORK_PIPE_H_
#include <string.h>
#include <map>
#include <memory>
#include <queue>
#include <set>
#include "webrtc/common_types.h"
#include "webrtc/rtc_base/constructormagic.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/rtc_base/random.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class Clock;
class PacketReceiver;
enum class MediaType;
class NetworkPacket {
public:
NetworkPacket(const uint8_t* data,
size_t length,
int64_t send_time,
int64_t arrival_time)
: data_(new uint8_t[length]),
data_length_(length),
send_time_(send_time),
arrival_time_(arrival_time) {
memcpy(data_.get(), data, length);
}
uint8_t* data() const { return data_.get(); }
size_t data_length() const { return data_length_; }
int64_t send_time() const { return send_time_; }
int64_t arrival_time() const { return arrival_time_; }
void IncrementArrivalTime(int64_t extra_delay) {
arrival_time_ += extra_delay;
}
private:
// The packet data.
std::unique_ptr<uint8_t[]> data_;
// Length of data_.
size_t data_length_;
// The time the packet was sent out on the network.
const int64_t send_time_;
// The time the packet should arrive at the receiver.
int64_t arrival_time_;
};
class Demuxer {
public:
virtual ~Demuxer() = default;
virtual void SetReceiver(PacketReceiver* receiver) = 0;
virtual void DeliverPacket(const NetworkPacket* packet,
const PacketTime& packet_time) = 0;
};
class DemuxerImpl final : public Demuxer {
public:
explicit DemuxerImpl(const std::map<uint8_t, MediaType>& payload_type_map);
void SetReceiver(PacketReceiver* receiver) override;
void DeliverPacket(const NetworkPacket* packet,
const PacketTime& packet_time) override;
private:
PacketReceiver* packet_receiver_;
const std::map<uint8_t, MediaType> payload_type_map_;
RTC_DISALLOW_COPY_AND_ASSIGN(DemuxerImpl);
};
// Class faking a network link. This is a simple and naive solution just faking
// capacity and adding an extra transport delay in addition to the capacity
// introduced delay.
class FakeNetworkPipe {
public:
struct Config {
Config() {}
// Queue length in number of packets.
size_t queue_length_packets = 0;
// Delay in addition to capacity induced delay.
int queue_delay_ms = 0;
// Standard deviation of the extra delay.
int delay_standard_deviation_ms = 0;
// Link capacity in kbps.
int link_capacity_kbps = 0;
// Random packet loss.
int loss_percent = 0;
// If packets are allowed to be reordered.
bool allow_reordering = false;
// The average length of a burst of lost packets.
int avg_burst_loss_length = -1;
};
FakeNetworkPipe(Clock* clock,
const FakeNetworkPipe::Config& config,
std::unique_ptr<Demuxer> demuxer);
FakeNetworkPipe(Clock* clock,
const FakeNetworkPipe::Config& config,
std::unique_ptr<Demuxer> demuxer,
uint64_t seed);
~FakeNetworkPipe();
// Sets a new configuration. This won't affect packets already in the pipe.
void SetConfig(const FakeNetworkPipe::Config& config);
// Sends a new packet to the link.
void SendPacket(const uint8_t* packet, size_t packet_length);
// Must not be called in parallel with SendPacket or Process.
void SetReceiver(PacketReceiver* receiver);
// Processes the network queues and trigger PacketReceiver::IncomingPacket for
// packets ready to be delivered.
void Process();
int64_t TimeUntilNextProcess() const;
// Get statistics.
float PercentageLoss();
int AverageDelay();
size_t dropped_packets() { return dropped_packets_; }
size_t sent_packets() { return sent_packets_; }
private:
Clock* const clock_;
rtc::CriticalSection lock_;
const std::unique_ptr<Demuxer> demuxer_;
std::queue<NetworkPacket*> capacity_link_;
Random random_;
// Since we need to access both the packet with the earliest and latest
// arrival time we need to use a multiset to keep all packets sorted,
// hence, we cannot use a priority queue.
struct PacketArrivalTimeComparator {
bool operator()(const NetworkPacket* p1, const NetworkPacket* p2) {
return p1->arrival_time() < p2->arrival_time();
}
};
std::multiset<NetworkPacket*, PacketArrivalTimeComparator> delay_link_;
// Link configuration.
Config config_;
// Statistics.
size_t dropped_packets_;
size_t sent_packets_;
int64_t total_packet_delay_;
// Are we currently dropping a burst of packets?
bool bursting_;
// The probability to drop the packet if we are currently dropping a
// burst of packet
double prob_loss_bursting_;
// The probability to drop a burst of packets.
double prob_start_bursting_;
int64_t next_process_time_;
int64_t last_log_time_;
int64_t capacity_delay_error_bytes_ = 0;
RTC_DISALLOW_COPY_AND_ASSIGN(FakeNetworkPipe);
};
} // namespace webrtc
#endif // WEBRTC_TEST_FAKE_NETWORK_PIPE_H_

View File

@ -0,0 +1,445 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <memory>
#include "webrtc/call/call.h"
#include "webrtc/modules/rtp_rtcp/include/rtp_header_parser.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/fake_network_pipe.h"
#include "webrtc/test/gmock.h"
#include "webrtc/test/gtest.h"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Return;
using ::testing::Invoke;
namespace webrtc {
// Mock demuxer that lets tests set expectations on DeliverPacket calls.
class TestDemuxer : public Demuxer {
 public:
  void IncomingPacket(NetworkPacket* packet) {
    DeliverPacket(packet, PacketTime());
  }

  MOCK_METHOD1(SetReceiver, void(PacketReceiver* receiver));
  MOCK_METHOD2(DeliverPacket,
               void(const NetworkPacket* packet,
                    const PacketTime& packet_time));
};
// Demuxer that records the sequence number (first sizeof(int) bytes of each
// packet) of every delivered packet, so tests can check ordering.
class ReorderTestDemuxer : public TestDemuxer {
 public:
  void DeliverPacket(const NetworkPacket* packet,
                     const PacketTime& packet_time) override {
    RTC_DCHECK_GE(packet->data_length(), sizeof(int));
    int seq_num;
    memcpy(&seq_num, packet->data(), sizeof(int));
    delivered_sequence_numbers_.push_back(seq_num);
  }
  std::vector<int> delivered_sequence_numbers_;
};
// Mock PacketReceiver for verifying packets forwarded out of the demuxer.
class MockReceiver : public PacketReceiver {
 public:
  MOCK_METHOD4(
      DeliverPacket,
      DeliveryStatus(MediaType, const uint8_t*, size_t, const PacketTime&));
};
// Test fixture providing a simulated clock and helpers for pushing
// sequence-numbered packets through a pipe.
class FakeNetworkPipeTest : public ::testing::Test {
 public:
  FakeNetworkPipeTest() : fake_clock_(12345) {}

 protected:
  // Sends |number_packets| packets of |packet_size| bytes; the first
  // sizeof(int) bytes of each packet carry its sequence number.
  void SendPackets(FakeNetworkPipe* pipe, int number_packets, int packet_size) {
    RTC_DCHECK_GE(packet_size, sizeof(int));
    std::unique_ptr<uint8_t[]> packet(new uint8_t[packet_size]);
    for (int i = 0; i < number_packets; ++i) {
      // Set a sequence number for the packets by
      // using the first bytes in the packet.
      memcpy(packet.get(), &i, sizeof(int));
      pipe->SendPacket(packet.get(), packet_size);
    }
  }

  // Milliseconds needed to push one packet through a link of the given rate.
  int PacketTimeMs(int capacity_kbps, int packet_size) const {
    return 8 * packet_size / capacity_kbps;
  }

  SimulatedClock fake_clock_;
};
// Test the capacity link and verify we get as many packets as we expect.
TEST_F(FakeNetworkPipeTest, CapacityTest) {
  FakeNetworkPipe::Config config;
  config.queue_length_packets = 20;
  config.link_capacity_kbps = 80;
  // Owned by |pipe|; kept as a raw pointer for setting expectations.
  TestDemuxer* demuxer = new TestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));

  // Add 10 packets of 1000 bytes, = 80 kb, and verify it takes one second to
  // get through the pipe.
  const int kNumPackets = 10;
  const int kPacketSize = 1000;
  SendPackets(pipe.get(), kNumPackets, kPacketSize);

  // Time to get one packet through the link.
  const int kPacketTimeMs = PacketTimeMs(config.link_capacity_kbps,
                                         kPacketSize);

  // Time hasn't increased yet, so we shouldn't get any packets.
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(0);
  pipe->Process();

  // Advance enough time to release one packet.
  fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
  pipe->Process();

  // Release all but one packet
  fake_clock_.AdvanceTimeMilliseconds(9 * kPacketTimeMs - 1);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(8);
  pipe->Process();

  // And the last one.
  fake_clock_.AdvanceTimeMilliseconds(1);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
  pipe->Process();
}
// Test the extra network delay.
TEST_F(FakeNetworkPipeTest, ExtraDelayTest) {
  FakeNetworkPipe::Config config;
  config.queue_length_packets = 20;
  config.queue_delay_ms = 100;
  config.link_capacity_kbps = 80;
  // Owned by |pipe|; kept as a raw pointer for setting expectations.
  TestDemuxer* demuxer = new TestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));

  const int kNumPackets = 2;
  const int kPacketSize = 1000;
  SendPackets(pipe.get(), kNumPackets, kPacketSize);

  // Time to get one packet through the link.
  const int kPacketTimeMs = PacketTimeMs(config.link_capacity_kbps,
                                         kPacketSize);

  // Increase more than kPacketTimeMs, but not more than the extra delay.
  fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(0);
  pipe->Process();

  // Advance the network delay to get the first packet.
  fake_clock_.AdvanceTimeMilliseconds(config.queue_delay_ms);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
  pipe->Process();

  // Advance one more kPacketTimeMs to get the last packet.
  fake_clock_.AdvanceTimeMilliseconds(kPacketTimeMs);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
  pipe->Process();
}
// Verifies that packets beyond the configured queue length are dropped when
// sending too many packets too quickly.
TEST_F(FakeNetworkPipeTest, QueueLengthTest) {
  FakeNetworkPipe::Config config;
  config.queue_length_packets = 2;
  config.link_capacity_kbps = 80;
  // Owned by |pipe| below.
  TestDemuxer* demuxer = new TestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));
  const int kPacketSize = 1000;
  const int kPacketTimeMs = PacketTimeMs(config.link_capacity_kbps,
                                         kPacketSize);
  // Send three packets into a queue that only holds two.
  SendPackets(pipe.get(), 3, kPacketSize);
  // Advance time enough to deliver all three packets; only the two that fit
  // in the queue should come out.
  fake_clock_.AdvanceTimeMilliseconds(3 * kPacketTimeMs);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(2);
  pipe->Process();
}
// Verifies the delay/sent/dropped/loss statistics reported by the pipe.
TEST_F(FakeNetworkPipeTest, StatisticsTest) {
  FakeNetworkPipe::Config config;
  config.queue_length_packets = 2;
  config.queue_delay_ms = 20;
  config.link_capacity_kbps = 80;
  // Owned by |pipe| below.
  TestDemuxer* demuxer = new TestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));
  const int kPacketSize = 1000;
  // 1000 bytes at 80 kbps => 100 ms per packet.
  const int kPacketTimeMs = PacketTimeMs(config.link_capacity_kbps,
                                         kPacketSize);
  // Send three packets; the queue holds two, so one is dropped.
  SendPackets(pipe.get(), 3, kPacketSize);
  fake_clock_.AdvanceTimeMilliseconds(3 * kPacketTimeMs +
                                      config.queue_delay_ms);
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(2);
  pipe->Process();

  // Packet 1: kPacketTimeMs + config.queue_delay_ms = 120 ms,
  // packet 2: 2 * kPacketTimeMs + config.queue_delay_ms = 220 ms
  // => 170 ms average.
  EXPECT_EQ(pipe->AverageDelay(), 170);
  EXPECT_EQ(pipe->sent_packets(), 2u);
  EXPECT_EQ(pipe->dropped_packets(), 1u);
  EXPECT_EQ(pipe->PercentageLoss(), 1/3.f);
}
// Change the link capacity half-way through the test (while the pipe is
// empty) and verify that the delivery times change accordingly.
TEST_F(FakeNetworkPipeTest, ChangingCapacityWithEmptyPipeTest) {
  FakeNetworkPipe::Config config;
  config.queue_length_packets = 20;
  config.link_capacity_kbps = 80;
  // Owned by |pipe| below.
  TestDemuxer* demuxer = new TestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));

  // Add 10 packets of 1000 bytes, = 80 kb, and verify it takes one second to
  // get through the pipe.
  const int kNumPackets = 10;
  const int kPacketSize = 1000;
  SendPackets(pipe.get(), kNumPackets, kPacketSize);

  // Time to get one packet through the link.
  int packet_time_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);

  // Time hasn't increased yet, so we shouldn't get any packets.
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(0);
  pipe->Process();

  // Advance time in steps to release one packet at a time.
  for (int i = 0; i < kNumPackets; ++i) {
    fake_clock_.AdvanceTimeMilliseconds(packet_time_ms);
    EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
    pipe->Process();
  }

  // Change the capacity.
  config.link_capacity_kbps /= 2;  // Reduce to 50%.
  pipe->SetConfig(config);

  // Add another 10 packets of 1000 bytes, = 80 kb, and verify it takes two
  // seconds to get them through the pipe at the halved capacity.
  SendPackets(pipe.get(), kNumPackets, kPacketSize);

  // Time to get one packet through the link.
  packet_time_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);

  // Time hasn't increased yet, so we shouldn't get any packets.
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(0);
  pipe->Process();

  // Advance time in steps to release one packet at a time.
  for (int i = 0; i < kNumPackets; ++i) {
    fake_clock_.AdvanceTimeMilliseconds(packet_time_ms);
    EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
    pipe->Process();
  }

  // Check that all the packets were sent and nothing more is pending.
  EXPECT_EQ(static_cast<size_t>(2 * kNumPackets), pipe->sent_packets());
  fake_clock_.AdvanceTimeMilliseconds(pipe->TimeUntilNextProcess());
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(0);
  pipe->Process();
}
// Change the link capacity while packets are still queued in the pipe and
// verify that the delivery times change accordingly: packets already in the
// pipe keep the old per-packet time, later packets use the new one.
TEST_F(FakeNetworkPipeTest, ChangingCapacityWithPacketsInPipeTest) {
  FakeNetworkPipe::Config config;
  config.queue_length_packets = 20;
  config.link_capacity_kbps = 80;
  // Owned by |pipe| below.
  TestDemuxer* demuxer = new TestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));

  // Add 10 packets of 1000 bytes, = 80 kb.
  const int kNumPackets = 10;
  const int kPacketSize = 1000;
  SendPackets(pipe.get(), kNumPackets, kPacketSize);

  // Time to get one packet through the link at the initial speed.
  int packet_time_1_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);

  // Change the capacity while the first batch is still queued.
  config.link_capacity_kbps *= 2;  // Double the capacity.
  pipe->SetConfig(config);

  // Add another 10 packets of 1000 bytes, = 80 kb. At the doubled capacity
  // these should take half the per-packet time of the first batch.
  SendPackets(pipe.get(), kNumPackets, kPacketSize);

  // Time to get one packet through the link at the new capacity.
  int packet_time_2_ms = PacketTimeMs(config.link_capacity_kbps, kPacketSize);

  // Time hasn't increased yet, so we shouldn't get any packets.
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(0);
  pipe->Process();

  // Advance time in steps to release the first batch one packet at a time.
  for (int i = 0; i < kNumPackets; ++i) {
    fake_clock_.AdvanceTimeMilliseconds(packet_time_1_ms);
    EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
    pipe->Process();
  }

  // Advance time in steps to release the second batch one packet at a time.
  for (int i = 0; i < kNumPackets; ++i) {
    fake_clock_.AdvanceTimeMilliseconds(packet_time_2_ms);
    EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(1);
    pipe->Process();
  }

  // Check that all the packets were sent and nothing more is pending.
  EXPECT_EQ(static_cast<size_t>(2 * kNumPackets), pipe->sent_packets());
  fake_clock_.AdvanceTimeMilliseconds(pipe->TimeUntilNextProcess());
  EXPECT_CALL(*demuxer, DeliverPacket(_, _)).Times(0);
  pipe->Process();
}
// First run with reordering disallowed and verify in-order delivery, then
// enable config.allow_reordering and verify that reordering occurs.
TEST_F(FakeNetworkPipeTest, DisallowReorderingThenAllowReordering) {
  FakeNetworkPipe::Config config;
  config.queue_length_packets = 1000;
  config.link_capacity_kbps = 800;
  config.queue_delay_ms = 100;
  // Non-zero jitter, so delivery times vary and reordering can happen once
  // it is allowed.
  config.delay_standard_deviation_ms = 10;
  // Owned by |pipe| below; records delivered sequence numbers.
  ReorderTestDemuxer* demuxer = new ReorderTestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));

  const uint32_t kNumPackets = 100;
  const int kPacketSize = 10;
  SendPackets(pipe.get(), kNumPackets, kPacketSize);
  fake_clock_.AdvanceTimeMilliseconds(1000);
  pipe->Process();

  // Confirm that all packets have been delivered in order.
  EXPECT_EQ(kNumPackets, demuxer->delivered_sequence_numbers_.size());
  int last_seq_num = -1;
  for (int seq_num : demuxer->delivered_sequence_numbers_) {
    EXPECT_GT(seq_num, last_seq_num);
    last_seq_num = seq_num;
  }

  config.allow_reordering = true;
  pipe->SetConfig(config);
  SendPackets(pipe.get(), kNumPackets, kPacketSize);
  fake_clock_.AdvanceTimeMilliseconds(1000);
  demuxer->delivered_sequence_numbers_.clear();
  pipe->Process();

  // Confirm that all packets have been delivered
  // and that reordering has occurred.
  EXPECT_EQ(kNumPackets, demuxer->delivered_sequence_numbers_.size());
  bool reordering_has_occured = false;
  last_seq_num = -1;
  for (int seq_num : demuxer->delivered_sequence_numbers_) {
    if (last_seq_num > seq_num) {
      reordering_has_occured = true;
      break;
    }
    last_seq_num = seq_num;
  }
  EXPECT_TRUE(reordering_has_occured);
}
// Verifies that the configured loss percentage and average burst-loss length
// are approximately reproduced over a large number of packets.
TEST_F(FakeNetworkPipeTest, BurstLoss) {
  const int kLossPercent = 5;
  const int kAvgBurstLength = 3;
  const int kNumPackets = 10000;
  const int kPacketSize = 10;

  FakeNetworkPipe::Config config;
  config.queue_length_packets = kNumPackets;
  config.loss_percent = kLossPercent;
  config.avg_burst_loss_length = kAvgBurstLength;
  // Owned by |pipe| below; records delivered sequence numbers.
  ReorderTestDemuxer* demuxer = new ReorderTestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));

  SendPackets(pipe.get(), kNumPackets, kPacketSize);
  fake_clock_.AdvanceTimeMilliseconds(1000);
  pipe->Process();

  // Check that the average loss is |kLossPercent| percent.
  int lost_packets = kNumPackets - demuxer->delivered_sequence_numbers_.size();
  double loss_fraction = lost_packets / static_cast<double>(kNumPackets);
  EXPECT_NEAR(kLossPercent / 100.0, loss_fraction, 0.05);

  // Find the number of bursts that has occurred: each gap in the delivered
  // sequence numbers marks one burst of consecutive losses.
  size_t received_packets = demuxer->delivered_sequence_numbers_.size();
  int num_bursts = 0;
  for (size_t i = 0; i < received_packets - 1; ++i) {
    int diff = demuxer->delivered_sequence_numbers_[i + 1] -
               demuxer->delivered_sequence_numbers_[i];
    if (diff > 1)
      ++num_bursts;
  }
  double average_burst_length = static_cast<double>(lost_packets) / num_bursts;
  EXPECT_NEAR(kAvgBurstLength, average_burst_length, 0.3);
}
// Verifies that FakeNetworkPipe::SetReceiver forwards the receiver to its
// demuxer.
TEST_F(FakeNetworkPipeTest, SetReceiver) {
  FakeNetworkPipe::Config config;
  // Owned by |pipe| below.
  TestDemuxer* demuxer = new TestDemuxer();
  std::unique_ptr<FakeNetworkPipe> pipe(new FakeNetworkPipe(
      &fake_clock_, config, std::unique_ptr<Demuxer>(demuxer)));
  MockReceiver packet_receiver;
  EXPECT_CALL(*demuxer, SetReceiver(&packet_receiver)).Times(1);
  pipe->SetReceiver(&packet_receiver);
}
// Verifies that DemuxerImpl routes packets to the receiver with the media
// type mapped from the packet's payload type.
TEST(DemuxerImplTest, Demuxing) {
  constexpr uint8_t kVideoPayloadType = 100;
  constexpr uint8_t kAudioPayloadType = 101;
  constexpr int64_t kTimeNow = 12345;
  constexpr int64_t kArrivalTime = kTimeNow - 1;
  constexpr size_t kPacketSize = 10;
  DemuxerImpl demuxer({{kVideoPayloadType, MediaType::VIDEO},
                       {kAudioPayloadType, MediaType::AUDIO}});

  MockReceiver mock_receiver;
  demuxer.SetReceiver(&mock_receiver);

  // DemuxerImpl reads the payload type from byte 1 of the packet data.
  std::vector<uint8_t> data(kPacketSize);
  data[1] = kVideoPayloadType;
  std::unique_ptr<NetworkPacket> packet(
      new NetworkPacket(&data[0], kPacketSize, kTimeNow, kArrivalTime));
  EXPECT_CALL(mock_receiver, DeliverPacket(MediaType::VIDEO, _, _, _))
      .WillOnce(Return(PacketReceiver::DELIVERY_OK));
  demuxer.DeliverPacket(packet.get(), PacketTime());

  data[1] = kAudioPayloadType;
  packet.reset(
      new NetworkPacket(&data[0], kPacketSize, kTimeNow, kArrivalTime));
  EXPECT_CALL(mock_receiver, DeliverPacket(MediaType::AUDIO, _, _, _))
      .WillOnce(Return(PacketReceiver::DELIVERY_OK));
  demuxer.DeliverPacket(packet.get(), PacketTime());
}
} // namespace webrtc

View File

@ -0,0 +1,25 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/fake_texture_frame.h"
namespace webrtc {
namespace test {
// Builds a VideoFrame backed by a fake "native" buffer of the given size,
// carrying the supplied RTP timestamp, render time and rotation.
VideoFrame FakeNativeBuffer::CreateFrame(int width,
                                         int height,
                                         uint32_t timestamp,
                                         int64_t render_time_ms,
                                         VideoRotation rotation) {
  rtc::scoped_refptr<VideoFrameBuffer> buffer(
      new rtc::RefCountedObject<FakeNativeBuffer>(width, height));
  return VideoFrame(buffer, timestamp, render_time_ms, rotation);
}
} // namespace test
} // namespace webrtc

48
test/fake_texture_frame.h Normal file
View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FAKE_TEXTURE_FRAME_H_
#define WEBRTC_TEST_FAKE_TEXTURE_FRAME_H_
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/api/video/video_frame.h"
#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace test {
// A VideoFrameBuffer of type kNative that holds no pixel data; converting it
// to I420 yields an all-black buffer of the same dimensions. Used in tests
// that need a stand-in for texture-backed frames.
class FakeNativeBuffer : public VideoFrameBuffer {
 public:
  // Convenience factory wrapping a new FakeNativeBuffer in a VideoFrame.
  static VideoFrame CreateFrame(int width,
                                int height,
                                uint32_t timestamp,
                                int64_t render_time_ms,
                                VideoRotation rotation);
  FakeNativeBuffer(int width, int height) : width_(width), height_(height) {}
  Type type() const override { return Type::kNative; }
  int width() const override { return width_; }
  int height() const override { return height_; }

 private:
  // Software-conversion fallback: returns a black I420 buffer, since there is
  // no real pixel content behind this fake buffer.
  rtc::scoped_refptr<I420BufferInterface> ToI420() override {
    rtc::scoped_refptr<I420Buffer> buffer = I420Buffer::Create(width_, height_);
    I420Buffer::SetBlack(buffer);
    return buffer;
  }
  const int width_;
  const int height_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FAKE_TEXTURE_FRAME_H_

28
test/fake_videorenderer.h Normal file
View File

@ -0,0 +1,28 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FAKE_VIDEORENDERER_H_
#define WEBRTC_TEST_FAKE_VIDEORENDERER_H_
#include "webrtc/api/video/video_frame.h"
#include "webrtc/media/base/videosinkinterface.h"
namespace webrtc {
namespace test {
// A video sink that silently discards every frame it receives. Useful as a
// placeholder renderer in tests that do not inspect rendered output.
class FakeVideoRenderer : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  void OnFrame(const webrtc::VideoFrame& frame) override {}
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FAKE_VIDEORENDERER_H_

93
test/field_trial.cc Normal file
View File

@ -0,0 +1,93 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/field_trial.h"
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>
#include "webrtc/system_wrappers/include/field_trial.h"
#include "webrtc/system_wrappers/include/field_trial_default.h"
namespace webrtc {
namespace {
bool field_trials_initiated_ = false;
} // namespace
namespace test {
// Note: this code is copied from src/base/metrics/field_trial.cc since the aim
// is to mimic chromium --force-fieldtrials.
//
// Parses |trials_string| of the form "Trial1/Group1/Trial2/Group2/" and, if
// the whole string parses cleanly, hands it to
// webrtc::field_trial::InitFieldTrialsFromString. Any parse error (including
// a duplicate trial mapped to a different group) aborts the process. May only
// be called once; an empty string is a no-op.
void InitFieldTrialsFromString(const std::string& trials_string) {
  static const char kPersistentStringSeparator = '/';

  // Catch an error if this is called more than once.
  assert(!field_trials_initiated_);
  field_trials_initiated_ = true;

  if (trials_string.empty())
    return;

  size_t next_item = 0;
  // Tracks trial -> group seen so far, to detect contradictory duplicates.
  std::map<std::string, std::string> field_trials;
  while (next_item < trials_string.length()) {
    // A trial name followed by its group name, both '/'-terminated. Break
    // (and fall through to the error path) on any malformed or empty token.
    size_t name_end = trials_string.find(kPersistentStringSeparator, next_item);
    if (name_end == trials_string.npos || next_item == name_end)
      break;
    size_t group_name_end = trials_string.find(kPersistentStringSeparator,
                                               name_end + 1);
    if (group_name_end == trials_string.npos || name_end + 1 == group_name_end)
      break;
    std::string name(trials_string, next_item, name_end - next_item);
    std::string group_name(trials_string, name_end + 1,
                           group_name_end - name_end - 1);
    next_item = group_name_end + 1;

    // Fail if duplicate with different group name.
    if (field_trials.find(name) != field_trials.end() &&
        field_trials.find(name)->second != group_name) {
      break;
    }
    field_trials[name] = group_name;

    // Successfully parsed all field trials from the string: install the
    // config globally and return. Reaching the end of the loop any other way
    // means the string was malformed.
    if (next_item == trials_string.length()) {
      webrtc::field_trial::InitFieldTrialsFromString(trials_string.c_str());
      return;
    }
  }
  // Using fprintf as LOG does not print when this is called early in main.
  fprintf(stderr, "Invalid field trials string.\n");

  // Using abort so it crashes in both debug and release mode.
  abort();
}
// Saves the currently installed field-trial string, then installs |config|.
// Resets |field_trials_initiated_| so InitFieldTrialsFromString's
// once-only assert passes. |config| is copied into a member so the pointer
// handed to the global field-trial state stays valid for this object's
// lifetime.
ScopedFieldTrials::ScopedFieldTrials(const std::string& config)
    : previous_field_trials_(webrtc::field_trial::GetFieldTrialString()) {
  assert(field_trials_initiated_);
  field_trials_initiated_ = false;
  current_field_trials_ = config;
  InitFieldTrialsFromString(current_field_trials_);
}
// Restores the field-trial string that was active before construction.
ScopedFieldTrials::~ScopedFieldTrials() {
  // Should still be initialized, since InitFieldTrials is called from ctor.
  // That's why we don't restore the flag.
  assert(field_trials_initiated_);
  webrtc::field_trial::InitFieldTrialsFromString(previous_field_trials_);
}
} // namespace test
} // namespace webrtc

49
test/field_trial.h Normal file
View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FIELD_TRIAL_H_
#define WEBRTC_TEST_FIELD_TRIAL_H_
#include <string>
#include <map>
namespace webrtc {
namespace test {
// Parses enabled field trials from a string config, such as the one passed
// to chrome's argument --force-fieldtrials and initializes webrtc::field_trial
// with such a config.
// E.g.:
//   "WebRTC-experimentFoo/Enabled/WebRTC-experimentBar/Enabled100kbps/"
// Assigns the process to group "Enabled" on WebRTCExperimentFoo trial
// and to group "Enabled100kbps" on WebRTCExperimentBar.
//
// E.g. invalid config:
//   "WebRTC-experiment1/Enabled" (note missing / separator at the end).
//
// Note: This method crashes with an error message if an invalid config is
// passed to it. That can be used to find out if a binary is parsing the flags.
// It may only be called once per process.
void InitFieldTrialsFromString(const std::string& config);

// This class is used to override field-trial configs within specific tests.
// After this class goes out of scope previous field trials will be restored.
// Not thread-safe; intended for use on the test's main thread.
class ScopedFieldTrials {
 public:
  explicit ScopedFieldTrials(const std::string& config);
  ~ScopedFieldTrials();
 private:
  // Keeps the installed config string alive while it is active.
  std::string current_field_trials_;
  // The config that was active before this object was constructed.
  const char* previous_field_trials_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FIELD_TRIAL_H_

450
test/frame_generator.cc Normal file
View File

@ -0,0 +1,450 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/frame_generator.h"
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <memory>
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/rtc_base/keep_ref_until_done.h"
#include "webrtc/rtc_base/random.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/frame_utils.h"
namespace webrtc {
namespace test {
namespace {
// SquareGenerator is a FrameGenerator that draws 10 randomly sized and colored
// squares on a uniform gray background. Between each new generated frame, the
// squares are moved slightly towards the lower right corner.
class SquareGenerator : public FrameGenerator {
 public:
  SquareGenerator(int width, int height) {
    ChangeResolution(width, height);
    // Each square gets a distinct deterministic seed so the pattern is
    // reproducible across runs.
    for (int i = 0; i < 10; ++i) {
      squares_.emplace_back(new Square(width, height, i + 1));
    }
  }

  void ChangeResolution(size_t width, size_t height) override {
    rtc::CritScope lock(&crit_);
    width_ = static_cast<int>(width);
    height_ = static_cast<int>(height);
    RTC_CHECK(width_ > 0);
    RTC_CHECK(height_ > 0);
  }

  // Returns a pointer to a frame that stays valid until the next call.
  VideoFrame* NextFrame() override {
    rtc::CritScope lock(&crit_);
    rtc::scoped_refptr<I420Buffer> buffer(I420Buffer::Create(width_, height_));
    // Fill all three planes with 127, i.e. a mid-gray background.
    memset(buffer->MutableDataY(), 127, height_ * buffer->StrideY());
    memset(buffer->MutableDataU(), 127,
           buffer->ChromaHeight() * buffer->StrideU());
    memset(buffer->MutableDataV(), 127,
           buffer->ChromaHeight() * buffer->StrideV());

    for (const auto& square : squares_)
      square->Draw(buffer);

    frame_.reset(new VideoFrame(buffer, 0, 0, webrtc::kVideoRotation_0));
    return frame_.get();
  }

 private:
  // One randomly placed, randomly colored square that drifts a little each
  // time it is drawn.
  class Square {
   public:
    Square(int width, int height, int seed)
        : random_generator_(seed),
          x_(random_generator_.Rand(0, width)),
          y_(random_generator_.Rand(0, height)),
          length_(random_generator_.Rand(1, width > 4 ? width / 4 : 1)),
          yuv_y_(random_generator_.Rand(0, 255)),
          yuv_u_(random_generator_.Rand(0, 255)),
          yuv_v_(random_generator_.Rand(0, 255)) {}

    void Draw(const rtc::scoped_refptr<I420Buffer>& buffer) {
      // Drift 0-4 pixels; the modulus keeps the square fully inside the
      // buffer.
      x_ = (x_ + random_generator_.Rand(0, 4)) % (buffer->width() - length_);
      y_ = (y_ + random_generator_.Rand(0, 4)) % (buffer->height() - length_);
      // Luma plane: full resolution.
      for (int y = y_; y < y_ + length_; ++y) {
        uint8_t* pos_y =
            (buffer->MutableDataY() + x_ + y * buffer->StrideY());
        memset(pos_y, yuv_y_, length_);
      }
      // Chroma planes: subsampled by 2 in both dimensions.
      for (int y = y_; y < y_ + length_; y = y + 2) {
        uint8_t* pos_u =
            (buffer->MutableDataU() + x_ / 2 + y / 2 * buffer->StrideU());
        memset(pos_u, yuv_u_, length_ / 2);
        uint8_t* pos_v =
            (buffer->MutableDataV() + x_ / 2 + y / 2 * buffer->StrideV());
        memset(pos_v, yuv_v_, length_ / 2);
      }
    }

   private:
    Random random_generator_;
    int x_;
    int y_;
    const int length_;
    const uint8_t yuv_y_;
    const uint8_t yuv_u_;
    const uint8_t yuv_v_;
  };

  rtc::CriticalSection crit_;
  int width_ RTC_GUARDED_BY(&crit_);
  int height_ RTC_GUARDED_BY(&crit_);
  std::vector<std::unique_ptr<Square>> squares_ RTC_GUARDED_BY(&crit_);
  // Holds the most recently generated frame so NextFrame() can return a raw
  // pointer that stays valid until the next call.
  std::unique_ptr<VideoFrame> frame_ RTC_GUARDED_BY(&crit_);
};
// A FrameGenerator that plays I420 frames read from a list of already-opened
// files, looping over the files forever and showing each frame
// |frame_repeat_count| times. Takes ownership of the FILE*s and closes them
// on destruction.
class YuvFileGenerator : public FrameGenerator {
 public:
  YuvFileGenerator(std::vector<FILE*> files,
                   size_t width,
                   size_t height,
                   int frame_repeat_count)
      : file_index_(0),
        files_(files),
        width_(width),
        height_(height),
        frame_size_(CalcBufferSize(VideoType::kI420,
                                   static_cast<int>(width_),
                                   static_cast<int>(height_))),
        frame_buffer_(new uint8_t[frame_size_]),
        frame_display_count_(frame_repeat_count),
        current_display_count_(0) {
    RTC_DCHECK_GT(width, 0);
    RTC_DCHECK_GT(height, 0);
    RTC_DCHECK_GT(frame_repeat_count, 0);
  }

  virtual ~YuvFileGenerator() {
    for (FILE* file : files_)
      fclose(file);
  }

  // Returns a pointer to a frame that stays valid until the next call. A new
  // frame is read from file only every |frame_display_count_| calls.
  VideoFrame* NextFrame() override {
    if (current_display_count_ == 0)
      ReadNextFrame();
    if (++current_display_count_ >= frame_display_count_)
      current_display_count_ = 0;

    temp_frame_.reset(
        new VideoFrame(last_read_buffer_, 0, 0, webrtc::kVideoRotation_0));
    return temp_frame_.get();
  }

  // Reads the next frame into |last_read_buffer_|, advancing to the next
  // file (and rewinding it) when the current one is exhausted.
  void ReadNextFrame() {
    last_read_buffer_ =
        test::ReadI420Buffer(static_cast<int>(width_),
                             static_cast<int>(height_),
                             files_[file_index_]);
    if (!last_read_buffer_) {
      // No more frames to read in this file, rewind and move to next file.
      rewind(files_[file_index_]);
      file_index_ = (file_index_ + 1) % files_.size();
      last_read_buffer_ =
          test::ReadI420Buffer(static_cast<int>(width_),
                               static_cast<int>(height_),
                               files_[file_index_]);
      RTC_CHECK(last_read_buffer_);
    }
  }

 private:
  size_t file_index_;
  const std::vector<FILE*> files_;
  const size_t width_;
  const size_t height_;
  const size_t frame_size_;
  const std::unique_ptr<uint8_t[]> frame_buffer_;
  // How many consecutive NextFrame() calls show the same frame.
  const int frame_display_count_;
  int current_display_count_;
  rtc::scoped_refptr<I420Buffer> last_read_buffer_;
  std::unique_ptr<VideoFrame> temp_frame_;
};
// SlideGenerator works similarly to YuvFileGenerator but it fills the frames
// with randomly sized and colored squares instead of reading their content
// from files, simulating a slide-show source.
class SlideGenerator : public FrameGenerator {
 public:
  SlideGenerator(int width, int height, int frame_repeat_count)
      : width_(width),
        height_(height),
        frame_display_count_(frame_repeat_count),
        current_display_count_(0),
        random_generator_(1234) {  // Fixed seed for reproducible output.
    RTC_DCHECK_GT(width, 0);
    RTC_DCHECK_GT(height, 0);
    RTC_DCHECK_GT(frame_repeat_count, 0);
  }

  // Returns a pointer to a frame that stays valid until the next call. A new
  // "slide" is generated only every |frame_display_count_| calls.
  VideoFrame* NextFrame() override {
    if (current_display_count_ == 0)
      GenerateNewFrame();
    if (++current_display_count_ >= frame_display_count_)
      current_display_count_ = 0;

    frame_.reset(
        new VideoFrame(buffer_, 0, 0, webrtc::kVideoRotation_0));
    return frame_.get();
  }

  // Generates some randomly sized and colored squares scattered
  // over the frame.
  void GenerateNewFrame() {
    // The squares should have a varying order of magnitude in order
    // to simulate variation in the slides' complexity.
    const int kSquareNum =  1 << (4 + (random_generator_.Rand(0, 3) * 4));

    buffer_ = I420Buffer::Create(width_, height_);
    // Mid-gray background on all three planes.
    memset(buffer_->MutableDataY(), 127, height_ * buffer_->StrideY());
    memset(buffer_->MutableDataU(), 127,
           buffer_->ChromaHeight() * buffer_->StrideU());
    memset(buffer_->MutableDataV(), 127,
           buffer_->ChromaHeight() * buffer_->StrideV());

    for (int i = 0; i < kSquareNum; ++i) {
      int length = random_generator_.Rand(1, width_ > 4 ? width_ / 4 : 1);
      // Limit the length of later squares so that they don't overwrite the
      // previous ones too much.
      length = (length * (kSquareNum - i)) / kSquareNum;

      int x = random_generator_.Rand(0, width_ - length);
      int y = random_generator_.Rand(0, height_ - length);
      uint8_t yuv_y = random_generator_.Rand(0, 255);
      uint8_t yuv_u = random_generator_.Rand(0, 255);
      uint8_t yuv_v = random_generator_.Rand(0, 255);

      // Luma plane at full resolution.
      for (int yy = y; yy < y + length; ++yy) {
        uint8_t* pos_y =
            (buffer_->MutableDataY() + x + yy * buffer_->StrideY());
        memset(pos_y, yuv_y, length);
      }
      // Chroma planes subsampled by 2 in both dimensions.
      for (int yy = y; yy < y + length; yy += 2) {
        uint8_t* pos_u =
            (buffer_->MutableDataU() + x / 2 + yy / 2 * buffer_->StrideU());
        memset(pos_u, yuv_u, length / 2);
        uint8_t* pos_v =
            (buffer_->MutableDataV() + x / 2 + yy / 2 * buffer_->StrideV());
        memset(pos_v, yuv_v, length / 2);
      }
    }
  }

 private:
  const int width_;
  const int height_;
  // How many consecutive NextFrame() calls show the same slide.
  const int frame_display_count_;
  int current_display_count_;
  Random random_generator_;
  rtc::scoped_refptr<I420Buffer> buffer_;
  std::unique_ptr<VideoFrame> frame_;
};
// A FrameGenerator that produces frames by cropping a |target_width| x
// |target_height| window out of larger source frames read from yuv files,
// scrolling the window from the top-left to the bottom-right corner over
// |scroll_time_ms| and then holding still for |pause_time_ms| before moving
// on to the next source frame. Progress is driven by |clock|.
class ScrollingImageFrameGenerator : public FrameGenerator {
 public:
  ScrollingImageFrameGenerator(Clock* clock,
                               const std::vector<FILE*>& files,
                               size_t source_width,
                               size_t source_height,
                               size_t target_width,
                               size_t target_height,
                               int64_t scroll_time_ms,
                               int64_t pause_time_ms)
      : clock_(clock),
        start_time_(clock->TimeInMilliseconds()),
        scroll_time_(scroll_time_ms),
        pause_time_(pause_time_ms),
        num_frames_(files.size()),
        target_width_(static_cast<int>(target_width)),
        target_height_(static_cast<int>(target_height)),
        current_frame_num_(num_frames_ - 1),
        current_source_frame_(nullptr),
        file_generator_(files, source_width, source_height, 1) {
    RTC_DCHECK(clock_ != nullptr);
    RTC_DCHECK_GT(num_frames_, 0);
    // The source must be at least as large as the scrolled-out window.
    RTC_DCHECK_GE(source_height, target_height);
    RTC_DCHECK_GE(source_width, target_width);
    RTC_DCHECK_GE(scroll_time_ms, 0);
    RTC_DCHECK_GE(pause_time_ms, 0);
    RTC_DCHECK_GT(scroll_time_ms + pause_time_ms, 0);
  }

  virtual ~ScrollingImageFrameGenerator() {}

  VideoFrame* NextFrame() override {
    const int64_t kFrameDisplayTime = scroll_time_ + pause_time_;
    const int64_t now = clock_->TimeInMilliseconds();
    int64_t ms_since_start = now - start_time_;

    // Which source frame should be showing at this wall-clock time.
    size_t frame_num = (ms_since_start / kFrameDisplayTime) % num_frames_;
    UpdateSourceFrame(frame_num);

    // 0.0 at the start of the scroll phase, 1.0 once the pause phase begins.
    double scroll_factor;
    int64_t time_into_frame = ms_since_start % kFrameDisplayTime;
    if (time_into_frame < scroll_time_) {
      scroll_factor = static_cast<double>(time_into_frame) / scroll_time_;
    } else {
      scroll_factor = 1.0;
    }
    CropSourceToScrolledImage(scroll_factor);

    return current_frame_ ? &*current_frame_ : nullptr;
  }

  // Steps the underlying file generator forward until it is positioned on
  // |frame_num|.
  void UpdateSourceFrame(size_t frame_num) {
    while (current_frame_num_ != frame_num) {
      current_source_frame_ = file_generator_.NextFrame();
      current_frame_num_ = (current_frame_num_ + 1) % num_frames_;
    }
    RTC_DCHECK(current_source_frame_ != nullptr);
  }

  // Produces |current_frame_| by wrapping (not copying) the region of the
  // source frame that the scroll window has reached at |scroll_factor|.
  void CropSourceToScrolledImage(double scroll_factor) {
    int scroll_margin_x = current_source_frame_->width() - target_width_;
    int pixels_scrolled_x =
        static_cast<int>(scroll_margin_x * scroll_factor + 0.5);
    int scroll_margin_y = current_source_frame_->height() - target_height_;
    int pixels_scrolled_y =
        static_cast<int>(scroll_margin_y * scroll_factor + 0.5);

    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        current_source_frame_->video_frame_buffer()->ToI420();
    // Byte offsets of the window's top-left corner in each plane; chroma
    // planes are subsampled by 2.
    int offset_y =
        (i420_buffer->StrideY() * pixels_scrolled_y) + pixels_scrolled_x;
    int offset_u = (i420_buffer->StrideU() * (pixels_scrolled_y / 2)) +
                   (pixels_scrolled_x / 2);
    int offset_v = (i420_buffer->StrideV() * (pixels_scrolled_y / 2)) +
                   (pixels_scrolled_x / 2);

    // WrappedI420Buffer shares the source's pixel memory;
    // KeepRefUntilDone keeps the source buffer alive as long as the wrapper.
    current_frame_ = rtc::Optional<webrtc::VideoFrame>(webrtc::VideoFrame(
        new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
            target_width_, target_height_, &i420_buffer->DataY()[offset_y],
            i420_buffer->StrideY(), &i420_buffer->DataU()[offset_u],
            i420_buffer->StrideU(), &i420_buffer->DataV()[offset_v],
            i420_buffer->StrideV(), KeepRefUntilDone(i420_buffer)),
        kVideoRotation_0, 0));
  }

  Clock* const clock_;
  const int64_t start_time_;
  const int64_t scroll_time_;
  const int64_t pause_time_;
  const size_t num_frames_;
  const int target_width_;
  const int target_height_;

  size_t current_frame_num_;
  VideoFrame* current_source_frame_;
  rtc::Optional<VideoFrame> current_frame_;
  YuvFileGenerator file_generator_;
};
} // namespace
FrameForwarder::FrameForwarder() : sink_(nullptr) {}
FrameForwarder::~FrameForwarder() {}

// Forwards |video_frame| to the registered sink, if any.
void FrameForwarder::IncomingCapturedFrame(const VideoFrame& video_frame) {
  rtc::CritScope lock(&crit_);
  if (sink_)
    sink_->OnFrame(video_frame);
}

// Registers |sink| and records its wants. Only a single sink is supported;
// re-registering the same sink just updates the wants.
void FrameForwarder::AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
                                     const rtc::VideoSinkWants& wants) {
  rtc::CritScope lock(&crit_);
  RTC_DCHECK(!sink_ || sink_ == sink);
  sink_ = sink;
  sink_wants_ = wants;
}

// Unregisters |sink|; it must be the currently registered sink.
void FrameForwarder::RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink) {
  rtc::CritScope lock(&crit_);
  RTC_DCHECK_EQ(sink, sink_);
  sink_ = nullptr;
}

// Returns the wants most recently supplied via AddOrUpdateSink.
rtc::VideoSinkWants FrameForwarder::sink_wants() const {
  rtc::CritScope lock(&crit_);
  return sink_wants_;
}

// True iff a sink is currently registered.
bool FrameForwarder::has_sinks() const {
  rtc::CritScope lock(&crit_);
  return sink_ != nullptr;
}
// Factory for the moving-squares generator defined above.
std::unique_ptr<FrameGenerator> FrameGenerator::CreateSquareGenerator(
    int width,
    int height) {
  return std::unique_ptr<FrameGenerator>(new SquareGenerator(width, height));
}
// Factory for the slide-show generator defined above.
std::unique_ptr<FrameGenerator> FrameGenerator::CreateSlideGenerator(
    int width, int height, int frame_repeat_count) {
  return std::unique_ptr<FrameGenerator>(new SlideGenerator(
      width, height, frame_repeat_count));
}
// Opens every file in |filenames| and hands the FILE*s to a
// YuvFileGenerator, which takes ownership and closes them on destruction.
// DCHECKs (debug-only) that each file opens successfully.
std::unique_ptr<FrameGenerator> FrameGenerator::CreateFromYuvFile(
    std::vector<std::string> filenames,
    size_t width,
    size_t height,
    int frame_repeat_count) {
  RTC_DCHECK(!filenames.empty());
  std::vector<FILE*> files;
  for (const std::string& filename : filenames) {
    FILE* file = fopen(filename.c_str(), "rb");
    RTC_DCHECK(file != nullptr);
    files.push_back(file);
  }

  return std::unique_ptr<FrameGenerator>(
      new YuvFileGenerator(files, width, height, frame_repeat_count));
}
// Opens every file in |filenames| and hands the FILE*s to a
// ScrollingImageFrameGenerator (ownership passes through its internal
// YuvFileGenerator). DCHECKs (debug-only) that each file opens successfully.
std::unique_ptr<FrameGenerator>
FrameGenerator::CreateScrollingInputFromYuvFiles(
    Clock* clock,
    std::vector<std::string> filenames,
    size_t source_width,
    size_t source_height,
    size_t target_width,
    size_t target_height,
    int64_t scroll_time_ms,
    int64_t pause_time_ms) {
  RTC_DCHECK(!filenames.empty());
  std::vector<FILE*> files;
  for (const std::string& filename : filenames) {
    FILE* file = fopen(filename.c_str(), "rb");
    RTC_DCHECK(file != nullptr);
    files.push_back(file);
  }

  return std::unique_ptr<FrameGenerator>(new ScrollingImageFrameGenerator(
      clock, files, source_width, source_height, target_width, target_height,
      scroll_time_ms, pause_time_ms));
}
} // namespace test
} // namespace webrtc

101
test/frame_generator.h Normal file
View File

@ -0,0 +1,101 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FRAME_GENERATOR_H_
#define WEBRTC_TEST_FRAME_GENERATOR_H_
#include <memory>
#include <string>
#include <vector>
#include "webrtc/api/video/video_frame.h"
#include "webrtc/media/base/videosourceinterface.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class Clock;
namespace test {
// FrameForwarder can be used as an implementation
// of rtc::VideoSourceInterface<VideoFrame> where the caller controls when
// a frame should be forwarded to its sink.
// Currently this implementation only supports one sink.
class FrameForwarder : public rtc::VideoSourceInterface<VideoFrame> {
 public:
  FrameForwarder();
  virtual ~FrameForwarder();
  // Forwards |video_frame| to the registered |sink_|.
  virtual void IncomingCapturedFrame(const VideoFrame& video_frame);
  // Returns the wants most recently supplied via AddOrUpdateSink().
  rtc::VideoSinkWants sink_wants() const;
  // Returns true if a sink is currently registered.
  bool has_sinks() const;

 protected:
  // rtc::VideoSourceInterface implementation. Sink state is guarded by
  // |crit_|.
  void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
                       const rtc::VideoSinkWants& wants) override;
  void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink) override;

  rtc::CriticalSection crit_;
  rtc::VideoSinkInterface<VideoFrame>* sink_ RTC_GUARDED_BY(crit_);
  rtc::VideoSinkWants sink_wants_ RTC_GUARDED_BY(crit_);
};
// Interface for the test-only video frame sources; concrete implementations
// are constructed through the static Create* factory functions below.
class FrameGenerator {
 public:
  virtual ~FrameGenerator() = default;

  // Returns video frame that remains valid until next call.
  virtual VideoFrame* NextFrame() = 0;

  // Change the capture resolution. Not supported by all implementations;
  // the default implementation hits RTC_NOTREACHED().
  virtual void ChangeResolution(size_t width, size_t height) {
    RTC_NOTREACHED();
  }

  // Creates a frame generator that produces frames with small squares that
  // move randomly towards the lower right corner.
  static std::unique_ptr<FrameGenerator> CreateSquareGenerator(int width,
                                                               int height);

  // Creates a frame generator that repeatedly plays a set of yuv files.
  // The frame_repeat_count determines how many times each frame is shown,
  // with 1 = show each frame once, etc.
  static std::unique_ptr<FrameGenerator> CreateFromYuvFile(
      std::vector<std::string> files,
      size_t width,
      size_t height,
      int frame_repeat_count);

  // Creates a frame generator which takes a set of yuv files (wrapping a
  // frame generator created by CreateFromYuvFile() above), but outputs frames
  // that have been cropped to specified resolution: source_width/source_height
  // is the size of the source images, target_width/target_height is the size of
  // the cropped output. For each source image read, the cropped viewport will
  // be scrolled top to bottom/left to right for scroll_time_ms milliseconds.
  // After that the image will stay in place for pause_time_ms milliseconds,
  // and then this will be repeated with the next file from the input set.
  static std::unique_ptr<FrameGenerator> CreateScrollingInputFromYuvFiles(
      Clock* clock,
      std::vector<std::string> filenames,
      size_t source_width,
      size_t source_height,
      size_t target_width,
      size_t target_height,
      int64_t scroll_time_ms,
      int64_t pause_time_ms);

  // Creates a frame generator that produces randomly generated slides.
  // frame_repeat_count determines how many times each slide is shown.
  static std::unique_ptr<FrameGenerator> CreateSlideGenerator(
      int width, int height, int frame_repeat_count);
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FRAME_GENERATOR_H_

View File

@ -0,0 +1,259 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/frame_generator_capturer.h"
#include <utility>
#include <vector>
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/rtc_base/logging.h"
#include "webrtc/rtc_base/platform_thread.h"
#include "webrtc/rtc_base/task_queue.h"
#include "webrtc/rtc_base/timeutils.h"
#include "webrtc/system_wrappers/include/clock.h"
#include "webrtc/test/frame_generator.h"
#include "webrtc/call/video_send_stream.h"
namespace webrtc {
namespace test {
// Task that calls InsertFrame() on |frame_generator_capturer_| and, when
// repeating, reposts itself on the current TaskQueue so frames arrive at
// approximately the configured rate. When the configured rate changes, the
// task replaces itself with a new instance using the new interval.
class FrameGeneratorCapturer::InsertFrameTask : public rtc::QueuedTask {
 public:
  // Repeats in |repeat_interval_ms|. One-time if |repeat_interval_ms| == 0.
  InsertFrameTask(
      webrtc::test::FrameGeneratorCapturer* frame_generator_capturer,
      uint32_t repeat_interval_ms)
      : frame_generator_capturer_(frame_generator_capturer),
        repeat_interval_ms_(repeat_interval_ms),
        intended_run_time_ms_(-1) {}

 private:
  bool Run() override {
    bool task_completed = true;
    if (repeat_interval_ms_ > 0) {
      // This is not a one-off frame. Check if the frame interval for this
      // task queue is the same as the currently configured frame rate.
      uint32_t current_interval_ms =
          1000 / frame_generator_capturer_->GetCurrentConfiguredFramerate();
      if (repeat_interval_ms_ != current_interval_ms) {
        // Frame rate has changed since task was started, create a new instance.
        rtc::TaskQueue::Current()->PostDelayedTask(
            std::unique_ptr<rtc::QueuedTask>(new InsertFrameTask(
                frame_generator_capturer_, current_interval_ms)),
            current_interval_ms);
      } else {
        // Schedule the next frame capture event to happen at approximately the
        // correct absolute time point.
        int64_t delay_ms;
        int64_t time_now_ms = rtc::TimeMillis();
        if (intended_run_time_ms_ > 0) {
          // |delay_ms| is how far behind schedule this run started.
          delay_ms = time_now_ms - intended_run_time_ms_;
        } else {
          delay_ms = 0;
          intended_run_time_ms_ = time_now_ms;
        }
        intended_run_time_ms_ += repeat_interval_ms_;
        if (delay_ms < repeat_interval_ms_) {
          rtc::TaskQueue::Current()->PostDelayedTask(
              std::unique_ptr<rtc::QueuedTask>(this),
              repeat_interval_ms_ - delay_ms);
        } else {
          // Already at least one full interval behind schedule: run again as
          // soon as possible and report that the rate cannot be sustained.
          rtc::TaskQueue::Current()->PostDelayedTask(
              std::unique_ptr<rtc::QueuedTask>(this), 0);
          LOG(LS_ERROR)
              << "Frame Generator Capturer can't keep up with requested fps";
        }
        // Repost of this instance, make sure it is not deleted.
        task_completed = false;
      }
    }
    frame_generator_capturer_->InsertFrame();
    // Task should be deleted only if it's not repeating.
    return task_completed;
  }

  webrtc::test::FrameGeneratorCapturer* const frame_generator_capturer_;
  const uint32_t repeat_interval_ms_;
  // Absolute time (rtc::TimeMillis()) this run was meant to happen at;
  // -1 until the first repeated run has been scheduled.
  int64_t intended_run_time_ms_;
};
FrameGeneratorCapturer* FrameGeneratorCapturer::Create(int width,
                                                       int height,
                                                       int target_fps,
                                                       Clock* clock) {
  // Wraps a square-pattern generator. Returns nullptr when initialization
  // fails; otherwise the caller takes ownership.
  std::unique_ptr<FrameGeneratorCapturer> capturer(new FrameGeneratorCapturer(
      clock, FrameGenerator::CreateSquareGenerator(width, height), target_fps));
  return capturer->Init() ? capturer.release() : nullptr;
}
FrameGeneratorCapturer* FrameGeneratorCapturer::CreateFromYuvFile(
    const std::string& file_name,
    size_t width,
    size_t height,
    int target_fps,
    Clock* clock) {
  // Plays back a single YUV file (frame_repeat_count == 1). Returns nullptr
  // when initialization fails; otherwise the caller takes ownership.
  std::vector<std::string> files(1, file_name);
  std::unique_ptr<FrameGeneratorCapturer> capturer(new FrameGeneratorCapturer(
      clock, FrameGenerator::CreateFromYuvFile(files, width, height, 1),
      target_fps));
  return capturer->Init() ? capturer.release() : nullptr;
}
FrameGeneratorCapturer* FrameGeneratorCapturer::CreateSlideGenerator(
    int width,
    int height,
    int frame_repeat_count,
    int target_fps,
    Clock* clock) {
  // Wraps a slide generator; each slide is shown |frame_repeat_count| times.
  // Returns nullptr when initialization fails.
  std::unique_ptr<FrameGenerator> generator =
      FrameGenerator::CreateSlideGenerator(width, height, frame_repeat_count);
  std::unique_ptr<FrameGeneratorCapturer> capturer(
      new FrameGeneratorCapturer(clock, std::move(generator), target_fps));
  return capturer->Init() ? capturer.release() : nullptr;
}
// Takes ownership of |frame_generator| (must be non-null) and produces frames
// at (at most) |target_fps| on a dedicated high-priority TaskQueue.
FrameGeneratorCapturer::FrameGeneratorCapturer(
    Clock* clock,
    std::unique_ptr<FrameGenerator> frame_generator,
    int target_fps)
    : clock_(clock),
      sending_(false),
      sink_(nullptr),
      sink_wants_observer_(nullptr),
      frame_generator_(std::move(frame_generator)),
      target_fps_(target_fps),
      first_frame_capture_time_(-1),
      task_queue_("FrameGenCapQ",
                  rtc::TaskQueue::Priority::HIGH) {
  RTC_DCHECK(frame_generator_);
  RTC_DCHECK_GT(target_fps, 0);
}
FrameGeneratorCapturer::~FrameGeneratorCapturer() {
  // Stop frame delivery before members are destroyed. |task_queue_| is the
  // last member, so its destructor runs first and drains pending tasks.
  Stop();
}

// Sets the rotation stamped onto every subsequently captured frame.
void FrameGeneratorCapturer::SetFakeRotation(VideoRotation rotation) {
  rtc::CritScope cs(&lock_);
  fake_rotation_ = rotation;
}
bool FrameGeneratorCapturer::Init() {
  // A file-based |frame_generator_| may have failed to open its input (e.g.
  // the file moved), so fail gracefully instead of crashing.
  if (!frame_generator_)
    return false;

  // Kick off the repeating capture task at the configured frame interval.
  const int framerate_fps = GetCurrentConfiguredFramerate();
  const uint32_t interval_ms = 1000 / framerate_fps;
  task_queue_.PostDelayedTask(
      std::unique_ptr<rtc::QueuedTask>(new InsertFrameTask(this, interval_ms)),
      interval_ms);
  return true;
}
// Pulls the next frame from the generator, stamps capture time, NTP time and
// rotation, and forwards it to the registered sink (if any). No-op unless
// Start() has been called.
void FrameGeneratorCapturer::InsertFrame() {
  rtc::CritScope cs(&lock_);
  if (sending_) {
    VideoFrame* frame = frame_generator_->NextFrame();
    frame->set_timestamp_us(clock_->TimeInMicroseconds());
    frame->set_ntp_time_ms(clock_->CurrentNtpInMilliseconds());
    frame->set_rotation(fake_rotation_);
    // Record the NTP time of the very first delivered frame.
    if (first_frame_capture_time_ == -1) {
      first_frame_capture_time_ = frame->ntp_time_ms();
    }
    if (sink_) {
      // AdaptFrame() may drop or rescale the frame; only forward if a frame
      // survives adaptation.
      rtc::Optional<VideoFrame> out_frame = AdaptFrame(*frame);
      if (out_frame)
        sink_->OnFrame(*out_frame);
    }
  }
}
// Enables frame delivery to the sink.
void FrameGeneratorCapturer::Start() {
  rtc::CritScope cs(&lock_);
  sending_ = true;
}

// Pauses frame delivery; the periodic task keeps running but InsertFrame()
// becomes a no-op.
void FrameGeneratorCapturer::Stop() {
  rtc::CritScope cs(&lock_);
  sending_ = false;
}

// Changes the resolution of subsequently generated frames.
void FrameGeneratorCapturer::ChangeResolution(size_t width, size_t height) {
  rtc::CritScope cs(&lock_);
  frame_generator_->ChangeResolution(width, height);
}

// Registers a single observer that is notified from AddOrUpdateSink().
// May only be called once.
void FrameGeneratorCapturer::SetSinkWantsObserver(SinkWantsObserver* observer) {
  rtc::CritScope cs(&lock_);
  RTC_DCHECK(!sink_wants_observer_);
  sink_wants_observer_ = observer;
}
// Registers |sink| (only a single sink is supported) and applies its wants:
// resolution adaptation is delegated to the VideoCapturer base class, while
// framerate is capped locally via |wanted_fps_|.
void FrameGeneratorCapturer::AddOrUpdateSink(
    rtc::VideoSinkInterface<VideoFrame>* sink,
    const rtc::VideoSinkWants& wants) {
  rtc::CritScope cs(&lock_);
  RTC_CHECK(!sink_ || sink_ == sink);
  sink_ = sink;
  if (sink_wants_observer_)
    sink_wants_observer_->OnSinkWantsChanged(sink, wants);

  // Handle framerate within this class, just pass on resolution for possible
  // adaptation.
  rtc::VideoSinkWants resolution_wants = wants;
  resolution_wants.max_framerate_fps = std::numeric_limits<int>::max();
  VideoCapturer::AddOrUpdateSink(sink, resolution_wants);

  // Ignore any requests for framerate higher than initially configured.
  if (wants.max_framerate_fps < target_fps_) {
    wanted_fps_.emplace(wants.max_framerate_fps);
  } else {
    wanted_fps_.reset();
  }
}
// Unregisters the current sink; |sink| must match the registered one.
void FrameGeneratorCapturer::RemoveSink(
    rtc::VideoSinkInterface<VideoFrame>* sink) {
  rtc::CritScope cs(&lock_);
  RTC_CHECK(sink_ == sink);
  sink_ = nullptr;
}

// Posts a single immediate frame capture, independent of the periodic task.
void FrameGeneratorCapturer::ForceFrame() {
  // One-time non-repeating task,
  // therefore repeat_interval_ms is 0 in InsertFrameTask()
  task_queue_.PostTask(
      std::unique_ptr<rtc::QueuedTask>(new InsertFrameTask(this, 0)));
}
int FrameGeneratorCapturer::GetCurrentConfiguredFramerate() {
rtc::CritScope cs(&lock_);
if (wanted_fps_ && *wanted_fps_ < target_fps_)
return *wanted_fps_;
return target_fps_;
}
} // namespace test
} // namespace webrtc

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FRAME_GENERATOR_CAPTURER_H_
#define WEBRTC_TEST_FRAME_GENERATOR_CAPTURER_H_
#include <memory>
#include <string>
#include "webrtc/api/video/video_frame.h"
#include "webrtc/rtc_base/criticalsection.h"
#include "webrtc/rtc_base/task_queue.h"
#include "webrtc/test/video_capturer.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class EventTimerWrapper;
namespace test {
class FrameGenerator;
// A VideoCapturer that produces frames from a FrameGenerator at a configured
// frame rate on its own task queue; used by tests as a deterministic
// replacement for a real camera.
class FrameGeneratorCapturer : public VideoCapturer {
 public:
  // Observer notified whenever a sink updates its wants.
  class SinkWantsObserver {
   public:
    // OnSinkWantsChanged is called when FrameGeneratorCapturer::AddOrUpdateSink
    // is called.
    virtual void OnSinkWantsChanged(rtc::VideoSinkInterface<VideoFrame>* sink,
                                    const rtc::VideoSinkWants& wants) = 0;

   protected:
    virtual ~SinkWantsObserver() {}
  };

  // Factory functions; each returns nullptr if initialization fails, and
  // ownership otherwise passes to the caller.
  static FrameGeneratorCapturer* Create(int width,
                                        int height,
                                        int target_fps,
                                        Clock* clock);

  static FrameGeneratorCapturer* CreateFromYuvFile(const std::string& file_name,
                                                   size_t width,
                                                   size_t height,
                                                   int target_fps,
                                                   Clock* clock);

  static FrameGeneratorCapturer* CreateSlideGenerator(int width,
                                                      int height,
                                                      int frame_repeat_count,
                                                      int target_fps,
                                                      Clock* clock);
  virtual ~FrameGeneratorCapturer();

  void Start() override;
  void Stop() override;
  // Changes the resolution of subsequently generated frames.
  void ChangeResolution(size_t width, size_t height);

  void SetSinkWantsObserver(SinkWantsObserver* observer);

  void AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
                       const rtc::VideoSinkWants& wants) override;
  void RemoveSink(rtc::VideoSinkInterface<VideoFrame>* sink) override;

  // Posts one immediate frame capture outside the periodic schedule.
  void ForceFrame();
  // Sets the rotation stamped on subsequently captured frames.
  void SetFakeRotation(VideoRotation rotation);

  // NTP time of the first captured frame, or -1 if none captured yet.
  int64_t first_frame_capture_time() const { return first_frame_capture_time_; }

  FrameGeneratorCapturer(Clock* clock,
                         std::unique_ptr<FrameGenerator> frame_generator,
                         int target_fps);
  bool Init();

 private:
  class InsertFrameTask;

  void InsertFrame();
  static bool Run(void* obj);
  // Returns the effective frame rate: the sink-requested rate when lower
  // than |target_fps_|, otherwise |target_fps_|.
  int GetCurrentConfiguredFramerate();

  Clock* const clock_;
  bool sending_;
  rtc::VideoSinkInterface<VideoFrame>* sink_ RTC_GUARDED_BY(&lock_);
  SinkWantsObserver* sink_wants_observer_ RTC_GUARDED_BY(&lock_);

  rtc::CriticalSection lock_;
  std::unique_ptr<FrameGenerator> frame_generator_;

  int target_fps_ RTC_GUARDED_BY(&lock_);
  rtc::Optional<int> wanted_fps_ RTC_GUARDED_BY(&lock_);
  VideoRotation fake_rotation_ = kVideoRotation_0;

  int64_t first_frame_capture_time_;

  // Must be the last field, so it will be deconstructed first as tasks
  // in the TaskQueue access other fields of the instance of this class.
  rtc::TaskQueue task_queue_;
};
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FRAME_GENERATOR_CAPTURER_H_

View File

@ -0,0 +1,189 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <memory>
#include <string>
#include "webrtc/test/frame_generator.h"
#include "webrtc/test/gtest.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
namespace test {
static const int kFrameWidth = 4;
static const int kFrameHeight = 4;
// Fixture that writes two small temporary YUV files (a one-frame and a
// two-frame file) in SetUp() and removes them in TearDown(), with helpers
// for checking and hashing generated frames.
class FrameGeneratorTest : public ::testing::Test {
 public:
  void SetUp() override {
    two_frame_filename_ =
        test::TempFilename(test::OutputPath(), "2_frame_yuv_file");
    one_frame_filename_ =
        test::TempFilename(test::OutputPath(), "1_frame_yuv_file");

    // Two-frame file: an all-black frame followed by an all-gray frame.
    FILE* file = fopen(two_frame_filename_.c_str(), "wb");
    WriteYuvFile(file, 0, 0, 0);
    WriteYuvFile(file, 127, 127, 127);
    fclose(file);
    // One-frame file: a single all-white frame.
    file = fopen(one_frame_filename_.c_str(), "wb");
    WriteYuvFile(file, 255, 255, 255);
    fclose(file);
  }
  void TearDown() override {
    remove(one_frame_filename_.c_str());
    remove(two_frame_filename_.c_str());
  }

 protected:
  // Appends one I420 frame with constant Y/U/V plane values to |file|.
  void WriteYuvFile(FILE* file, uint8_t y, uint8_t u, uint8_t v) {
    assert(file);
    std::unique_ptr<uint8_t[]> plane_buffer(new uint8_t[y_size]);
    memset(plane_buffer.get(), y, y_size);
    fwrite(plane_buffer.get(), 1, y_size, file);
    memset(plane_buffer.get(), u, uv_size);
    fwrite(plane_buffer.get(), 1, uv_size, file);
    memset(plane_buffer.get(), v, uv_size);
    fwrite(plane_buffer.get(), 1, uv_size, file);
  }

  // Asserts that |frame| has the given constant plane values and zeroed
  // timestamps, then mutates the timestamps so a later zero-check proves the
  // generator resets them on every frame.
  void CheckFrameAndMutate(VideoFrame* frame, uint8_t y, uint8_t u, uint8_t v) {
    // Check that frame is valid, has the correct color and timestamp are clean.
    ASSERT_NE(nullptr, frame);
    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        frame->video_frame_buffer()->ToI420();
    const uint8_t* buffer;
    buffer = i420_buffer->DataY();
    for (int i = 0; i < y_size; ++i)
      ASSERT_EQ(y, buffer[i]);
    buffer = i420_buffer->DataU();
    for (int i = 0; i < uv_size; ++i)
      ASSERT_EQ(u, buffer[i]);
    buffer = i420_buffer->DataV();
    for (int i = 0; i < uv_size; ++i)
      ASSERT_EQ(v, buffer[i]);
    EXPECT_EQ(0, frame->ntp_time_ms());
    EXPECT_EQ(0, frame->render_time_ms());
    EXPECT_EQ(0u, frame->timestamp());

    // Mutate to something arbitrary non-zero.
    frame->set_ntp_time_ms(11);
    frame->set_timestamp_us(12);
    frame->set_timestamp(13);
  }

  // Generate a 64-bit hash from the frame's buffer.
  uint64_t Hash(VideoFrame* frame) {
    uint64_t hash = 19;
    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        frame->video_frame_buffer()->ToI420();
    const uint8_t* buffer = i420_buffer->DataY();
    for (int i = 0; i < y_size; ++i) {
      hash = (37 * hash) + buffer[i];
    }
    buffer = i420_buffer->DataU();
    for (int i = 0; i < uv_size; ++i) {
      hash = (37 * hash) + buffer[i];
    }
    buffer = i420_buffer->DataV();
    for (int i = 0; i < uv_size; ++i) {
      hash = (37 * hash) + buffer[i];
    }
    return hash;
  }

  std::string two_frame_filename_;
  std::string one_frame_filename_;
  // Plane sizes for the kFrameWidth x kFrameHeight I420 test frames.
  const int y_size = kFrameWidth * kFrameHeight;
  const int uv_size = ((kFrameHeight + 1) / 2) * ((kFrameWidth + 1) / 2);
};
// A one-frame file loops, yielding the same frame repeatedly.
TEST_F(FrameGeneratorTest, SingleFrameFile) {
  std::unique_ptr<FrameGenerator> generator(FrameGenerator::CreateFromYuvFile(
      std::vector<std::string>(1, one_frame_filename_), kFrameWidth,
      kFrameHeight, 1));
  CheckFrameAndMutate(generator->NextFrame(), 255, 255, 255);
  CheckFrameAndMutate(generator->NextFrame(), 255, 255, 255);
}

// A two-frame file wraps back to its first frame after the last.
TEST_F(FrameGeneratorTest, TwoFrameFile) {
  std::unique_ptr<FrameGenerator> generator(FrameGenerator::CreateFromYuvFile(
      std::vector<std::string>(1, two_frame_filename_), kFrameWidth,
      kFrameHeight, 1));
  CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
  CheckFrameAndMutate(generator->NextFrame(), 127, 127, 127);
  CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
}

// Multiple files are played back-to-back before the sequence repeats.
TEST_F(FrameGeneratorTest, MultipleFrameFiles) {
  std::vector<std::string> files;
  files.push_back(two_frame_filename_);
  files.push_back(one_frame_filename_);

  std::unique_ptr<FrameGenerator> generator(
      FrameGenerator::CreateFromYuvFile(files, kFrameWidth, kFrameHeight, 1));
  CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
  CheckFrameAndMutate(generator->NextFrame(), 127, 127, 127);
  CheckFrameAndMutate(generator->NextFrame(), 255, 255, 255);
  CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
}

// Each frame of a file is shown |frame_repeat_count| times in a row.
TEST_F(FrameGeneratorTest, TwoFrameFileWithRepeat) {
  const int kRepeatCount = 3;
  std::unique_ptr<FrameGenerator> generator(FrameGenerator::CreateFromYuvFile(
      std::vector<std::string>(1, two_frame_filename_), kFrameWidth,
      kFrameHeight, kRepeatCount));
  for (int i = 0; i < kRepeatCount; ++i)
    CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
  for (int i = 0; i < kRepeatCount; ++i)
    CheckFrameAndMutate(generator->NextFrame(), 127, 127, 127);
  CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
}
// Repetition applies per frame across multiple input files as well.
TEST_F(FrameGeneratorTest, MultipleFrameFilesWithRepeat) {
  const int kRepeatCount = 3;
  std::vector<std::string> files;
  files.push_back(two_frame_filename_);
  files.push_back(one_frame_filename_);
  std::unique_ptr<FrameGenerator> generator(FrameGenerator::CreateFromYuvFile(
      files, kFrameWidth, kFrameHeight, kRepeatCount));
  for (int i = 0; i < kRepeatCount; ++i)
    CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
  for (int i = 0; i < kRepeatCount; ++i)
    CheckFrameAndMutate(generator->NextFrame(), 127, 127, 127);
  for (int i = 0; i < kRepeatCount; ++i)
    CheckFrameAndMutate(generator->NextFrame(), 255, 255, 255);
  CheckFrameAndMutate(generator->NextFrame(), 0, 0, 0);
}

// The slide generator only changes content every |kRepeatCount| frames,
// which is verified by hashing consecutive frame buffers.
TEST_F(FrameGeneratorTest, SlideGenerator) {
  const int kGenCount = 9;
  const int kRepeatCount = 3;
  std::unique_ptr<FrameGenerator> generator(
      FrameGenerator::CreateSlideGenerator(
          kFrameWidth, kFrameHeight, kRepeatCount));
  uint64_t hashes[kGenCount];
  for (int i = 0; i < kGenCount; ++i) {
    hashes[i] = Hash(generator->NextFrame());
  }
  // Check that the buffer changes only every |kRepeatCount| frames.
  for (int i = 1; i < kGenCount; ++i) {
    if (i % kRepeatCount == 0) {
      EXPECT_NE(hashes[i-1], hashes[i]);
    } else {
      EXPECT_EQ(hashes[i-1], hashes[i]);
    }
  }
}
} // namespace test
} // namespace webrtc

91
test/frame_utils.cc Normal file
View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <string.h>
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/api/video/video_frame.h"
#include "webrtc/test/frame_utils.h"
namespace webrtc {
namespace test {
bool EqualPlane(const uint8_t* data1,
                const uint8_t* data2,
                int stride1,
                int stride2,
                int width,
                int height) {
  // Compares |height| rows of |width| bytes each. Padding bytes beyond
  // |width| (up to the respective strides) are ignored.
  const uint8_t* row1 = data1;
  const uint8_t* row2 = data2;
  for (int row = 0; row < height; ++row) {
    if (memcmp(row1, row2, width) != 0)
      return false;
    row1 += stride1;
    row2 += stride2;
  }
  return true;
}
bool FramesEqual(const webrtc::VideoFrame& f1, const webrtc::VideoFrame& f2) {
  // Frames are equal when all timing metadata matches and the pixel buffers
  // compare equal.
  const bool metadata_equal = f1.timestamp() == f2.timestamp() &&
                              f1.ntp_time_ms() == f2.ntp_time_ms() &&
                              f1.render_time_ms() == f2.render_time_ms();
  return metadata_equal &&
         FrameBufsEqual(f1.video_frame_buffer(), f2.video_frame_buffer());
}
bool FrameBufsEqual(const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& f1,
                    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& f2) {
  // Identical pointers (including both nullptr) are trivially equal.
  if (f1 == f2)
    return true;
  // Exclude the case where exactly one side is nullptr.
  if (!f1 || !f2)
    return false;
  // Cheap metadata comparison before touching pixel data.
  if (f1->width() != f2->width() || f1->height() != f2->height() ||
      f1->type() != f2->type()) {
    return false;
  }
  // Convert both buffers to I420 and compare each plane byte-wise.
  rtc::scoped_refptr<webrtc::I420BufferInterface> b1 = f1->ToI420();
  rtc::scoped_refptr<webrtc::I420BufferInterface> b2 = f2->ToI420();
  if (!EqualPlane(b1->DataY(), b2->DataY(), b1->StrideY(), b2->StrideY(),
                  b1->width(), b1->height())) {
    return false;
  }
  if (!EqualPlane(b1->DataU(), b2->DataU(), b1->StrideU(), b2->StrideU(),
                  b1->ChromaWidth(), b1->ChromaHeight())) {
    return false;
  }
  return EqualPlane(b1->DataV(), b2->DataV(), b1->StrideV(), b2->StrideV(),
                    b1->ChromaWidth(), b1->ChromaHeight());
}
rtc::scoped_refptr<I420Buffer> ReadI420Buffer(int width, int height, FILE *f) {
  // Tightly packed I420: the Y plane is width*height bytes; the U and V
  // planes are each half resolution in both dimensions, rounded up.
  const int half_width = (width + 1) / 2;
  const size_t size_y = static_cast<size_t>(width) * height;
  const size_t size_uv = static_cast<size_t>(half_width) * ((height + 1) / 2);
  // Explicit stride, no padding between rows.
  rtc::scoped_refptr<I420Buffer> buffer(
      I420Buffer::Create(width, height, width, half_width, half_width));
  const bool read_ok =
      fread(buffer->MutableDataY(), 1, size_y, f) == size_y &&
      fread(buffer->MutableDataU(), 1, size_uv, f) == size_uv &&
      fread(buffer->MutableDataV(), 1, size_uv, f) == size_uv;
  // Return nullptr on a short read (e.g. EOF in the middle of a frame).
  return read_ok ? buffer : nullptr;
}
} // namespace test
} // namespace webrtc

48
test/frame_utils.h Normal file
View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FRAME_UTILS_H_
#define WEBRTC_TEST_FRAME_UTILS_H_
#include <stdint.h>
#include "webrtc/rtc_base/scoped_ref_ptr.h"
namespace webrtc {
class I420Buffer;
class VideoFrame;
class VideoFrameBuffer;
namespace test {
// Returns true if the |width| x |height| pixel regions of the two planes,
// read with strides |stride1| and |stride2| respectively, are byte-wise
// equal.
bool EqualPlane(const uint8_t* data1,
                const uint8_t* data2,
                int stride1,
                int stride2,
                int width,
                int height);

// Convenience overload for two planes sharing the same stride.
static inline bool EqualPlane(const uint8_t* data1,
                              const uint8_t* data2,
                              int stride,
                              int width,
                              int height) {
  return EqualPlane(data1, data2, stride, stride, width, height);
}

// Returns true if the frames match in timestamps and pixel content.
bool FramesEqual(const webrtc::VideoFrame& f1, const webrtc::VideoFrame& f2);

// Returns true if both buffers are null, or if their I420 representations
// have equal dimensions, type and plane contents.
bool FrameBufsEqual(const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& f1,
                    const rtc::scoped_refptr<webrtc::VideoFrameBuffer>& f2);

// Reads one tightly packed I420 frame of the given size from the open file;
// returns nullptr on a short read.
rtc::scoped_refptr<I420Buffer> ReadI420Buffer(int width, int height, FILE *);
} // namespace test
} // namespace webrtc
#endif // WEBRTC_TEST_FRAME_UTILS_H_

378
test/fuzzers/BUILD.gn Normal file
View File

@ -0,0 +1,378 @@
# Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("../../webrtc.gni")
import("//build/config/features.gni")
import("//testing/libfuzzer/fuzzer_test.gni")
# Fuzzer entry point that links the libFuzzer driver together with the
# WebRTC field-trial and metrics defaults required by the targets below.
rtc_static_library("webrtc_fuzzer_main") {
  sources = [
    "webrtc_fuzzer_main.cc",
  ]
  deps = [
    "../../rtc_base:rtc_base_approved",
    "../../system_wrappers:field_trial_default",
    "../../system_wrappers:metrics_default",
    "//testing/libfuzzer:libfuzzer_main",
  ]
}

# Wrapper around fuzzer_test() so every WebRTC fuzzer automatically depends
# on :webrtc_fuzzer_main and uses the project-wide Clang configuration.
template("webrtc_fuzzer_test") {
  fuzzer_test(target_name) {
    forward_variables_from(invoker, "*")
    deps += [ ":webrtc_fuzzer_main" ]
    if (!build_with_chromium && is_clang) {
      # Suppress warnings from the Chromium Clang plugin (bugs.webrtc.org/163).
      suppressed_configs = [ "//build/config/clang:find_bad_constructs" ]
    }
  }
}
# Video payload depacketizer fuzzers.
webrtc_fuzzer_test("h264_depacketizer_fuzzer") {
  sources = [
    "h264_depacketizer_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
  ]
}

webrtc_fuzzer_test("vp8_depacketizer_fuzzer") {
  sources = [
    "vp8_depacketizer_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
  ]
}

webrtc_fuzzer_test("vp9_depacketizer_fuzzer") {
  sources = [
    "vp9_depacketizer_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
  ]
}

# Bitstream / QP parser fuzzers.
webrtc_fuzzer_test("vp8_qp_parser_fuzzer") {
  sources = [
    "vp8_qp_parser_fuzzer.cc",
  ]
  deps = [
    "../../modules/video_coding:video_coding_utility",
    "../../modules/video_coding/",
  ]
}

webrtc_fuzzer_test("vp9_qp_parser_fuzzer") {
  sources = [
    "vp9_qp_parser_fuzzer.cc",
  ]
  deps = [
    "../../modules/video_coding:video_coding_utility",
    "../../modules/video_coding/",
  ]
}

webrtc_fuzzer_test("h264_bitstream_parser_fuzzer") {
  sources = [
    "h264_bitstream_parser_fuzzer.cc",
  ]
  deps = [
    "../../common_video",
    "../../modules/video_coding/",
  ]
}

# Forward-error-correction (FlexFEC / ULPFEC) fuzzers.
webrtc_fuzzer_test("flexfec_header_reader_fuzzer") {
  sources = [
    "flexfec_header_reader_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
    "../../rtc_base:rtc_base_approved",
  ]
}

webrtc_fuzzer_test("flexfec_sender_fuzzer") {
  sources = [
    "flexfec_sender_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
    "../../system_wrappers",
  ]
  libfuzzer_options = [ "max_len=200" ]
}

webrtc_fuzzer_test("ulpfec_header_reader_fuzzer") {
  sources = [
    "ulpfec_header_reader_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
    "../../modules/rtp_rtcp:fec_test_helper",
    "../../rtc_base:rtc_base_approved",
  ]
}

webrtc_fuzzer_test("ulpfec_generator_fuzzer") {
  sources = [
    "ulpfec_generator_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
    "../../modules/rtp_rtcp:fec_test_helper",
    "../../rtc_base:rtc_base_approved",
  ]
}

webrtc_fuzzer_test("flexfec_receiver_fuzzer") {
  sources = [
    "flexfec_receiver_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
    "../../rtc_base:rtc_base_approved",
  ]
  libfuzzer_options = [ "max_len=2000" ]
}
# Video packet buffer fuzzer.
webrtc_fuzzer_test("packet_buffer_fuzzer") {
  sources = [
    "packet_buffer_fuzzer.cc",
  ]
  deps = [
    "../../modules/video_coding/",
    "../../system_wrappers",
  ]
  libfuzzer_options = [ "max_len=2000" ]
}

# RTP / RTCP parsing and congestion-control fuzzers.
webrtc_fuzzer_test("rtcp_receiver_fuzzer") {
  sources = [
    "rtcp_receiver_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
    "../../rtc_base:rtc_base_approved",
    "../../system_wrappers:system_wrappers",
  ]
  seed_corpus = "corpora/rtcp-corpus"
}

webrtc_fuzzer_test("rtp_packet_fuzzer") {
  sources = [
    "rtp_packet_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
  ]
  seed_corpus = "corpora/rtp-corpus"
}

webrtc_fuzzer_test("rtp_header_fuzzer") {
  sources = [
    "rtp_header_fuzzer.cc",
  ]
  deps = [
    "../../modules/rtp_rtcp",
  ]
}

webrtc_fuzzer_test("congestion_controller_feedback_fuzzer") {
  sources = [
    "congestion_controller_feedback_fuzzer.cc",
  ]
  deps = [
    "../../logging:rtc_event_log_api",
    "../../logging:rtc_event_log_impl",
    "../../modules/congestion_controller",
    "../../modules/pacing",
    "../../modules/remote_bitrate_estimator:remote_bitrate_estimator",
    "../../modules/rtp_rtcp",
  ]
}

# Shared helper library used by the audio decoder fuzzers below.
rtc_static_library("audio_decoder_fuzzer") {
  sources = [
    "audio_decoder_fuzzer.cc",
    "audio_decoder_fuzzer.h",
  ]
  deps = [
    "../..:webrtc_common",
    "../../api:optional",
    "../../api/audio_codecs:audio_codecs_api",
    "../../modules/rtp_rtcp",
    "../../rtc_base:rtc_base_approved",
  ]
}

webrtc_fuzzer_test("audio_decoder_ilbc_fuzzer") {
  sources = [
    "audio_decoder_ilbc_fuzzer.cc",
  ]
  deps = [
    ":audio_decoder_fuzzer",
    "../../modules/audio_coding:ilbc",
  ]
}

webrtc_fuzzer_test("audio_decoder_isac_fuzzer") {
  sources = [
    "audio_decoder_isac_fuzzer.cc",
  ]
  deps = [
    ":audio_decoder_fuzzer",
    "../../modules/audio_coding:isac",
  ]
}

webrtc_fuzzer_test("audio_decoder_isac_incoming_packet_fuzzer") {
  sources = [
    "audio_decoder_isac_incoming_packet_fuzzer.cc",
  ]
  deps = [
    ":audio_decoder_fuzzer",
    "../../modules/audio_coding:isac",
  ]
}

webrtc_fuzzer_test("audio_decoder_isacfix_fuzzer") {
  sources = [
    "audio_decoder_isacfix_fuzzer.cc",
  ]
  deps = [
    ":audio_decoder_fuzzer",
    "../../modules/audio_coding:isac_fix",
  ]
}
webrtc_fuzzer_test("audio_decoder_opus_fuzzer") {
  sources = [
    "audio_decoder_opus_fuzzer.cc",
  ]
  deps = [
    ":audio_decoder_fuzzer",
    "../../modules/audio_coding:webrtc_opus",
  ]
}

webrtc_fuzzer_test("audio_decoder_opus_redundant_fuzzer") {
  sources = [
    "audio_decoder_opus_redundant_fuzzer.cc",
  ]
  deps = [
    ":audio_decoder_fuzzer",
    "../../modules/audio_coding:webrtc_opus",
  ]
}

# Signaling / transport parser fuzzers (TURN, SDP, STUN, PseudoTCP).
webrtc_fuzzer_test("turn_unwrap_fuzzer") {
  sources = [
    "turn_unwrap_fuzzer.cc",
  ]
  deps = [
    "../../media:media",
  ]
}

webrtc_fuzzer_test("neteq_rtp_fuzzer") {
  sources = [
    "neteq_rtp_fuzzer.cc",
  ]
  deps = [
    "../../api:array_view",
    "../../modules/audio_coding:neteq",
    "../../modules/audio_coding:neteq_test_tools",
    "../../modules/audio_coding:neteq_tools_minimal",
    "../../modules/audio_coding:pcm16b",
    "../../modules/rtp_rtcp",
    "../../rtc_base:rtc_base_approved",
    "../../rtc_base:rtc_base_tests_utils",
  ]
}

webrtc_fuzzer_test("residual_echo_detector_fuzzer") {
  sources = [
    "residual_echo_detector_fuzzer.cc",
  ]
  deps = [
    "../../modules/audio_processing:audio_processing",
    "../../rtc_base:rtc_base_approved",
  ]
}

webrtc_fuzzer_test("sdp_parser_fuzzer") {
  sources = [
    "sdp_parser_fuzzer.cc",
  ]
  deps = [
    "../../pc:libjingle_peerconnection",
  ]
  seed_corpus = "corpora/sdp-corpus"
}

webrtc_fuzzer_test("stun_parser_fuzzer") {
  sources = [
    "stun_parser_fuzzer.cc",
  ]
  deps = [
    "../../p2p:rtc_p2p",
  ]
  seed_corpus = "corpora/stun-corpus"
  dict = "corpora/stun.tokens"
}

webrtc_fuzzer_test("stun_validator_fuzzer") {
  sources = [
    "stun_validator_fuzzer.cc",
  ]
  deps = [
    "../../p2p:rtc_p2p",
  ]
  seed_corpus = "corpora/stun-corpus"
  dict = "corpora/stun.tokens"
}

webrtc_fuzzer_test("pseudotcp_parser_fuzzer") {
  sources = [
    "pseudotcp_parser_fuzzer.cc",
  ]
  deps = [
    "../../p2p:rtc_p2p",
    "../../rtc_base:rtc_base",
  ]
}

webrtc_fuzzer_test("transport_feedback_packet_loss_tracker_fuzzer") {
  sources = [
    "transport_feedback_packet_loss_tracker_fuzzer.cc",
  ]
  deps = [
    "../../api:array_view",
    "../../modules/rtp_rtcp",
    "../../rtc_base:rtc_base_approved",
    "../../voice_engine",
  ]
}

webrtc_fuzzer_test("audio_processing_fuzzer") {
  sources = [
    "audio_processing_fuzzer.cc",
    "audio_processing_fuzzer.h",
    "audio_processing_fuzzer_configs.cc",
  ]
  deps = [
    "../../api:optional",
    "../../modules:module_api",
    "../../modules/audio_processing",
    "../../rtc_base:rtc_base_approved",
  ]
}

3
test/fuzzers/DEPS Normal file
View File

@ -0,0 +1,3 @@
include_rules = [
"+webrtc",
]

2
test/fuzzers/OWNERS Normal file
View File

@ -0,0 +1,2 @@
pbos@webrtc.org
henrik.lundin@webrtc.org

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/fuzzers/audio_decoder_fuzzer.h"
#include <limits>
#include "webrtc/api/audio_codecs/audio_decoder.h"
#include "webrtc/api/optional.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace {
// Reads B big-endian bytes from *data into *value. On success, advances
// *data past the consumed bytes, shrinks *remaining_size accordingly and
// returns true; returns false (consuming nothing) if fewer than B bytes
// remain.
template <typename T, unsigned int B = sizeof(T)>
bool ParseInt(const uint8_t** data, size_t* remaining_size, T* value) {
  static_assert(std::numeric_limits<T>::is_integer, "Type must be an integer.");
  static_assert(sizeof(T) <= sizeof(uint64_t),
                "Cannot read wider than uint64_t.");
  static_assert(B <= sizeof(T), "T must be at least B bytes wide.");
  if (*remaining_size < B) {
    return false;
  }
  const uint64_t raw = ByteReader<uint64_t, B>::ReadBigEndian(*data);
  *value = static_cast<T>(raw);
  *data += B;
  *remaining_size -= B;
  return true;
}
} // namespace
// This function reads two bytes from the beginning of |data|, interprets them
// as the first packet length, and reads this many bytes if available. The
// payload is inserted into the decoder, and the process continues until no
// more data is available. Either AudioDecoder::Decode or
// AudioDecoder::DecodeRedundant is used, depending on the value of
// |decode_type|.
void FuzzAudioDecoder(DecoderFunctionType decode_type,
                      const uint8_t* data,
                      size_t size,
                      AudioDecoder* decoder,
                      int sample_rate_hz,
                      size_t max_decoded_bytes,
                      int16_t* decoded) {
  const uint8_t* read_ptr = data;
  size_t bytes_left = size;
  size_t packet_len;
  while (ParseInt<size_t, 2>(&read_ptr, &bytes_left, &packet_len)) {
    // Stop once the declared payload no longer fits in the remaining input.
    if (packet_len > bytes_left) {
      break;
    }
    AudioDecoder::SpeechType speech_type;
    if (decode_type == DecoderFunctionType::kNormalDecode) {
      decoder->Decode(read_ptr, packet_len, sample_rate_hz, max_decoded_bytes,
                      decoded, &speech_type);
    } else {
      decoder->DecodeRedundant(read_ptr, packet_len, sample_rate_hz,
                               max_decoded_bytes, decoded, &speech_type);
    }
    read_ptr += packet_len;
    bytes_left -= packet_len;
  }
}
// This function is similar to FuzzAudioDecoder, but also reads fuzzed data
// into RTP header values. The fuzzed data and values are sent to the
// decoder's IncomingPacket method.
void FuzzAudioDecoderIncomingPacket(const uint8_t* data,
                                    size_t size,
                                    AudioDecoder* decoder) {
  const uint8_t* read_ptr = data;
  size_t bytes_left = size;
  size_t packet_len;
  uint16_t rtp_sequence_number;
  uint32_t rtp_timestamp;
  uint32_t arrival_timestamp;
  // Each record is: 2-byte payload length, RTP sequence number, RTP
  // timestamp, arrival timestamp, then the payload itself. Stop at the first
  // field that cannot be parsed or the first payload that does not fit.
  while (ParseInt<size_t, 2>(&read_ptr, &bytes_left, &packet_len) &&
         ParseInt(&read_ptr, &bytes_left, &rtp_sequence_number) &&
         ParseInt(&read_ptr, &bytes_left, &rtp_timestamp) &&
         ParseInt(&read_ptr, &bytes_left, &arrival_timestamp) &&
         packet_len <= bytes_left) {
    decoder->IncomingPacket(read_ptr, packet_len, rtp_sequence_number,
                            rtp_timestamp, arrival_timestamp);
    read_ptr += packet_len;
    bytes_left -= packet_len;
  }
}
} // namespace webrtc

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FUZZERS_AUDIO_DECODER_FUZZER_H_
#define WEBRTC_TEST_FUZZERS_AUDIO_DECODER_FUZZER_H_

#include <stddef.h>

#include "webrtc/typedefs.h"

namespace webrtc {

class AudioDecoder;

// Selects which AudioDecoder entry point FuzzAudioDecoder exercises.
enum class DecoderFunctionType {
  kNormalDecode,
  kRedundantDecode,
};

// Splits |data| into 2-byte-length-prefixed packets and feeds each one to
// |decoder| via Decode or DecodeRedundant (per |decode_type|), writing
// decoded samples into |decoded| (capacity |max_decoded_bytes| bytes).
void FuzzAudioDecoder(DecoderFunctionType decode_type,
                      const uint8_t* data,
                      size_t size,
                      AudioDecoder* decoder,
                      int sample_rate_hz,
                      size_t max_decoded_bytes,
                      int16_t* decoded);

// Parses packets plus RTP header fields (sequence number, RTP timestamp,
// arrival timestamp) from |data| and passes them to
// |decoder|->IncomingPacket().
void FuzzAudioDecoderIncomingPacket(const uint8_t* data,
                                    size_t size,
                                    AudioDecoder* decoder);

}  // namespace webrtc

#endif  // WEBRTC_TEST_FUZZERS_AUDIO_DECODER_FUZZER_H_

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/ilbc/audio_decoder_ilbc.h"
#include "webrtc/test/fuzzers/audio_decoder_fuzzer.h"
namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
AudioDecoderIlbcImpl dec;
static const int kSampleRateHz = 8000;
static const size_t kAllocatedOuputSizeSamples = kSampleRateHz / 10;
int16_t output[kAllocatedOuputSizeSamples];
FuzzAudioDecoder(DecoderFunctionType::kNormalDecode, data, size, &dec,
kSampleRateHz, sizeof(output), output);
}
} // namespace webrtc

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
#include "webrtc/test/fuzzers/audio_decoder_fuzzer.h"
namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
const int sample_rate_hz = size % 2 == 0 ? 16000 : 32000; // 16 or 32 kHz.
static const size_t kAllocatedOuputSizeSamples = 32000 / 10; // 100 ms.
int16_t output[kAllocatedOuputSizeSamples];
AudioDecoderIsacFloatImpl dec(sample_rate_hz);
FuzzAudioDecoder(DecoderFunctionType::kNormalDecode, data, size, &dec,
sample_rate_hz, sizeof(output), output);
}
} // namespace webrtc

View File

@ -0,0 +1,19 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"
#include "webrtc/test/fuzzers/audio_decoder_fuzzer.h"
namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
AudioDecoderIsacFloatImpl dec(16000);
FuzzAudioDecoderIncomingPacket(data, size, &dec);
}
} // namespace webrtc

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"
#include "webrtc/test/fuzzers/audio_decoder_fuzzer.h"
namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
static const int kSampleRateHz = 16000;
static const size_t kAllocatedOuputSizeSamples = 16000 / 10; // 100 ms.
int16_t output[kAllocatedOuputSizeSamples];
AudioDecoderIsacFixImpl dec(kSampleRateHz);
FuzzAudioDecoder(DecoderFunctionType::kNormalDecode, data, size, &dec,
kSampleRateHz, sizeof(output), output);
}
} // namespace webrtc

View File

@ -0,0 +1,24 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h"
#include "webrtc/test/fuzzers/audio_decoder_fuzzer.h"
namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
const size_t channels = (size % 2) + 1; // 1 or 2 channels.
AudioDecoderOpusImpl dec(channels);
const int kSampleRateHz = 48000;
const size_t kAllocatedOuputSizeSamples = kSampleRateHz / 10; // 100 ms.
int16_t output[kAllocatedOuputSizeSamples];
FuzzAudioDecoder(DecoderFunctionType::kNormalDecode, data, size, &dec,
kSampleRateHz, sizeof(output), output);
}
} // namespace webrtc

View File

@ -0,0 +1,24 @@
/*
* Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/opus/audio_decoder_opus.h"
#include "webrtc/test/fuzzers/audio_decoder_fuzzer.h"
namespace webrtc {
void FuzzOneInput(const uint8_t* data, size_t size) {
const size_t channels = (size % 2) + 1; // 1 or 2 channels.
AudioDecoderOpusImpl dec(channels);
const int kSampleRateHz = 48000;
const size_t kAllocatedOuputSizeSamples = kSampleRateHz / 10; // 100 ms.
int16_t output[kAllocatedOuputSizeSamples];
FuzzAudioDecoder(DecoderFunctionType::kRedundantDecode, data, size, &dec,
kSampleRateHz, sizeof(output), output);
}
} // namespace webrtc

View File

@ -0,0 +1,157 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/test/fuzzers/audio_processing_fuzzer.h"
#include <algorithm>
#include <array>
#include <cmath>
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/include/module_common_types.h"
#include "webrtc/rtc_base/checks.h"
namespace webrtc {
namespace {
// Maps one fuzz byte to one of the APM native sample rates.
size_t ByteToNativeRate(uint8_t data) {
  using Rate = AudioProcessing::NativeRate;
  const int selector = data % 4;
  if (selector <= 1) {
    // Selector 0 would be 8 kHz, but kSampleRate8kHz breaks AEC3, so both
    // 0 and 1 map to 16 kHz.
    return static_cast<size_t>(Rate::kSampleRate16kHz);
  }
  if (selector == 2) {
    return static_cast<size_t>(Rate::kSampleRate32kHz);
  }
  return static_cast<size_t>(Rate::kSampleRate48kHz);
}
// Copies |size| elements of type T out of the fuzz input into |result_data|.
// On success advances *data and shrinks *remaining_size by the number of
// bytes consumed and returns true; returns false (consuming nothing) if the
// input does not contain enough bytes.
template <class T>
bool ParseSequence(size_t size,
                   const uint8_t** data,
                   size_t* remaining_size,
                   T* result_data) {
  const size_t bytes_needed = size * sizeof(T);
  if (*remaining_size < bytes_needed) {
    return false;
  }
  uint8_t* const dest = reinterpret_cast<uint8_t*>(result_data);
  std::copy(*data, *data + bytes_needed, dest);
  *data += bytes_needed;
  *remaining_size -= bytes_needed;
  return true;
}
// Core fuzz loop shared by the float and fixed-point interfaces. Repeatedly
// parses (input rate, output rate, one channel of audio, capture/render flag)
// records from the fuzz input and runs them through |apm| until the input is
// exhausted. NOTE: the parse order below defines the fuzz input format; do
// not reorder the Parse* calls.
void FuzzAudioProcessing(const uint8_t* data,
                         size_t size,
                         bool is_float,
                         AudioProcessing* apm) {
  AudioFrame fixed_frame;
  // 480 samples = 10 ms at the largest rate ByteToNativeRate returns (48 kHz).
  std::array<float, 480> float_frame{};
  float* const first_channel = &float_frame[0];
  while (size > 0) {
    // Decide input/output rate for this iteration.
    const auto input_rate_byte = ParseByte(&data, &size);
    const auto output_rate_byte = ParseByte(&data, &size);
    if (!input_rate_byte || !output_rate_byte) {
      return;
    }
    const auto input_rate_hz = ByteToNativeRate(*input_rate_byte);
    const auto output_rate_hz = ByteToNativeRate(*output_rate_byte);
    // One 10 ms chunk per iteration (rate / 100).
    const size_t samples_per_input_channel =
        rtc::CheckedDivExact(input_rate_hz, 100ul);
    fixed_frame.samples_per_channel_ = samples_per_input_channel;
    fixed_frame.sample_rate_hz_ = input_rate_hz;
    // Two channels breaks AEC3.
    fixed_frame.num_channels_ = 1;
    // Fill the arrays with audio samples from the data.
    if (is_float) {
      if (!ParseSequence(samples_per_input_channel, &data, &size,
                         &float_frame[0])) {
        return;
      }
    } else if (!ParseSequence(samples_per_input_channel, &data, &size,
                              fixed_frame.mutable_data())) {
      return;
    }
    // Filter obviously wrong values like inf/nan and values that will
    // lead to inf/nan in calculations. 1e6 leads to DCHECKS failing.
    for (auto& x : float_frame) {
      if (!std::isnormal(x) || std::abs(x) > 1e5) {
        x = 0;
      }
    }
    // Make the APM call depending on capture/render mode and float /
    // fix interface.
    const auto is_capture = ParseBool(&data, &size);
    if (!is_capture) {
      return;
    }
    if (*is_capture) {
      auto apm_return_code =
          is_float ? (apm->ProcessStream(
                         &first_channel, StreamConfig(input_rate_hz, 1),
                         StreamConfig(output_rate_hz, 1), &first_channel))
                   : (apm->ProcessStream(&fixed_frame));
      RTC_DCHECK_NE(apm_return_code, AudioProcessing::kBadDataLengthError);
    } else {
      auto apm_return_code =
          is_float ? (apm->ProcessReverseStream(
                         &first_channel, StreamConfig(input_rate_hz, 1),
                         StreamConfig(output_rate_hz, 1), &first_channel))
                   : (apm->ProcessReverseStream(&fixed_frame));
      RTC_DCHECK_NE(apm_return_code, AudioProcessing::kBadDataLengthError);
    }
  }
}
} // namespace
// Consumes one byte from *data and returns its low bit as a bool; returns an
// empty Optional (consuming nothing) if no bytes remain.
rtc::Optional<bool> ParseBool(const uint8_t** data, size_t* remaining_size) {
  if (*remaining_size < 1) {
    return rtc::Optional<bool>();
  }
  const bool bit = (**data) % 2;
  ++(*data);
  --(*remaining_size);
  return rtc::Optional<bool>(bit);
}
// Consumes and returns one byte from *data; returns an empty Optional
// (consuming nothing) if no bytes remain.
rtc::Optional<uint8_t> ParseByte(const uint8_t** data, size_t* remaining_size) {
  if (*remaining_size < 1) {
    return rtc::Optional<uint8_t>();
  }
  const uint8_t byte = **data;
  ++(*data);
  --(*remaining_size);
  return rtc::Optional<uint8_t>(byte);
}
// Public entry point: consumes one byte to choose between the float and the
// fixed-point APM interface, then hands the rest of the fuzz input to the
// processing loop above.
void FuzzAudioProcessing(const uint8_t* data,
                         size_t size,
                         std::unique_ptr<AudioProcessing> apm) {
  // Callers (e.g. CreateAPM in audio_processing_fuzzer_configs.cc) may pass a
  // null APM when the input was too short to configure one; bail out instead
  // of letting the loop below dereference it.
  if (!apm) {
    return;
  }
  const auto is_float = ParseBool(&data, &size);
  if (!is_float) {
    return;
  }
  FuzzAudioProcessing(data, size, *is_float, apm.get());
}
} // namespace webrtc

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_TEST_FUZZERS_AUDIO_PROCESSING_FUZZER_H_
#define WEBRTC_TEST_FUZZERS_AUDIO_PROCESSING_FUZZER_H_
#include <memory>
#include "webrtc/modules/audio_processing/include/audio_processing.h"
namespace webrtc {
rtc::Optional<bool> ParseBool(const uint8_t** data, size_t* remaining_size);
rtc::Optional<uint8_t> ParseByte(const uint8_t** data, size_t* remaining_size);
void FuzzAudioProcessing(const uint8_t* data,
size_t size,
std::unique_ptr<AudioProcessing> apm);
} // namespace webrtc
#endif // WEBRTC_TEST_FUZZERS_AUDIO_PROCESSING_FUZZER_H_

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/test/fuzzers/audio_processing_fuzzer.h"
#include "webrtc/api/optional.h"
namespace webrtc {
// Builds an AudioProcessing instance whose optional components are switched
// on/off by boolean flags parsed from the front of the fuzz input (one byte
// per flag, consumed in the fixed order below). Returns nullptr if the input
// runs out before all flags are read.
std::unique_ptr<AudioProcessing> CreateAPM(const uint8_t** data,
                                           size_t* remaining_size) {
  // Parse boolean values for optionally enabling different
  // configurable public components of APM.
  auto exp_agc = ParseBool(data, remaining_size);  // ExperimentalAgc
  auto exp_ns = ParseBool(data, remaining_size);   // ExperimentalNs
  auto bf = ParseBool(data, remaining_size);       // Beamforming
  auto ef = ParseBool(data, remaining_size);       // ExtendedFilter
  auto raf = ParseBool(data, remaining_size);      // RefinedAdaptiveFilter
  auto da = ParseBool(data, remaining_size);       // DelayAgnostic
  auto ie = ParseBool(data, remaining_size);       // Intelligibility
  auto red = ParseBool(data, remaining_size);      // residual echo detector
  auto lc = ParseBool(data, remaining_size);       // level controller
  auto hpf = ParseBool(data, remaining_size);      // high-pass filter
  auto aec3 = ParseBool(data, remaining_size);     // echo canceller 3
  // If any flag failed to parse, the input was too short; create nothing.
  if (!(exp_agc && exp_ns && bf && ef && raf && da && ie && red && lc && hpf &&
        aec3)) {
    return nullptr;
  }
  // Components can be enabled through webrtc::Config and
  // webrtc::AudioProcessingConfig.
  Config config;
  config.Set<ExperimentalAgc>(new ExperimentalAgc(*exp_agc));
  config.Set<ExperimentalNs>(new ExperimentalNs(*exp_ns));
  // Beamforming is only ever set when enabled (no disabled variant is set).
  if (*bf) {
    config.Set<Beamforming>(new Beamforming());
  }
  config.Set<ExtendedFilter>(new ExtendedFilter(*ef));
  config.Set<RefinedAdaptiveFilter>(new RefinedAdaptiveFilter(*raf));
  config.Set<DelayAgnostic>(new DelayAgnostic(*da));
  config.Set<Intelligibility>(new Intelligibility(*ie));
  std::unique_ptr<AudioProcessing> apm(AudioProcessing::Create(config));
  // The remaining components are toggled via AudioProcessing::Config.
  webrtc::AudioProcessing::Config apm_config;
  apm_config.residual_echo_detector.enabled = *red;
  apm_config.level_controller.enabled = *lc;
  apm_config.high_pass_filter.enabled = *hpf;
  apm_config.echo_canceller3.enabled = *aec3;
  apm->ApplyConfig(apm_config);
  return apm;
}
void FuzzOneInput(const uint8_t* data, size_t size) {
auto apm = CreateAPM(&data, &size);
FuzzAudioProcessing(data, size, std::move(apm));
}
} // namespace webrtc

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/congestion_controller/include/receive_side_congestion_controller.h"
#include "webrtc/modules/pacing/packet_router.h"
#include "webrtc/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
namespace webrtc {
// Fuzzes the receive-side congestion controller / remote bitrate estimator
// with a stream of synthetic packets parsed from the fuzz input.
//
// Input layout: 1 byte clock seed, 4 bytes SSRC (big-endian), 8 bytes initial
// arrival time, then repeated records of {size_t payload size, uint16
// transport sequence number, uint8 arrival-time delta}.
void FuzzOneInput(const uint8_t* data, size_t size) {
  size_t i = 0;
  if (size < sizeof(int64_t) + sizeof(uint8_t) + sizeof(uint32_t))
    return;
  SimulatedClock clock(data[i++]);
  PacketRouter packet_router;
  ReceiveSideCongestionController cc(&clock, &packet_router);
  RemoteBitrateEstimator* rbe = cc.GetRemoteBitrateEstimator(true);
  RTPHeader header;
  header.ssrc = ByteReader<uint32_t>::ReadBigEndian(&data[i]);
  i += sizeof(uint32_t);
  header.extension.hasTransportSequenceNumber = true;
  // Clamp the fuzzed initial arrival time to be non-negative.
  int64_t arrival_time_ms =
      std::max<int64_t>(ByteReader<int64_t>::ReadBigEndian(&data[i]), 0);
  i += sizeof(int64_t);
  const size_t kMinPacketSize =
      sizeof(size_t) + sizeof(uint16_t) + sizeof(uint8_t);
  while (i + kMinPacketSize < size) {
    // Payload sizes are capped at 1500 bytes (a typical MTU).
    size_t payload_size = ByteReader<size_t>::ReadBigEndian(&data[i]) % 1500;
    i += sizeof(size_t);
    header.extension.transportSequenceNumber =
        ByteReader<uint16_t>::ReadBigEndian(&data[i]);
    i += sizeof(uint16_t);
    rbe->IncomingPacket(arrival_time_ms, payload_size, header);
    clock.AdvanceTimeMilliseconds(5);
    arrival_time_ms += ByteReader<uint8_t>::ReadBigEndian(&data[i]);
    // Fix: advance the read index past the delta byte. The original did
    // "arrival_time_ms += sizeof(uint8_t);" here, so |i| never skipped the
    // delta byte (despite it being counted in kMinPacketSize) and every
    // subsequent record was misparsed, while arrival_time_ms drifted by an
    // extra +1 per packet.
    i += sizeof(uint8_t);
  }
  rbe->Process();
}
} // namespace webrtc

View File

@ -0,0 +1,34 @@
This is a collection of corpora for various WebRTC fuzzers. To use
them, the gn targets define seed_corpus=$corpus_dir, which causes the
ClusterFuzz upload bot to bundle $corpus_dir and upload it.
The format is simple: one file per test case. Specific notes are
included below.
### SDP ###
This corpus was initially assembled manually from the following
sources:
- curl --silent https://www.ietf.org/rfc/rfc4317.txt | grep '^[ a-z]*=[^=]*$' | sed 's/^[[:space:]]*//' | awk -v RS='(^|\n)v=' '/./ {print "v="$0 > NR".sdp"}'
- all the SDPs used in the parser unit tests
- some manually gathered SDPs from Firefox and Opera
The SDP tokens come from:
- grep "^static const " webrtc/api/webrtcsdp.cc | cut -d'=' -f2 | cut -d ';' -f1 | tr -d '"' | tr -d "'" | tr -d ' ' | sort -u | grep -v '^(\n|\r|\r\n)$|^$' | sed -e 's/^/"/' -e 's/$/"/' | tail -n +2
### STUN ###
This corpus was initially assembled from the STUN unit tests, together
with an input from a crash that the fuzzer found relatively quickly.
### RT(C)P ###
This corpus was initially assembled from the unit tests. The RTCP corpus
was minimised first.
There is also rt(c?)p-corpus-with-extra-byte, in which each sample is
prefixed by the byte 0xff. Some of the rtp fuzzers need to decide
which header extensions to enable, and the first byte of the fuzz data
is used for this.
### PseudoTCP ###
Very small corpus minimised from the unit tests.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1 @@
€

Some files were not shown because too many files have changed in this diff Show More