17 Commits

Author SHA1 Message Date
8dd0f2b606 Fix 2021-11-09 15:32:54 +08:00
54e2d07dde Update README.rst 2021-11-09 14:58:42 +08:00
b519dfc0bb 1. New GUI for face register with Tkinter, supporting setting a name when saving faces;
2. `features_all.csv` modified to n x 129: each row is person_name followed by the 128D features;

Signed-off-by: Zhengtian Xie <coneypo@gmail.com>
2021-11-09 14:47:44 +08:00
7dc1071df4 1. Update requirements.txt
2. Update the fps refresh period to 1s
2021-10-28 08:51:42 +08:00
8e8a0032c4 Update repo structure
1. Add performance counter for `face_descriptor_from_camera.py`
2. Rename `face_reco_from_camera_ot_single_person.py` to `face_reco_from_camera_single_face.py`, remove OT from it
3. Update readme
2021-08-16 10:25:30 +08:00
93bb154c8a Remove '.idea/' generated by PyCharm
Signed-off-by: coneypo <coneypo@gmail.com>
2021-06-04 14:03:25 +08:00
05b78489a7 1. Use logging to set the log level
2. Fix a bug in OT with multiple people
3. Add 'reclassify_interval' to trigger re-classification in OT with multiple people

Signed-off-by: coneypo <coneypo@gmail.com>
2021-06-04 13:51:46 +08:00
8eaad06adc Show Chinese name in OT script 2021-01-25 11:25:40 +08:00
4e4553d5e9 Show Chinese name with OT 2021-01-14 14:58:31 +08:00
e9008e3ad3 Add 're-classification feature for single-person' 2020-12-14 11:50:06 +08:00
0f5adfd5cf remove unused statement 2020-09-16 10:40:31 +08:00
8a4fb563cd add MIT license 2020-09-15 17:09:18 +08:00
2c1b6416af use OT to improve FPS 2020-09-03 15:34:26 +08:00
65c9ec0caf test 2020-08-19 23:19:21 +08:00
3313d91414 push from gitlab test 2020-07-03 13:56:05 +08:00
2b88597aee push from gitlab 2020-07-03 13:26:50 +08:00
e2698f7ae8 test 2020-07-03 11:48:58 +08:00
35 changed files with 1271 additions and 559 deletions

14
.idea/Dlib_face_recognition_from_camera.iml generated

@ -1,14 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/data" isTestSource="false" />
</content>
<orderEntry type="jdk" jdkName="Python 3.7" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TestRunnerService">
<option name="projectConfiguration" value="pytest" />
<option name="PROJECT_TEST_RUNNER" value="py.test" />
</component>
</module>

4
.idea/encodings.xml generated

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Encoding" addBOMForNewFiles="with NO BOM" />
</project>


@ -1,20 +0,0 @@
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="E501" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="N806" />
<option value="N802" />
</list>
</option>
</inspection_tool>
</profile>
</component>

4
.idea/misc.xml generated

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
</project>

8
.idea/modules.xml generated

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/Dlib_face_recognition_from_camera.iml" filepath="$PROJECT_DIR$/.idea/Dlib_face_recognition_from_camera.iml" />
</modules>
</component>
</project>

6
.idea/vcs.xml generated

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>

249
.idea/workspace.xml generated

@ -1,249 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ChangeListManager">
<list default="true" id="e58b655a-3a9b-4001-b4da-39e07ab46629" name="Default Changelist" comment="">
<change beforePath="$PROJECT_DIR$/.idea/Dlib_face_recognition_from_camera.iml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/Dlib_face_recognition_from_camera.iml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/misc.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/face_reco_from_camera.py" beforeDir="false" afterPath="$PROJECT_DIR$/face_reco_from_camera.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/get_faces_from_camera.py" beforeDir="false" afterPath="$PROJECT_DIR$/get_faces_from_camera.py" afterDir="false" />
</list>
<option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="ProjectId" id="1Tq7xXTTl7R3HeMqxP7UMMKZMeC" />
<component name="ProjectLevelVcsManager" settingsEditedManually="true" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showExcludedFiles" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">
<property name="SHARE_PROJECT_CONFIGURATION_FILES" value="true" />
<property name="last_opened_file_path" value="$PROJECT_DIR$" />
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
</component>
<component name="RunDashboard">
<option name="ruleStates">
<list>
<RuleState>
<option name="name" value="ConfigurationTypeDashboardGroupingRule" />
</RuleState>
<RuleState>
<option name="name" value="StatusDashboardGroupingRule" />
</RuleState>
</list>
</option>
</component>
<component name="RunManager" selected="Python.face_descriptor_from_camera">
<configuration name="face_descriptor_compute" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/face_descriptor_compute.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="face_descriptor_from_camera" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/face_descriptor_from_camera.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="face_reco_from_camera" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/face_reco_from_camera.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="features_extraction_to_csv" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/features_extraction_to_csv.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="get_faces_from_camera" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/get_faces_from_camera.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<list>
<item itemvalue="Python.face_descriptor_compute" />
<item itemvalue="Python.face_descriptor_from_camera" />
<item itemvalue="Python.face_reco_from_camera" />
<item itemvalue="Python.features_extraction_to_csv" />
<item itemvalue="Python.get_faces_from_camera" />
</list>
<recent_temporary>
<list>
<item itemvalue="Python.face_descriptor_from_camera" />
<item itemvalue="Python.face_reco_from_camera" />
<item itemvalue="Python.features_extraction_to_csv" />
<item itemvalue="Python.get_faces_from_camera" />
<item itemvalue="Python.face_descriptor_compute" />
</list>
</recent_temporary>
</component>
<component name="SvnConfiguration">
<configuration />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="e58b655a-3a9b-4001-b4da-39e07ab46629" name="Default Changelist" comment="" />
<created>1538622047029</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1538622047029</updated>
</task>
<servers />
</component>
<component name="Vcs.Log.Tabs.Properties">
<option name="TAB_STATES">
<map>
<entry key="MAIN">
<value>
<State>
<option name="COLUMN_ORDER" />
</State>
</value>
</entry>
</map>
</option>
</component>
<component name="WindowStateProjectService">
<state width="1897" height="194" key="GridCell.Tab.0.bottom" timestamp="1587297625581">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1897" height="194" key="GridCell.Tab.0.bottom/0.27.1920.993@0.27.1920.993" timestamp="1587297625581" />
<state width="1897" height="194" key="GridCell.Tab.0.center" timestamp="1587297625579">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1897" height="194" key="GridCell.Tab.0.center/0.27.1920.993@0.27.1920.993" timestamp="1587297625579" />
<state width="1897" height="194" key="GridCell.Tab.0.left" timestamp="1587297625578">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1897" height="194" key="GridCell.Tab.0.left/0.27.1920.993@0.27.1920.993" timestamp="1587297625578" />
<state width="1897" height="194" key="GridCell.Tab.0.right" timestamp="1587297625580">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1897" height="194" key="GridCell.Tab.0.right/0.27.1920.993@0.27.1920.993" timestamp="1587297625580" />
<state width="1485" height="299" key="GridCell.Tab.1.bottom" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.bottom/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state width="1485" height="299" key="GridCell.Tab.1.center" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.center/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state width="1485" height="299" key="GridCell.Tab.1.left" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.left/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state width="1485" height="299" key="GridCell.Tab.1.right" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.right/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state x="759" y="251" width="672" height="678" key="search.everywhere.popup" timestamp="1587264669499">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state x="759" y="251" width="672" height="678" key="search.everywhere.popup/0.27.1920.993@0.27.1920.993" timestamp="1587264669499" />
</component>
<component name="XDebuggerManager">
<breakpoint-manager>
<default-breakpoints>
<breakpoint type="python-exception">
<properties notifyOnTerminate="true" exception="BaseException">
<option name="notifyOnTerminate" value="true" />
</properties>
</breakpoint>
</default-breakpoints>
</breakpoint-manager>
</component>
</project>

21
LICENSE Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018-2021 coneypo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.rst

@ -6,45 +6,48 @@ Introduction
Detect and recognize single/multi-faces from camera;
调用摄像头进行人脸识别支持多张人脸同时识别;
调用摄像头进行人脸识别, 支持多张人脸同时识别;
#. Tkinter 人脸录入界面, 支持录入时设置姓名 / Face register GUI with Tkinter, support setting name when registering
#. 摄像头人脸录入 / Face register
.. image:: introduction/get_face_from_camera.png
.. image:: introduction/face_register_tkinter_GUI.png
:align: center
请不要离摄像头过近,人脸超出摄像头范围时会有 "OUT OF RANGE" 提醒 /
#. 简单的 OpenCV 摄像头人脸录入界面 / Simple face register GUI with OpenCV
.. image:: introduction/face_register.png
:align: center
请不要离摄像头过近, 人脸超出摄像头范围时会有 "OUT OF RANGE" 提醒 /
Please do not be too close to the camera, or you can't save faces with "OUT OF RANGE" warning;
.. image:: introduction/get_face_from_camera_out_of_range.png
.. image:: introduction/face_register_warning.png
:align: center
#. 提取特征建立人脸数据库 / Generate database from images captured
#. 提取特征建立人脸数据库 / Generate face database from images captured
#. 利用摄像头进行人脸识别 / Face recognizer
当单张人脸 / When single-face:
face_reco_from_camera.py, 对于每一帧都做检测识别 / Do detection and recognition for every frame:
.. image:: introduction/face_reco_single_person.png
.. image:: introduction/face_reco.png
:align: center
当多张人脸 / When multi-faces:
face_reco_from_camera_single_face.py, 对于人脸<=1, 只有新人脸出现才进行再识别来提高 FPS / Re-recognize only when a new face appears, to improve FPS:
一张已录入人脸 + 未录入 unknown 人脸 / 1x known face + 2x unknown face:
.. image:: introduction/face_reco_multi_people.png
.. image:: introduction/face_reco_single.png
:align: center
同时识别多张已录入人脸 / Multi-faces recognition at the same time:
face_reco_from_camera_ot.py, 利用 OT 来实现再识别提高 FPS / Use OT instead of re-recognition for every frame to improve FPS:
.. image:: introduction/face_reco_two_people_in_database.png
.. image:: introduction/face_reco_ot.png
:align: center
实时人脸特征描述子计算 / Real-time face descriptor computation:
定制显示名字, 可以写中文 / Show chinese name:
.. image:: introduction/face_descriptor_single_person.png
.. image:: introduction/face_reco_chinese_name.png
:align: center
** 关于精度 / About accuracy:
* When using a distance threshold of ``0.6``, the dlib model obtains an accuracy of ``99.38%`` on the standard LFW face recognition benchmark.
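For reference, a minimal sketch of how that kind of threshold is applied in this repo: the 128D descriptors are compared by Euclidean distance (this mirrors ``return_euclidean_distance`` in the recognizer scripts; the two feature vectors below are random placeholders, and the scripts here match at ``0.4`` rather than dlib's ``0.6``):

.. code-block:: python

    import numpy as np

    def return_euclidean_distance(feature_1, feature_2):
        # Euclidean distance between two 128D face descriptors
        feature_1 = np.array(feature_1)
        feature_2 = np.array(feature_2)
        return np.sqrt(np.sum(np.square(feature_1 - feature_2)))

    # Placeholder descriptors for illustration only
    known_feature = np.random.rand(128)
    captured_feature = np.random.rand(128)

    # face_reco_from_camera.py uses 0.4 as its matching threshold
    if return_euclidean_distance(captured_feature, known_feature) < 0.4:
        print("Same person")
    else:
        print("Unknown person")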
@ -58,21 +61,26 @@ Detect and recognize single/multi-faces from camera;
Overview
********
此项目中人脸识别的实现流程 / The design of this repo:
此项目中人脸识别的实现流程 (no OT, 每一帧都进行检测+识别) / The design of this repo:
.. image:: introduction/overview.png
:align: center
实现流程(with OT, 初始帧进行检测+识别, 后续帧检测+质心跟踪) / The design of this repo:
.. image:: introduction/overview_with_ot.png
:align: center
如果利用 OT 来跟踪, 可以大大提高 FPS, 因为做识别时候需要提取特征描述子的耗时很多; / Tracking with OT greatly improves FPS, because extracting the feature descriptor for recognition is the expensive step;
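To make the centroid idea concrete, here is a small self-contained sketch of the matching step behind ``centroid_tracker()`` in ``face_reco_from_camera_ot.py`` (the coordinates and names below are made up; the real method works on the class's per-frame centroid and name lists shown later in this diff):

.. code-block:: python

    import numpy as np

    def match_by_centroid(current_centroids, last_centroids, last_names):
        # Give each face in the current frame the name of the nearest
        # face centroid from the previous frame (the centroid-tracker idea)
        names = []
        for c in current_centroids:
            dists = [np.linalg.norm(np.array(c) - np.array(p)) for p in last_centroids]
            names.append(last_names[int(np.argmin(dists))])
        return names

    # Made-up centroids: two faces that moved slightly between frames
    last_centroids = [(100, 120), (300, 140)]
    last_names = ["person_1", "person_2"]
    current_centroids = [(305, 150), (98, 118)]
    print(match_by_centroid(current_centroids, last_centroids, last_names))
    # -> ['person_2', 'person_1']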
Steps
*****
#. 安装依赖库 / Install some python packages if needed
#. 安装依赖库 / Install some python packages needed
.. code-block:: bash
pip3 install opencv-python
pip3 install scikit-image
pip3 install dlib
pip install -r requirements.txt
#. 下载源码 / Download zip from website or via GitHub Desktop in windows, or git clone repo in Ubuntu
@ -80,7 +88,13 @@ Steps
git clone https://github.com/coneypo/Dlib_face_recognition_from_camera
#. 进行人脸信息采集录入 / Register faces
#. 进行人脸信息采集录入, Tkinter GUI / Register faces with Tkinter GUI
.. code-block:: bash
python3 get_faces_from_camera_tkinter.py
#. 进行人脸信息采集录入, OpenCV GUI / Register faces with OpenCV GUI
.. code-block:: bash
@ -98,6 +112,17 @@ Steps
python3 face_reco_from_camera.py
#. 对于人脸数<=1, 调用摄像头进行实时人脸识别 / Real-time face recognition (FPS improved)
.. code-block:: bash
python3 face_reco_from_camera_single_face.py
#. 利用 OT 算法, 调用摄像头进行实时人脸识别 / Real-time face recognition with OT (FPS improved)
.. code-block:: bash
python3 face_reco_from_camera_ot.py
About Source Code
*****************
@ -107,34 +132,28 @@ Repo 的 tree / 树状图:
::
.
├── get_faces_from_camera.py # Step1. Face register
├── features_extraction_to_csv.py # Step2. Feature extraction
├── face_reco_from_camera.py # Step3. Face recognizer
├── face_descriptor_from_camera.py # Face descriptor computation
├── how_to_use_camera.py # Use the default camera by opencv
├── get_faces_from_camera.py # Step 1. Face register GUI with OpenCV
├── get_faces_from_camera_tkinter.py # Step 1. Face register GUI with Tkinter
├── features_extraction_to_csv.py # Step 2. Feature extraction
├── face_reco_from_camera.py # Step 3. Face recognizer
├── face_reco_from_camera_single_face.py # Step 3. Face recognizer for single person
├── face_reco_from_camera_ot.py # Step 3. Face recognizer with OT
├── face_descriptor_from_camera.py # Face descriptor computation
├── how_to_use_camera.py # Use the default camera by opencv
├── data
│   ├── data_dlib # Dlib's model
│   ├── data_dlib # Dlib's model
│   │   ├── dlib_face_recognition_resnet_model_v1.dat
│   │   └── shape_predictor_68_face_landmarks.dat
│   ├── data_faces_from_camera # Face images captured from camera (will generate after step 1)
│   ├── data_faces_from_camera # Face images captured from camera (will generate after step 1)
│   │   ├── person_1
│   │   │   ├── img_face_1.jpg
│   │   │   └── img_face_2.jpg
│   │   └── person_2
│   │   └── img_face_1.jpg
│   │   └── img_face_2.jpg
│   └── features_all.csv # CSV to save all the features of known faces (will generate after step 2)
├── introduction # Some files for readme.rst
│   ├── Dlib_Face_recognition_by_coneypo.pptx
│   ├── face_reco_single_person_customize_name.png
│   ├── face_reco_single_person.png
│   ├── face_reco_two_people_in_database.png
│   ├── face_reco_two_people.png
│   ├── get_face_from_camera_out_of_range.png
│   ├── get_face_from_camera.png
│   └── overview.png
│   └── features_all.csv # CSV to save all the features of known faces (will generate after step 2)
├── README.rst
└── requirements.txt # Some python packages needed
└── requirements.txt # Some python packages needed
用到的 Dlib 相关模型函数 / Dlib model functions used:
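The calls in question, as they appear throughout the scripts in this repo (the model files live under ``data/data_dlib/``):

.. code-block:: python

    import dlib

    # Frontal face detector
    detector = dlib.get_frontal_face_detector()

    # 68-point face landmark predictor
    predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')

    # ResNet face recognition model, returns a 128D face descriptor
    face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")

    # Typical per-frame usage:
    #   faces = detector(img_rd, 0)
    #   shape = predictor(img_rd, faces[0])
    #   descriptor = face_reco_model.compute_face_descriptor(img_rd, shape)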
@ -176,16 +195,16 @@ Python 源码介绍如下:
进行 Face register / 人脸信息采集录入
* 请注意存储人脸图片时矩形框不要超出摄像头范围要不然无法保存到本地;
* 请注意存储人脸图片时, 矩形框不要超出摄像头范围, 要不然无法保存到本地;
* 超出会有 "out of range" 的提醒;
#. features_extraction_to_csv.py:
从上一步存下来的图像文件中提取人脸数据存入CSV;
从上一步存下来的图像文件中, 提取人脸数据存入CSV;
* 会生成一个存储所有特征人脸数据的 "features_all.csv";
* size: n*128 , n means n people you registered and 128 means 128D features of the face
* size: n*129, where n is the number of faces you registered and 129 means the face name + 128D features of that face (see the CSV reading sketch below)
#. face_reco_from_camera.py:
@ -196,6 +215,14 @@ Python 源码介绍如下:
* 将捕获到的人脸数据和之前存的人脸数据进行对比计算欧式距离, 由此判断是否是同一个人;
#. face_reco_from_camera_single_face.py:
针对于人脸数 <=1 的场景, 区别于 face_reco_from_camera.py (对每一帧都进行检测+识别), 只有人脸出现的时候进行识别;
#. face_reco_from_camera_ot.py:
只会对初始帧做检测+识别, 对后续帧做检测+质心跟踪;
#. (optional) face_descriptor_from_camera.py
调用摄像头进行实时特征描述子计算; / Real-time face descriptor computation;
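As a quick reference for the ``features_all.csv`` layout mentioned above (n x 129: name in column 0, the 128D features in columns 1-128), this is how the recognizer scripts read it back in ``get_face_database()``:

.. code-block:: python

    import pandas as pd

    # Each row: person_name, f_0, f_1, ..., f_127  ->  shape n x 129
    csv_rd = pd.read_csv("data/features_all.csv", header=None)

    face_name_known_list = []
    face_feature_known_list = []
    for i in range(csv_rd.shape[0]):
        face_name_known_list.append(csv_rd.iloc[i][0])        # column 0: person name
        features_someone_arr = []
        for j in range(1, 129):                               # columns 1..128: 128D features
            if csv_rd.iloc[i][j] == '':
                features_someone_arr.append('0')
            else:
                features_someone_arr.append(csv_rd.iloc[i][j])
        face_feature_known_list.append(features_someone_arr)

    print("Faces in database:", len(face_feature_known_list))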
@ -205,25 +232,27 @@ More
Tips:
#. 如果希望详细了解 dlib 的用法请参考 Dlib 官方 Python api 的网站 / You can refer to this link for more information of how to use dlib: http://dlib.net/python/index.html
#. 如果希望详细了解 dlib 的用法, 请参考 Dlib 官方 Python api 的网站 / You can refer to this link for more information of how to use dlib: http://dlib.net/python/index.html
#. Windows下建议不要把代码放到 ``C:\``, 可能会出现权限读取问题 / On Windows, we do not recommend running this repo under ``C:\``, as it may hit permission issues
#. Modify log level to ``logging.basicConfig(level=logging.DEBUG)`` to print info for every frame if needed (Default is ``logging.INFO``)
#. 代码最好不要有中文路径 / No chinese characters in your code directory
#. 人脸录入的时候先建文件夹再保存图片, 先按 ``N`` 再按 ``S`` / When registering faces, create the folder first and then save images: press ``N`` before ``S``
#. 关于人脸识别卡顿 FPS 低问题, 不做 compare 的时候, 光跑 face_descriptor_from_camera.py 中 face_reco_model.compute_face_descriptor
在 CPU: i7-8700K FPS: 5~6, 所以主要提取特征时候耗资源
#. 关于 `face_reco_from_camera.py` 人脸识别卡顿 FPS 低问题, 原因是特征描述子提取很费时间, 光跑 face_descriptor_from_camera.py 中
face_reco_model.compute_face_descriptor 在我的机器上得到的平均 FPS 在 5 左右 (检测在 0.03s, 特征描述子提取在 0.158s, 和已知人脸进行遍历对比在 0.003s 左右),
所以主要提取特征时候耗资源, 可以用 OT 去做追踪 (使用`face_reco_from_camera_ot.py`), 而不是对每一帧都做检测+识别, 识别的性能从 20 FPS -> 200 FPS
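If you want to reproduce those per-stage timings on your own machine, a minimal sketch (mirroring the performance counters added to ``face_descriptor_from_camera.py``; it assumes a working camera at index 0 and the model files under ``data/data_dlib/``):

.. code-block:: python

    import time
    import cv2
    import dlib

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
    face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")

    cap = cv2.VideoCapture(0)        # camera index 0
    flag, img_rd = cap.read()
    cap.release()

    t0 = time.time()
    faces = detector(img_rd, 0)      # detection, ~0.03 s on the author's machine
    t1 = time.time()
    if len(faces) != 0:
        shape = predictor(img_rd, faces[0])
        t2 = time.time()
        face_reco_model.compute_face_descriptor(img_rd, shape)   # ~0.158 s, the dominant cost
        t3 = time.time()
        print("detector: %.3fs, predictor: %.3fs, compute_face_descriptor: %.3fs"
              % (t1 - t0, t2 - t1, t3 - t2))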
可以访问我的博客获取本项目的更详细介绍如有问题可以邮件联系我 /
可以访问我的博客获取本项目的更详细介绍, 如有问题可以邮件联系我 /
For more details, please refer to my blog (in chinese) or mail to me :
* Blog: https://www.cnblogs.com/AdaminXie/p/9010298.html
* 关于 OT 部分的更新在 Blog: https://www.cnblogs.com/AdaminXie/p/13566269.html
* Mail: coneypo@foxmail.com ( Dlib 相关 repo 问题请联系 @foxmail 而不是 @intel )
* Feel free to create an issue or PR for this repo :)
仅限于交流学习, 商业合作勿扰;
Thanks for your support.
Thanks for your support.

face_descriptor_from_camera.py

@ -1,10 +1,12 @@
# 摄像头实时人脸特征描述子计算 / Real-time face descriptor compute
# Copyright (C) 2018-2021 coneypo
# SPDX-License-Identifier: MIT
# 摄像头实时人脸特征描述子计算 / Real-time face descriptor computing
import dlib # 人脸识别的库 Dlib
import cv2 # 图像处理的库 OpenCV
import time
# 1. Dlib 正向人脸检测器
detector = dlib.get_frontal_face_detector()
@ -20,6 +22,7 @@ class Face_Descriptor:
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
self.frame_cnt = 0
def update_fps(self):
now = time.time()
@ -37,20 +40,33 @@ class Face_Descriptor:
def process(self, stream):
while stream.isOpened():
flag, img_rd = stream.read()
self.frame_cnt+=1
k = cv2.waitKey(1)
print('- Frame ', self.frame_cnt, " starts:")
timestamp1 = time.time()
faces = detector(img_rd, 0)
timestamp2 = time.time()
print("--- Time used to `detector`: %s seconds ---" % (timestamp2 - timestamp1))
font = cv2.FONT_HERSHEY_SIMPLEX
# 检测到人脸
if len(faces) != 0:
for face in faces:
timestamp3 = time.time()
face_shape = predictor(img_rd, face)
timestamp4 = time.time()
print("--- Time used to `predictor`: %s seconds ---" % (timestamp4 - timestamp3))
timestamp5 = time.time()
face_desc = face_reco_model.compute_face_descriptor(img_rd, face_shape)
timestamp6 = time.time()
print("--- Time used to `compute_face_descriptor:` %s seconds ---" % (timestamp6 - timestamp5))
# 添加说明
cv2.putText(img_rd, "Face Descriptor", (20, 40), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Face descriptor", (20, 40), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(len(faces)), (20, 140), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "S: Save current face", (20, 400), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
@ -64,6 +80,7 @@ class Face_Descriptor:
cv2.namedWindow("camera", 1)
cv2.imshow("camera", img_rd)
print('\n')
def main():

face_reco_from_camera.py

@ -1,77 +1,72 @@
# 摄像头实时人脸识别
# Real-time face recognition
# Copyright (C) 2018-2021 coneypo
# SPDX-License-Identifier: MIT
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# Created at 2018-05-11
# Updated at 2020-05-29
# 摄像头实时人脸识别 / Real-time face detection and recognition
import dlib # 人脸处理的库 Dlib
import numpy as np # 数据处理的库 Numpy
import cv2 # 图像处理的库 OpenCV
import pandas as pd # 数据处理的库 Pandas
import dlib
import numpy as np
import cv2
import pandas as pd
import os
import time
import logging
from PIL import Image, ImageDraw, ImageFont
# 1. Dlib 正向人脸检测器
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
# 2. Dlib 人脸 landmark 特征点检测器
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# 3. Dlib Resnet 人脸识别模型,提取 128D 的特征矢量
# Dlib Resnet 人脸识别模型,提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
class Face_Recognizer:
def __init__(self):
# 用来存放所有录入人脸特征的数组 / Save the features of faces in the database
self.features_known_list = []
self.face_feature_known_list = [] # 用来存放所有录入人脸特征的数组 / Save the features of faces in database
self.face_name_known_list = [] # 存储录入人脸名字 / Save the name of faces in database
# 存储录入人脸名字 / Save the name of faces known
self.name_known_cnt = 0
self.name_known_list = []
# 存储当前摄像头中捕获到的所有人脸的坐标名字 / Save the positions and names of current faces captured
self.pos_camera_list = []
self.name_camera_list = []
# 存储当前摄像头中捕获到的人脸数
self.faces_cnt = 0
# 存储当前摄像头中捕获到的人脸特征
self.features_camera_list = []
self.current_frame_face_cnt = 0 # 存储当前摄像头中捕获到的人脸数 / Counter for faces in current frame
self.current_frame_face_feature_list = [] # 存储当前摄像头中捕获到的人脸特征 / Features of faces in current frame
self.current_frame_face_name_list = [] # 存储当前摄像头中捕获到的所有人脸的名字 / Names of faces in current frame
self.current_frame_face_name_position_list = [] # 存储当前摄像头中捕获到的所有人脸的名字坐标 / Positions of faces in current frame
# Update FPS
self.fps = 0
self.fps = 0 # FPS of current frame
self.fps_show = 0 # FPS per second
self.frame_start_time = 0
self.frame_cnt = 0
self.start_time = time.time()
# 从 "features_all.csv" 读取录入人脸特征
self.font = cv2.FONT_ITALIC
self.font_chinese = ImageFont.truetype("simsun.ttc", 30)
# 从 "features_all.csv" 读取录入人脸特征 / Read known faces from "features_all.csv"
def get_face_database(self):
if os.path.exists("data/features_all.csv"):
path_features_known_csv = "data/features_all.csv"
csv_rd = pd.read_csv(path_features_known_csv, header=None)
# 2. 读取已知人脸数据 / Print known faces
for i in range(csv_rd.shape[0]):
features_someone_arr = []
for j in range(0, 128):
self.face_name_known_list.append(csv_rd.iloc[i][0])
for j in range(1, 129):
if csv_rd.iloc[i][j] == '':
features_someone_arr.append('0')
else:
features_someone_arr.append(csv_rd.iloc[i][j])
self.features_known_list.append(features_someone_arr)
self.name_known_list.append("Person_"+str(i+1))
self.name_known_cnt = len(self.name_known_list)
print("Faces in Database:", len(self.features_known_list))
self.face_feature_known_list.append(features_someone_arr)
logging.info("Faces in Database:%d", len(self.face_feature_known_list))
return 1
else:
print('##### Warning #####', '\n')
print("'features_all.csv' not found!")
print(
"Please run 'get_faces_from_camera.py' and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'",
'\n')
print('##### End Warning #####')
logging.warning("'features_all.csv' not found!")
logging.warning("Please run 'get_faces_from_camera.py' "
"and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'")
return 0
# 计算两个128D向量间的欧式距离 / Compute the e-distance between two 128D features
@ -85,43 +80,51 @@ class Face_Recognizer:
# 更新 FPS / Update FPS of Video stream
def update_fps(self):
now = time.time()
# 每秒刷新 fps / Refresh fps per second
if str(self.start_time).split(".")[0] != str(now).split(".")[0]:
self.fps_show = self.fps
self.start_time = now
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
# 生成的 cv2 window 上面添加说明文字 / PutText on cv2 window
def draw_note(self, img_rd):
font = cv2.FONT_ITALIC
cv2.putText(img_rd, "Face Recognizer", (20, 40), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.faces_cnt), (20, 140), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Face Recognizer", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Frame: " + str(self.frame_cnt), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps_show.__round__(2)), (20, 130), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.current_frame_face_cnt), (20, 160), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
def draw_name(self, img_rd):
# 在人脸框下面写人脸名字 / Write names under rectangle
font = ImageFont.truetype("simsun.ttc", 30)
img = Image.fromarray(cv2.cvtColor(img_rd, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
for i in range(self.faces_cnt):
# cv2.putText(img_rd, self.name_camera_list[i], self.pos_camera_list[i], font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)
draw.text(xy=self.pos_camera_list[i], text=self.name_camera_list[i], font=font)
img_with_name = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
return img_with_name
for i in range(self.current_frame_face_cnt):
# cv2.putText(img_rd, self.current_frame_face_name_list[i], self.current_frame_face_name_position_list[i], self.font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)
draw.text(xy=self.current_frame_face_name_position_list[i], text=self.current_frame_face_name_list[i], font=self.font_chinese,
fill=(255, 255, 0))
img_rd = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
return img_rd
# 修改显示人名
def modify_name_camera_list(self):
# 修改显示人名 / Show names in chinese
def show_chinese_name(self):
# Default known name: person_1, person_2, person_3
self.name_known_list[0] ='张三'.encode('utf-8').decode()
self.name_known_list[1] ='李四'.encode('utf-8').decode()
# self.name_known_list[2] ='xx'.encode('utf-8').decode()
# self.name_known_list[3] ='xx'.encode('utf-8').decode()
# self.name_known_list[4] ='xx'.encode('utf-8').decode()
if self.current_frame_face_cnt >= 1:
# 修改录入的人脸姓名 / Modify names in face_name_known_list to chinese name
self.face_name_known_list[0] = '张三'.encode('utf-8').decode()
# self.face_name_known_list[1] = '张四'.encode('utf-8').decode()
# 处理获取的视频流,进行人脸识别 / Input video stream and face reco process
# 处理获取的视频流,进行人脸识别 / Face detection and recognition from input video stream
def process(self, stream):
# 1. 读取存放所有人脸特征的 csv
# 1. 读取存放所有人脸特征的 csv / Read known faces from "features.all.csv"
if self.get_face_database():
while stream.isOpened():
self.frame_cnt += 1
logging.debug("Frame %d starts", self.frame_cnt)
flag, img_rd = stream.read()
faces = detector(img_rd, 0)
kk = cv2.waitKey(1)
@ -130,83 +133,81 @@ class Face_Recognizer:
break
else:
self.draw_note(img_rd)
self.features_camera_list = []
self.faces_cnt = 0
self.pos_camera_list = []
self.name_camera_list = []
self.current_frame_face_feature_list = []
self.current_frame_face_cnt = 0
self.current_frame_face_name_position_list = []
self.current_frame_face_name_list = []
# 2. 检测到人脸 / when face detected
# 2. 检测到人脸 / Face detected in current frame
if len(faces) != 0:
# 3. 获取当前捕获到的图像的所有人脸的特征,存储到 self.features_camera_list
# 3. Get the features captured and save into self.features_camera_list
# 3. 获取当前捕获到的图像的所有人脸的特征 / Compute the face descriptors for faces in current frame
for i in range(len(faces)):
shape = predictor(img_rd, faces[i])
self.features_camera_list.append(face_reco_model.compute_face_descriptor(img_rd, shape))
self.current_frame_face_feature_list.append(face_reco_model.compute_face_descriptor(img_rd, shape))
# 4. 遍历捕获到的图像中所有的人脸 / Traverse all faces captured in current frame
for k in range(len(faces)):
print("##### camera person", k + 1, "#####")
# 让人名跟随在矩形框的下方
# 确定人名的位置坐标
# 先默认所有人不认识,是 unknown
# Set the default names of faces with "unknown"
self.name_camera_list.append("unknown")
logging.debug("For face %d in camera:", k+1)
# 先默认所有人不认识,是 unknown / Set the default names of faces with "unknown"
self.current_frame_face_name_list.append("unknown")
# 每个捕获人脸的名字坐标 / Positions of faces captured
self.pos_camera_list.append(tuple(
self.current_frame_face_name_position_list.append(tuple(
[faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
# 5. 对于某张人脸,遍历所有存储的人脸特征
# For every faces detected, compare the faces in the database
e_distance_list = []
for i in range(len(self.features_known_list)):
current_frame_e_distance_list = []
for i in range(len(self.face_feature_known_list)):
# 如果 person_X 数据不为空
if str(self.features_known_list[i][0]) != '0.0':
print("with person", str(i + 1), "the e distance: ", end='')
e_distance_tmp = self.return_euclidean_distance(self.features_camera_list[k],
self.features_known_list[i])
print(e_distance_tmp)
e_distance_list.append(e_distance_tmp)
if str(self.face_feature_known_list[i][0]) != '0.0':
e_distance_tmp = self.return_euclidean_distance(self.current_frame_face_feature_list[k],
self.face_feature_known_list[i])
logging.debug(" With person %s, the e-distance is %f", str(i + 1), e_distance_tmp)
current_frame_e_distance_list.append(e_distance_tmp)
else:
# 空数据 person_X
e_distance_list.append(999999999)
# 6. 寻找出最小的欧式距离匹配 / Find the one with minimum e distance
similar_person_num = e_distance_list.index(min(e_distance_list))
print("Minimum e distance with person", self.name_known_list[similar_person_num])
current_frame_e_distance_list.append(999999999)
# 6. 寻找出最小的欧式距离匹配 / Find the one with minimum e-distance
similar_person_num = current_frame_e_distance_list.index(min(current_frame_e_distance_list))
logging.debug("Minimum e-distance with %s: %f", self.face_name_known_list[similar_person_num], min(current_frame_e_distance_list))
if min(e_distance_list) < 0.4:
self.name_camera_list[k] = self.name_known_list[similar_person_num]
print("May be person " + str(self.name_known_list[similar_person_num]))
if min(current_frame_e_distance_list) < 0.4:
self.current_frame_face_name_list[k] = self.face_name_known_list[similar_person_num]
logging.debug("Face recognition result: %s", self.face_name_known_list[similar_person_num])
else:
print("Unknown person")
logging.debug("Face recognition result: Unknown person")
logging.debug("\n")
# 矩形框 / Draw rectangle
for kk, d in enumerate(faces):
# 绘制矩形框
cv2.rectangle(img_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]),
(0, 255, 255), 2)
print('\n')
(255, 255, 255), 2)
self.current_frame_face_cnt = len(faces)
self.faces_cnt = len(faces)
# 7. 在这里更改显示的人名 / Modify name if needed
self.modify_name_camera_list()
# self.show_chinese_name()
# 8. 写名字 / Draw name
# self.draw_name(img_rd)
img_with_name = self.draw_name(img_rd)
else:
img_with_name = img_rd
print("Faces in camera now:", self.name_camera_list, "\n")
logging.debug("Faces in camera now: %s", self.current_frame_face_name_list)
cv2.imshow("camera", img_with_name)
# 9. 更新 FPS / Update stream FPS
self.update_fps()
logging.debug("Frame ends\n\n")
# OpenCV 调用摄像头并进行 process
def run(self):
cap = cv2.VideoCapture(0)
cap.set(3, 480)
# cap = cv2.VideoCapture("video.mp4") # Get video stream from video file
cap = cv2.VideoCapture("0") # Get video stream from camera
cap.set(3, 480) # 640x480
self.process(cap)
cap.release()
@ -214,9 +215,11 @@ class Face_Recognizer:
def main():
# logging.basicConfig(level=logging.DEBUG) # Set log level to 'logging.DEBUG' to print debug info of every frame
logging.basicConfig(level=logging.INFO)
Face_Recognizer_con = Face_Recognizer()
Face_Recognizer_con.run()
if __name__ == '__main__':
main()
main()

306
face_reco_from_camera_ot.py Normal file

@ -0,0 +1,306 @@
# Copyright (C) 2018-2021 coneypo
# SPDX-License-Identifier: MIT
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# 利用 OT 人脸追踪, 进行人脸实时识别 / Real-time face detection and recognition via OT for multi faces
# 检测 -> 识别人脸, 新人脸出现 -> 不需要识别, 而是利用质心追踪来判断识别结果 / Detect every frame -> recognize faces only when needed; otherwise use centroid tracking to carry the result
# 人脸进行再识别需要花费大量时间, 这里用 OT 做跟踪 / Re-recognizing faces every frame is time-consuming, so OT tracking is used instead
import dlib
import numpy as np
import cv2
import os
import pandas as pd
import time
import logging
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# Dlib Resnet 人脸识别模型, 提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
class Face_Recognizer:
def __init__(self):
self.font = cv2.FONT_ITALIC
# FPS
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
self.fps_show = 0
self.start_time = time.time()
# cnt for frame
self.frame_cnt = 0
# 用来存放所有录入人脸特征的数组 / Save the features of faces in the database
self.face_features_known_list = []
# 存储录入人脸名字 / Save the name of faces in the database
self.face_name_known_list = []
# 用来存储上一帧和当前帧 ROI 的质心坐标 / List to save centroid positions of ROI in frame N-1 and N
self.last_frame_face_centroid_list = []
self.current_frame_face_centroid_list = []
# 用来存储上一帧和当前帧检测出目标的名字 / List to save names of objects in frame N-1 and N
self.last_frame_face_name_list = []
self.current_frame_face_name_list = []
# 上一帧和当前帧中人脸数的计数器 / cnt for faces in frame N-1 and N
self.last_frame_face_cnt = 0
self.current_frame_face_cnt = 0
# 用来存放进行识别时候对比的欧氏距离 / Save the e-distance for faceX when recognizing
self.current_frame_face_X_e_distance_list = []
# 存储当前摄像头中捕获到的所有人脸的坐标名字 / Save the positions and names of current faces captured
self.current_frame_face_position_list = []
# 存储当前摄像头中捕获到的人脸特征 / Save the features of people in current frame
self.current_frame_face_feature_list = []
# e distance between centroid of ROI in last and current frame
self.last_current_frame_centroid_e_distance = 0
# 控制再识别的后续帧数 / Reclassify after 'reclassify_interval' frames
# 如果识别出 "unknown" 的脸, 将在 reclassify_interval_cnt 计数到 reclassify_interval 后, 对于人脸进行重新识别
self.reclassify_interval_cnt = 0
self.reclassify_interval = 10
# 从 "features_all.csv" 读取录入人脸特征 / Get known faces from "features_all.csv"
def get_face_database(self):
if os.path.exists("data/features_all.csv"):
path_features_known_csv = "data/features_all.csv"
csv_rd = pd.read_csv(path_features_known_csv, header=None)
for i in range(csv_rd.shape[0]):
features_someone_arr = []
self.face_name_known_list.append(csv_rd.iloc[i][0])
for j in range(1, 129):
if csv_rd.iloc[i][j] == '':
features_someone_arr.append('0')
else:
features_someone_arr.append(csv_rd.iloc[i][j])
self.face_features_known_list.append(features_someone_arr)
logging.info("Faces in Database: %d", len(self.face_features_known_list))
return 1
else:
logging.warning("'features_all.csv' not found!")
logging.warning("Please run 'get_faces_from_camera.py' "
"and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'")
return 0
def update_fps(self):
now = time.time()
# 每秒刷新 fps / Refresh fps per second
if str(self.start_time).split(".")[0] != str(now).split(".")[0]:
self.fps_show = self.fps
self.start_time = now
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
@staticmethod
# 计算两个128D向量间的欧式距离 / Compute the e-distance between two 128D features
def return_euclidean_distance(feature_1, feature_2):
feature_1 = np.array(feature_1)
feature_2 = np.array(feature_2)
dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))
return dist
# 使用质心追踪来识别人脸 / Use centroid tracker to link face_x in current frame with person_x in last frame
def centroid_tracker(self):
for i in range(len(self.current_frame_face_centroid_list)):
e_distance_current_frame_person_x_list = []
# 对于当前帧中的人脸1, 和上一帧中的 人脸1/2/3/4/.. 进行欧氏距离计算 / For object 1 in current_frame, compute e-distance with object 1/2/3/4/... in last frame
for j in range(len(self.last_frame_face_centroid_list)):
self.last_current_frame_centroid_e_distance = self.return_euclidean_distance(
self.current_frame_face_centroid_list[i], self.last_frame_face_centroid_list[j])
e_distance_current_frame_person_x_list.append(
self.last_current_frame_centroid_e_distance)
last_frame_num = e_distance_current_frame_person_x_list.index(
min(e_distance_current_frame_person_x_list))
self.current_frame_face_name_list[i] = self.last_frame_face_name_list[last_frame_num]
# 生成的 cv2 window 上面添加说明文字 / putText on cv2 window
def draw_note(self, img_rd):
# 添加说明 / Add some info on windows
cv2.putText(img_rd, "Face Recognizer with OT", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Frame: " + str(self.frame_cnt), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 130), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.current_frame_face_cnt), (20, 160), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
for i in range(len(self.current_frame_face_name_list)):
img_rd = cv2.putText(img_rd, "Face_" + str(i + 1), tuple(
[int(self.current_frame_face_centroid_list[i][0]), int(self.current_frame_face_centroid_list[i][1])]),
self.font,
0.8, (255, 190, 0),
1,
cv2.LINE_AA)
# 处理获取的视频流, 进行人脸识别 / Face detection and recognition with OT from input video stream
def process(self, stream):
# 1. 读取存放所有人脸特征的 csv / Get faces known from "features.all.csv"
if self.get_face_database():
while stream.isOpened():
self.frame_cnt += 1
logging.debug("Frame " + str(self.frame_cnt) + " starts")
flag, img_rd = stream.read()
kk = cv2.waitKey(1)
# 2. 检测人脸 / Detect faces for frame X
faces = detector(img_rd, 0)
# 3. 更新人脸计数器 / Update cnt for faces in frames
self.last_frame_face_cnt = self.current_frame_face_cnt
self.current_frame_face_cnt = len(faces)
# 4. 更新上一帧中的人脸列表 / Update the face name list in last frame
self.last_frame_face_name_list = self.current_frame_face_name_list[:]
# 5. 更新上一帧和当前帧的质心列表 / update frame centroid list
self.last_frame_face_centroid_list = self.current_frame_face_centroid_list
self.current_frame_face_centroid_list = []
# 6.1 如果当前帧和上一帧人脸数没有变化 / if cnt not changes
if (self.current_frame_face_cnt == self.last_frame_face_cnt) and (
self.reclassify_interval_cnt != self.reclassify_interval):
logging.debug("scene 1: 当前帧和上一帧相比没有发生人脸数变化 / No face cnt changes in this frame!!!")
self.current_frame_face_position_list = []
if "unknown" in self.current_frame_face_name_list:
logging.debug(" 有未知人脸, 开始进行 reclassify_interval_cnt 计数")
self.reclassify_interval_cnt += 1
if self.current_frame_face_cnt != 0:
for k, d in enumerate(faces):
self.current_frame_face_position_list.append(tuple(
[faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
self.current_frame_face_centroid_list.append(
[int(faces[k].left() + faces[k].right()) / 2,
int(faces[k].top() + faces[k].bottom()) / 2])
img_rd = cv2.rectangle(img_rd,
tuple([d.left(), d.top()]),
tuple([d.right(), d.bottom()]),
(255, 255, 255), 2)
# 如果当前帧中有多个人脸, 使用质心追踪 / Multi-faces in current frame, use centroid-tracker to track
if self.current_frame_face_cnt != 1:
self.centroid_tracker()
for i in range(self.current_frame_face_cnt):
# 6.2 Write names under ROI
img_rd = cv2.putText(img_rd, self.current_frame_face_name_list[i],
self.current_frame_face_position_list[i], self.font, 0.8, (0, 255, 255), 1,
cv2.LINE_AA)
self.draw_note(img_rd)
# 6.2 如果当前帧和上一帧人脸数发生变化 / If cnt of faces changes, 0->1 or 1->0 or ...
else:
logging.debug("scene 2: 当前帧和上一帧相比人脸数发生变化 / Faces cnt changes in this frame")
self.current_frame_face_position_list = []
self.current_frame_face_X_e_distance_list = []
self.current_frame_face_feature_list = []
self.reclassify_interval_cnt = 0
# 6.2.1 人脸数减少 / Face cnt decreases: 1->0, 2->1, ...
if self.current_frame_face_cnt == 0:
logging.debug(" scene 2.1 人脸消失, 当前帧中没有人脸 / No faces in this frame!!!")
# clear list of names and features
self.current_frame_face_name_list = []
# 6.2.2 人脸数增加 / Face cnt increase: 0->1, 0->2, ..., 1->2, ...
else:
logging.debug(" scene 2.2 出现人脸, 进行人脸识别 / Get faces in this frame and do face recognition")
self.current_frame_face_name_list = []
for i in range(len(faces)):
shape = predictor(img_rd, faces[i])
self.current_frame_face_feature_list.append(
face_reco_model.compute_face_descriptor(img_rd, shape))
self.current_frame_face_name_list.append("unknown")
# 6.2.2.1 遍历捕获到的图像中所有的人脸 / Traverse all faces captured in current frame
for k in range(len(faces)):
logging.debug(" For face %d in current frame:", k + 1)
self.current_frame_face_centroid_list.append(
[int(faces[k].left() + faces[k].right()) / 2,
int(faces[k].top() + faces[k].bottom()) / 2])
self.current_frame_face_X_e_distance_list = []
# 6.2.2.2 每个捕获人脸的名字坐标 / Positions of faces captured
self.current_frame_face_position_list.append(tuple(
[faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
# 6.2.2.3 对于某张人脸, 遍历所有存储的人脸特征
# For every faces detected, compare the faces in the database
for i in range(len(self.face_features_known_list)):
# 如果 q 数据不为空
if str(self.face_features_known_list[i][0]) != '0.0':
e_distance_tmp = self.return_euclidean_distance(
self.current_frame_face_feature_list[k],
self.face_features_known_list[i])
logging.debug(" with person %d, the e-distance: %f", i + 1, e_distance_tmp)
self.current_frame_face_X_e_distance_list.append(e_distance_tmp)
else:
# 空数据 person_X
self.current_frame_face_X_e_distance_list.append(999999999)
# 6.2.2.4 寻找出最小的欧式距离匹配 / Find the one with minimum e distance
similar_person_num = self.current_frame_face_X_e_distance_list.index(
min(self.current_frame_face_X_e_distance_list))
if min(self.current_frame_face_X_e_distance_list) < 0.4:
self.current_frame_face_name_list[k] = self.face_name_known_list[similar_person_num]
logging.debug(" Face recognition result: %s",
self.face_name_known_list[similar_person_num])
else:
logging.debug(" Face recognition result: Unknown person")
# 7. 生成的窗口添加说明文字 / Add note on cv2 window
self.draw_note(img_rd)
# cv2.imwrite("debug/debug_" + str(self.frame_cnt) + ".png", img_rd) # Dump current frame image if needed
# 8. 按下 'q' 键退出 / Press 'q' to exit
if kk == ord('q'):
break
self.update_fps()
cv2.namedWindow("camera", 1)
cv2.imshow("camera", img_rd)
logging.debug("Frame ends\n\n")
def run(self):
# cap = cv2.VideoCapture("video.mp4") # Get video stream from video file
cap = cv2.VideoCapture(0) # Get video stream from camera
self.process(cap)
cap.release()
cv2.destroyAllWindows()
def main():
# logging.basicConfig(level=logging.DEBUG) # Set log level to 'logging.DEBUG' to print debug info of every frame
logging.basicConfig(level=logging.INFO)
Face_Recognizer_con = Face_Recognizer()
Face_Recognizer_con.run()
if __name__ == '__main__':
main()

329
face_reco_from_camera_single_face.py Normal file

@ -0,0 +1,329 @@
# Copyright (C) 2018-2021 coneypo
# SPDX-License-Identifier: MIT
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# 单张人脸实时识别 / Real-time face detection and recognition for single face
# 检测 -> 识别人脸, 新人脸出现 -> 再识别, 不会对于每一帧都进行识别 / Detect -> recognize face; re-recognize only when a new face appears, not on every frame
# 其实对于单张人脸, 不需要 OT 进行跟踪, 对于新出现的人脸, 再识别一次就好了 / For a single face no OT is needed: just re-recognize once when a new face appears; OT is only used for multiple faces
import dlib
import numpy as np
import cv2
import os
import pandas as pd
import time
from PIL import Image, ImageDraw, ImageFont
import logging
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# Dlib Resnet 人脸识别模型, 提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
class Face_Recognizer:
def __init__(self):
self.font = cv2.FONT_ITALIC
self.font_chinese = ImageFont.truetype("simsun.ttc", 30)
# 统计 FPS / For FPS
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
self.fps_show = 0
self.start_time = time.time()
# 统计帧数 / cnt for frame
self.frame_cnt = 0
# 用来存储所有录入人脸特征的数组 / Save the features of faces in the database
self.features_known_list = []
# 用来存储录入人脸名字 / Save the name of faces in the database
self.face_name_known_list = []
# 用来存储上一帧和当前帧 ROI 的质心坐标 / List to save centroid positions of ROI in frame N-1 and N
self.last_frame_centroid_list = []
self.current_frame_centroid_list = []
# 用来存储当前帧检测出目标的名字 / List to save names of objects in current frame
self.current_frame_name_list = []
# 上一帧和当前帧中人脸数的计数器 / cnt for faces in frame N-1 and N
self.last_frame_faces_cnt = 0
self.current_frame_face_cnt = 0
# 用来存放进行识别时候对比的欧氏距离 / Save the e-distance for faceX when recognizing
self.current_frame_face_X_e_distance_list = []
# 存储当前摄像头中捕获到的所有人脸的坐标名字 / Save the positions and names of current faces captured
self.current_frame_face_position_list = []
# 存储当前摄像头中捕获到的人脸特征 / Save the features of people in current frame
self.current_frame_face_feature_list = []
# 控制再识别的后续帧数 / Reclassify after 'reclassify_interval' frames
# 如果识别出 "unknown" 的脸, 将在 reclassify_interval_cnt 计数到 reclassify_interval 后, 对于人脸进行重新识别
self.reclassify_interval_cnt = 0
self.reclassify_interval = 10
# 从 "features_all.csv" 读取录入人脸特征 / Get known faces from "features_all.csv"
def get_face_database(self):
if os.path.exists("data/features_all.csv"):
path_features_known_csv = "data/features_all.csv"
csv_rd = pd.read_csv(path_features_known_csv, header=None)
for i in range(csv_rd.shape[0]):
features_someone_arr = []
self.face_name_known_list.append(csv_rd.iloc[i][0])
for j in range(1, 129):
if csv_rd.iloc[i][j] == '':
features_someone_arr.append('0')
else:
features_someone_arr.append(csv_rd.iloc[i][j])
self.features_known_list.append(features_someone_arr)
logging.info("Faces in Database: %d", len(self.features_known_list))
return 1
else:
logging.warning("'features_all.csv' not found!")
logging.warning("Please run 'get_faces_from_camera.py' "
"and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'")
return 0
# 获取处理之后 stream 的帧数 / Update FPS of video stream
def update_fps(self):
now = time.time()
# 每秒刷新 fps / Refresh fps per second
if str(self.start_time).split(".")[0] != str(now).split(".")[0]:
self.fps_show = self.fps
self.start_time = now
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
# 计算两个128D向量间的欧式距离 / Compute the e-distance between two 128D features
@staticmethod
def return_euclidean_distance(feature_1, feature_2):
feature_1 = np.array(feature_1)
feature_2 = np.array(feature_2)
dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))
return dist
# 生成的 cv2 window 上面添加说明文字 / putText on cv2 window
def draw_note(self, img_rd):
# 添加说明 / Add some info on windows
cv2.putText(img_rd, "Face Recognizer for single face", (20, 40), self.font, 1, (255, 255, 255), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Frame: " + str(self.frame_cnt), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps_show.__round__(2)), (20, 130), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.current_frame_face_cnt), (20, 160), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
def draw_name(self, img_rd):
# 在人脸框下面写人脸名字 / Write names under ROI
logging.debug(self.current_frame_name_list)
img = Image.fromarray(cv2.cvtColor(img_rd, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
draw.text(xy=self.current_frame_face_position_list[0], text=self.current_frame_name_list[0], font=self.font_chinese,
fill=(255, 255, 0))
img_rd = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
return img_rd
def show_chinese_name(self):
if self.current_frame_face_cnt >= 1:
logging.debug(self.face_name_known_list)
# 修改录入的人脸姓名 / Modify names in face_name_known_list to chinese name
self.face_name_known_list[0] = '张三'.encode('utf-8').decode()
# self.face_name_known_list[1] = '张四'.encode('utf-8').decode()
# 处理获取的视频流, 进行人脸识别 / Face detection and recognition from input video stream
def process(self, stream):
# 1. 读取存放所有人脸特征的 csv / Get faces known from "features.all.csv"
if self.get_face_database():
while stream.isOpened():
self.frame_cnt += 1
logging.debug("Frame " + str(self.frame_cnt) + " starts")
flag, img_rd = stream.read()
kk = cv2.waitKey(1)
# 2. 检测人脸 / Detect faces for frame X
faces = detector(img_rd, 0)
# 3. 更新帧中的人脸数 / Update cnt for faces in frames
self.last_frame_faces_cnt = self.current_frame_face_cnt
self.current_frame_face_cnt = len(faces)
# 4.1 当前帧和上一帧相比没有发生人脸数变化 / If cnt not changes, 1->1 or 0->0
if self.current_frame_face_cnt == self.last_frame_faces_cnt:
logging.debug("scene 1: 当前帧和上一帧相比没有发生人脸数变化 / No face cnt changes in this frame!!!")
if "unknown" in self.current_frame_name_list:
logging.debug(" >>> 有未知人脸, 开始进行 reclassify_interval_cnt 计数")
self.reclassify_interval_cnt += 1
# 4.1.1 当前帧一张人脸 / One face in this frame
if self.current_frame_face_cnt == 1:
if self.reclassify_interval_cnt == self.reclassify_interval:
logging.debug(" scene 1.1 需要对于当前帧重新进行人脸识别 / Re-classify for current frame")
self.reclassify_interval_cnt = 0
self.current_frame_face_feature_list = []
self.current_frame_face_X_e_distance_list = []
self.current_frame_name_list = []
for i in range(len(faces)):
shape = predictor(img_rd, faces[i])
self.current_frame_face_feature_list.append(
face_reco_model.compute_face_descriptor(img_rd, shape))
# a. 遍历捕获到的图像中所有的人脸 / Traverse all faces captured in current frame
for k in range(len(faces)):
self.current_frame_name_list.append("unknown")
# b. 每个捕获人脸的名字坐标 / Positions of faces captured
self.current_frame_face_position_list.append(tuple(
[faces[k].left(),
int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
# c. 对于某张人脸, 遍历所有存储的人脸特征 / For every face detected, compare it with all the faces in the database
for i in range(len(self.features_known_list)):
# 如果 person_X 数据不为空 / If the data of person_X is not empty
if str(self.features_known_list[i][0]) != '0.0':
e_distance_tmp = self.return_euclidean_distance(
self.current_frame_face_feature_list[k],
self.features_known_list[i])
logging.debug(" with person %d, the e-distance: %f", i + 1, e_distance_tmp)
self.current_frame_face_X_e_distance_list.append(e_distance_tmp)
else:
# 空数据 person_X / For empty data
self.current_frame_face_X_e_distance_list.append(999999999)
# d. 寻找出最小的欧式距离匹配 / Find the one with minimum e distance
similar_person_num = self.current_frame_face_X_e_distance_list.index(
min(self.current_frame_face_X_e_distance_list))
if min(self.current_frame_face_X_e_distance_list) < 0.4:
# 在这里更改显示的人名 / Modify name if needed
self.show_chinese_name()
self.current_frame_name_list[k] = self.face_name_known_list[similar_person_num]
logging.debug(" recognition result for face %d: %s", k + 1,
self.face_name_known_list[similar_person_num])
else:
logging.debug(" recognition result for face %d: %s", k + 1, "unknown")
else:
logging.debug(
" scene 1.2 不需要对于当前帧重新进行人脸识别 / No re-classification needed for current frame")
# 获取特征框坐标 / Get ROI positions
for k, d in enumerate(faces):
cv2.rectangle(img_rd,
tuple([d.left(), d.top()]),
tuple([d.right(), d.bottom()]),
(255, 255, 255), 2)
self.current_frame_face_position_list[k] = tuple(
[faces[k].left(),
int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)])
img_rd = self.draw_name(img_rd)
# 4.2 当前帧和上一帧相比发生人脸数变化 / If face cnt changes, 1->0 or 0->1
else:
logging.debug("scene 2: 当前帧和上一帧相比人脸数发生变化 / Faces cnt changes in this frame")
self.current_frame_face_position_list = []
self.current_frame_face_X_e_distance_list = []
self.current_frame_face_feature_list = []
# 4.2.1 人脸数从 0->1 / Face cnt 0->1
if self.current_frame_face_cnt == 1:
logging.debug(" scene 2.1 出现人脸, 进行人脸识别 / Get faces in this frame and do face recognition")
self.current_frame_name_list = []
for i in range(len(faces)):
shape = predictor(img_rd, faces[i])
self.current_frame_face_feature_list.append(
face_reco_model.compute_face_descriptor(img_rd, shape))
# a. 遍历捕获到的图像中所有的人脸 / Traverse all the faces captured in this frame
for k in range(len(faces)):
self.current_frame_name_list.append("unknown")
# b. 每个捕获人脸的名字坐标 / Positions of faces captured
self.current_frame_face_position_list.append(tuple(
[faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
# c. 对于某张人脸, 遍历所有存储的人脸特征 / For every face detected, compare it with all the faces in database
for i in range(len(self.features_known_list)):
# 如果 person_X 数据不为空 / If data of person_X is not empty
if str(self.features_known_list[i][0]) != '0.0':
e_distance_tmp = self.return_euclidean_distance(
self.current_frame_face_feature_list[k],
self.features_known_list[i])
logging.debug(" with person %d, the e-distance: %f", i + 1, e_distance_tmp)
self.current_frame_face_X_e_distance_list.append(e_distance_tmp)
else:
# 空数据 person_X / Empty data for person_X
self.current_frame_face_X_e_distance_list.append(999999999)
# d. 寻找出最小的欧式距离匹配 / Find the one with minimum e distance
similar_person_num = self.current_frame_face_X_e_distance_list.index(
min(self.current_frame_face_X_e_distance_list))
if min(self.current_frame_face_X_e_distance_list) < 0.4:
# 在这里更改显示的人名 / Modify name if needed
self.show_chinese_name()
self.current_frame_name_list[k] = self.face_name_known_list[similar_person_num]
logging.debug(" recognition result for face %d: %s", k + 1,
self.face_name_known_list[similar_person_num])
else:
logging.debug(" recognition result for face %d: %s", k + 1, "unknown")
if "unknown" in self.current_frame_name_list:
self.reclassify_interval_cnt += 1
# 4.2.2 人脸数从 1->0 / Face cnt 1->0
elif self.current_frame_face_cnt == 0:
logging.debug(" scene 2.2 人脸消失, 当前帧中没有人脸 / No face in this frame!!!")
self.reclassify_interval_cnt = 0
self.current_frame_name_list = []
self.current_frame_face_feature_list = []
# 5. 生成的窗口添加说明文字 / Add note on cv2 window
self.draw_note(img_rd)
if kk == ord('q'):
break
self.update_fps()
cv2.namedWindow("camera", 1)
cv2.imshow("camera", img_rd)
logging.debug("Frame ends\n\n")
def run(self):
# cap = cv2.VideoCapture("video.mp4") # Get video stream from video file
cap = cv2.VideoCapture(0) # Get video stream from camera
self.process(cap)
cap.release()
cv2.destroyAllWindows()
def main():
# logging.basicConfig(level=logging.DEBUG) # Set log level to 'logging.DEBUG' to print debug info of every frame
logging.basicConfig(level=logging.INFO)
Face_Recognizer_con = Face_Recognizer()
Face_Recognizer_con.run()
if __name__ == '__main__':
main()
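The decision logic above is a nearest-neighbour search over the 128D features stored in data/features_all.csv, accepted only when the minimum Euclidean distance falls below 0.4. A minimal standalone sketch of that step, assuming the CSV was produced by features_extraction_to_csv.py below (each row: person name followed by 128 features, no header):

import csv
import numpy as np

def load_face_database(csv_path="data/features_all.csv"):
    # 每行: 人名 + 128D 特征 / Each row: person name + 128D features
    names, features = [], []
    with open(csv_path, "r") as csvfile:
        for row in csv.reader(csvfile):
            names.append(row[0])
            features.append(np.array(row[1:], dtype=float))
    return names, features

def recognize(face_descriptor, names, features, threshold=0.4):
    # 取与库中所有人脸的最小欧式距离 / Keep the minimum e-distance against every known face
    distances = [np.linalg.norm(np.array(face_descriptor) - f) for f in features]
    idx = int(np.argmin(distances))
    return names[idx] if distances[idx] < threshold else "unknown"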

View File

@ -1,94 +1,106 @@
# 从人脸图像文件中提取人脸特征存入 CSV
# Features extraction from images and save into features_all.csv
# Copyright (C) 2018-2021 coneypo
# SPDX-License-Identifier: MIT
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# Created at 2018-05-11
# Updated at 2020-04-02
# 从人脸图像文件中提取人脸特征存入 "features_all.csv" / Extract features from images and save into "features_all.csv"
import os
import dlib
from skimage import io
import csv
import numpy as np
import logging
import cv2
# 要读取人脸图像文件的路径
# 要读取人脸图像文件的路径 / Path of cropped faces
path_images_from_camera = "data/data_faces_from_camera/"
# 1. Dlib 正向人脸检测器
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
# 2. Dlib 人脸 landmark 特征点检测器
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# 3. Dlib Resnet 人脸识别模型,提取 128D 的特征矢量
# Dlib Resnet 人脸识别模型,提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
# 返回单张图像的 128D 特征
# 返回单张图像的 128D 特征 / Return 128D features for single image
# Input: path_img <class 'str'>
# Output: face_descriptor <class 'dlib.vector'>
def return_128d_features(path_img):
img_rd = io.imread(path_img)
img_rd = cv2.imread(path_img)
faces = detector(img_rd, 1)
print("%-40s %-20s" % ("检测到人脸的图像 / Image with faces detected:", path_img), '\n')
logging.info("%-40s %-20s", "检测到人脸的图像 / Image with faces detected:", path_img)
# 因为有可能截下来的人脸再去检测,检测不出来人脸了
# 所以要确保是 检测到人脸的人脸图像 拿去算特征
# 因为有可能截下来的人脸再去检测,检测不出来人脸了, 所以要确保是 检测到人脸的人脸图像拿去算特征
# For photos of faces saved, we need to make sure that we can detect faces from the cropped images
if len(faces) != 0:
shape = predictor(img_rd, faces[0])
face_descriptor = face_reco_model.compute_face_descriptor(img_rd, shape)
else:
face_descriptor = 0
print("no face")
logging.warning("no face")
return face_descriptor
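# 用法示意 (路径仅为示例) / Usage sketch (the path below is only an example):
#   descriptor = return_128d_features("data/data_faces_from_camera/person_1/img_face_1.jpg")
#   if descriptor != 0:
#       print(len(list(descriptor)))   # 128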
# 将文件夹中照片特征提取出来, 写入 CSV / Extract features from the photos in the folder and write them into the CSV
def return_features_mean_personX(path_faces_personX):
# 返回 personX 的 128D 特征均值 / Return the mean value of 128D face descriptor for person X
# Input: path_face_personX <class 'str'>
# Output: features_mean_personX <class 'numpy.ndarray'>
def return_features_mean_personX(path_face_personX):
features_list_personX = []
photos_list = os.listdir(path_faces_personX)
photos_list = os.listdir(path_face_personX)
if photos_list:
for i in range(len(photos_list)):
# 调用return_128d_features()得到128d特征
print("%-40s %-20s" % ("正在读的人脸图像 / Image to read:", path_faces_personX + "/" + photos_list[i]))
features_128d = return_128d_features(path_faces_personX + "/" + photos_list[i])
# print(features_128d)
# 遇到没有检测出人脸的图片跳过
# 调用 return_128d_features() 得到 128D 特征 / Get 128D features for single image of personX
logging.info("%-40s %-20s", "正在读的人脸图像 / Reading image:", path_face_personX + "/" + photos_list[i])
features_128d = return_128d_features(path_face_personX + "/" + photos_list[i])
# 遇到没有检测出人脸的图片跳过 / Jump if no face detected from image
if features_128d == 0:
i += 1
else:
features_list_personX.append(features_128d)
else:
print("文件夹内图像文件为空 / Warning: No images in " + path_faces_personX + '/', '\n')
logging.warning("文件夹内图像文件为空 / Warning: No images in%s/", path_face_personX)
# 计算 128D 特征的均值
# 计算 128D 特征的均值 / Compute the mean
# personX 的 N 张图像 x 128D -> 1 x 128D
if features_list_personX:
features_mean_personX = np.array(features_list_personX).mean(axis=0)
features_mean_personX = np.array(features_list_personX, dtype=object).mean(axis=0)
else:
features_mean_personX = np.zeros(128, dtype=int, order='C')
return features_mean_personX
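# 例如 person_1 录入了 3 张截图, features_list_personX 为 3 x 128, 上面取均值后得到这个人唯一的 128D 特征
# e.g. with 3 screenshots saved for person_1, features_list_personX is 3 x 128 and the mean above yields one 128D feature for that person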
# 获取已录入的最后一个人脸序号 / get the num of latest person
person_list = os.listdir("data/data_faces_from_camera/")
person_num_list = []
for person in person_list:
person_num_list.append(int(person.split('_')[-1]))
person_cnt = max(person_num_list)
def main():
logging.basicConfig(level=logging.INFO)
# 获取已录入的最后一个人脸序号 / Get the order of latest person
person_list = os.listdir("data/data_faces_from_camera/")
person_list.sort()
with open("data/features_all.csv", "w", newline="") as csvfile:
writer = csv.writer(csvfile)
for person in range(person_cnt):
# Get the mean/average features of face/personX, it will be a list with a length of 128D
print(path_images_from_camera + "person_" + str(person + 1))
features_mean_personX = return_features_mean_personX(path_images_from_camera + "person_" + str(person + 1))
writer.writerow(features_mean_personX)
print("特征均值 / The mean of features:", list(features_mean_personX))
print('\n')
print("所有录入人脸数据存入 / Save all the features of faces registered into: data/features_all.csv")
with open("data/features_all.csv", "w", newline="") as csvfile:
writer = csv.writer(csvfile)
for person in person_list:
# Get the mean/average features of face/personX, it will be a list with a length of 128D
logging.info("%sperson_%s", path_images_from_camera, person)
features_mean_personX = return_features_mean_personX(path_images_from_camera + person)
if len(person.split('_', 2)) == 2:
# "person_x"
person_name = person
else:
# "person_x_tom"
person_name = person.split('_', 2)[-1]
features_mean_personX = np.insert(features_mean_personX, 0, person_name, axis=0)
# features_mean_personX will be 129D, person name + 128 features
writer.writerow(features_mean_personX)
logging.info('\n')
logging.info("所有录入人脸数据存入 / Save all the features of faces registered into: data/features_all.csv")
if __name__ == '__main__':
main()
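For clarity, the person_name written into column 0 above is derived from the folder name: a folder registered without a name keeps the whole "person_x", while "person_x_tom" keeps only the part after the second underscore. A tiny sketch of that rule:

def folder_to_person_name(folder):
    # "person_3" -> "person_3", "person_3_tom" -> "tom"
    parts = folder.split('_', 2)
    return folder if len(parts) == 2 else parts[-1]

print(folder_to_person_name("person_3"), folder_to_person_name("person_3_tom"))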

View File

@ -1,22 +1,22 @@
# 进行人脸录入 / face register
# 录入多张人脸 / support multi-faces
# Copyright (C) 2018-2021 coneypo
# SPDX-License-Identifier: MIT
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# Created at 2018-05-11
# Updated at 2020-04-19
# 进行人脸录入 / Face register
import dlib # 人脸处理的库 Dlib
import numpy as np # 数据处理的库 Numpy
import cv2 # 图像处理的库 OpenCV
import os # 读写文件
import shutil # 读写文件
import dlib
import numpy as np
import cv2
import os
import shutil
import time
import logging
# Dlib 正向人脸检测器
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
@ -25,28 +25,29 @@ class Face_Register:
self.path_photos_from_camera = "data/data_faces_from_camera/"
self.font = cv2.FONT_ITALIC
self.existing_faces_cnt = 0 # 已录入的人脸计数器
self.ss_cnt = 0 # 录入 personX 人脸时图片计数器
self.faces_cnt = 0 # 录入人脸计数器
self.existing_faces_cnt = 0 # 已录入的人脸计数器 / cnt for counting saved faces
self.ss_cnt = 0 # 录入 personX 人脸时图片计数器 / cnt for screen shots
self.current_frame_faces_cnt = 0 # 录入人脸计数器 / cnt for counting faces in current frame
# 之后用来控制是否保存图像的 flag / The flag to control if save
self.save_flag = 1
# 之后用来检查是否先按 'n' 再按 's' / The flag to check if press 'n' before 's'
self.press_n_flag = 0
self.save_flag = 1 # 之后用来控制是否保存图像的 flag / The flag to control if save
self.press_n_flag = 0 # 之后用来检查是否先按 'n' 再按 's' / The flag to check if press 'n' before 's'
# FPS
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
self.fps_show = 0
self.start_time = time.time()
# 新建保存人脸图像文件和数据CSV文件夹 / Mkdir for saving photos and csv
# 新建保存人脸图像文件和数据 CSV 文件夹 / Mkdir for saving photos and csv
def pre_work_mkdir(self):
# 新建文件夹 / make folders to save faces images and csv
# 新建文件夹 / Create folders to save face images and csv
if os.path.isdir(self.path_photos_from_camera):
pass
else:
os.mkdir(self.path_photos_from_camera)
# 删除之前存的人脸数据文件夹 / Delete the old data of faces
# 删除之前存的人脸数据文件夹 / Delete old face folders
def pre_work_del_old_face_folders(self):
# 删除之前存的人脸数据文件夹, 删除 "/data_faces_from_camera/person_x/"... / Delete the saved folders "/data_faces_from_camera/person_x/"...
folders_rd = os.listdir(self.path_photos_from_camera)
@ -55,47 +56,51 @@ class Face_Register:
if os.path.isfile("data/features_all.csv"):
os.remove("data/features_all.csv")
# 如果有之前录入的人脸, 在之前 person_x 的序号按照 person_x+1 开始录入 /
# If the old folders exists, start from person_x+1
# 如果有之前录入的人脸, 在之前 person_x 的序号按照 person_x+1 开始录入 / Start from person_x+1
def check_existing_faces_cnt(self):
if os.listdir("data/data_faces_from_camera/"):
# 获取已录入的最后一个人脸序号 / Get the num of latest person
# 获取已录入的最后一个人脸序号 / Get the order of latest person
person_list = os.listdir("data/data_faces_from_camera/")
person_num_list = []
for person in person_list:
person_num_list.append(int(person.split('_')[-1]))
self.existing_faces_cnt = max(person_num_list)
# 如果第一次存储或者没有之前录入的人脸, 按照 person_1 开始录入
# Start from person_1
# 如果第一次存储或者没有之前录入的人脸, 按照 person_1 开始录入 / Start from person_1
else:
self.existing_faces_cnt = 0
# 获取处理之后 stream 的帧数 / Get the fps of video stream
# 更新 FPS / Update FPS of Video stream
def update_fps(self):
now = time.time()
# 每秒刷新 fps / Refresh fps per second
if str(self.start_time).split(".")[0] != str(now).split(".")[0]:
self.fps_show = self.fps
self.start_time = now
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
# 生成的 cv2 window 上面添加说明文字 / putText on cv2 window
# 生成的 cv2 window 上面添加说明文字 / PutText on cv2 window
def draw_note(self, img_rd):
# 添加说明 / Add some statements
# 添加说明 / Add some notes
cv2.putText(img_rd, "Face Register", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.putText(img_rd, "FPS: " + str(self.fps_show.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.faces_cnt), (20, 140), self.font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.current_frame_faces_cnt), (20, 140), self.font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "N: Create face folder", (20, 350), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "S: Save current face", (20, 400), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
# 获取人脸
# 获取人脸 / Main process of face detection and saving
def process(self, stream):
# 1. 新建储存人脸图像文件目录 / Uncomment if you need mkdir
# self.pre_work_mkdir()
# 1. 新建储存人脸图像文件目录 / Create folders to save photos
self.pre_work_mkdir()
# 2. 删除 "/data/data_faces_from_camera" 中已有人脸图像文件 / Uncomment if want to delete the old faces
self.pre_work_del_old_face_folders()
# 2. 删除 "/data/data_faces_from_camera" 中已有人脸图像文件
# / Uncomment if want to delete the saved faces and start from person_1
# if os.path.isdir(self.path_photos_from_camera):
# self.pre_work_del_old_face_folders()
# 3. 检查 "/data/data_faces_from_camera" 中已有人脸文件 / Check the existing face folders in "/data/data_faces_from_camera"
self.check_existing_faces_cnt()
@ -103,22 +108,21 @@ class Face_Register:
while stream.isOpened():
flag, img_rd = stream.read() # Get camera video stream
kk = cv2.waitKey(1)
faces = detector(img_rd, 0) # Use dlib face detector
faces = detector(img_rd, 0) # Use Dlib face detector
# 4. 按下 'n' 新建存储人脸的文件夹 / Press 'n' to create the folders for saving faces
if kk == ord('n'):
self.existing_faces_cnt += 1
current_face_dir = self.path_photos_from_camera + "person_" + str(self.existing_faces_cnt)
os.makedirs(current_face_dir)
print('\n')
print("新建的人脸文件夹 / Create folders: ", current_face_dir)
logging.info("\n%-40s %s", "新建的人脸文件夹 / Create folders:", current_face_dir)
self.ss_cnt = 0 # 将人脸计数器清零 / clear the cnt of faces
self.press_n_flag = 1 # 已经按下 'n' / have pressed 'n'
self.ss_cnt = 0 # 将人脸计数器清零 / Clear the cnt of screen shots
self.press_n_flag = 1 # 已经按下 'n' / Pressed 'n' already
# 5. 检测到人脸 / Face detected
if len(faces) != 0:
# 矩形框 / Show the HOG of faces
# 矩形框 / Show the ROI of faces
for k, d in enumerate(faces):
# 计算矩形框大小 / Compute the size of rectangle box
height = (d.bottom() - d.top())
@ -126,13 +130,13 @@ class Face_Register:
hh = int(height/2)
ww = int(width/2)
# 6. 判断人脸矩形框是否超出 480x640
# 6. 判断人脸矩形框是否超出 480x640 / If the size of ROI > 480x640
if (d.right()+ww) > 640 or (d.bottom()+hh > 480) or (d.left()-ww < 0) or (d.top()-hh < 0):
cv2.putText(img_rd, "OUT OF RANGE", (20, 300), self.font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
color_rectangle = (0, 0, 255)
save_flag = 0
if kk == ord('s'):
print("请调整位置 / Please adjust your position")
logging.warning("请调整位置 / Please adjust your position")
else:
color_rectangle = (255, 255, 255)
save_flag = 1
@ -142,7 +146,7 @@ class Face_Register:
tuple([d.right() + ww, d.bottom() + hh]),
color_rectangle, 2)
# 7. 根据人脸大小生成空的图像 / Create blank image according to the shape of face detected
# 7. 根据人脸大小生成空的图像 / Create blank image according to the size of face detected
img_blank = np.zeros((int(height*2), width*2, 3), np.uint8)
if save_flag:
@ -155,10 +159,12 @@ class Face_Register:
for jj in range(width*2):
img_blank[ii][jj] = img_rd[d.top()-hh + ii][d.left()-ww + jj]
cv2.imwrite(current_face_dir + "/img_face_" + str(self.ss_cnt) + ".jpg", img_blank)
print("写入本地 / Save into:", str(current_face_dir) + "/img_face_" + str(self.ss_cnt) + ".jpg")
logging.info("%-40s %s/img_face_%s.jpg", "写入本地 / Save into:",
str(current_face_dir), str(self.ss_cnt))
else:
print("请先按 'N' 来建文件夹, 按 'S' / Please press 'N' and press 'S'")
self.faces_cnt = len(faces)
logging.warning("请先按 'N' 来建文件夹, 按 'S' / Please press 'N' and press 'S'")
self.current_frame_faces_cnt = len(faces)
# 9. 生成的窗口添加说明文字 / Add note on cv2 window
self.draw_note(img_rd)
@ -167,12 +173,15 @@ class Face_Register:
if kk == ord('q'):
break
# 11. Update FPS
self.update_fps()
cv2.namedWindow("camera", 1)
cv2.imshow("camera", img_rd)
def run(self):
cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture("video.mp4") # Get video stream from video file
cap = cv2.VideoCapture(0) # Get video stream from camera
self.process(cap)
cap.release()
@ -180,6 +189,7 @@ class Face_Register:
def main():
logging.basicConfig(level=logging.INFO)
Face_Register_con = Face_Register()
Face_Register_con.run()
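For reference, the crop saved in the loops above is the detected face box padded by half its height and width on every side, giving a roughly 2h x 2w image. An essentially equivalent sketch using NumPy slicing (d is a dlib rectangle from the detector, img_rd the current frame, and the padded box is assumed to fit inside it):

def crop_face(img_rd, d):
    # 人脸框向四周各扩展一半高/宽 / Pad the detected box by half of its height and width on each side
    hh = int((d.bottom() - d.top()) / 2)
    ww = int((d.right() - d.left()) / 2)
    return img_rd[d.top() - hh:d.bottom() + hh, d.left() - ww:d.right() + ww].copy()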

View File

@ -0,0 +1,291 @@
from tkinter import *
from tkinter import font as tkFont
from PIL import Image, ImageTk
import dlib
import numpy as np
import cv2
import os
import shutil
import time
import logging
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
class Face_Register:
def __init__(self):
self.existing_faces_cnt = 0 # 已录入的人脸计数器 / cnt for counting saved faces
self.ss_cnt = 0 # 录入 person_n 人脸时图片计数器 / cnt for screen shots
self.current_frame_faces_cnt = 0 # 当前帧中人脸计数器 / cnt for counting faces in current frame
# Tkinter GUI
self.win = Tk()
self.win.title("Face Register @coneypo")
self.win.geometry("1300x550")
# GUI left part
self.frame_left_camera = Frame(self.win)
self.label = Label(self.win)
self.label.pack(side=LEFT)
self.frame_left_camera.pack()
# GUI right part
self.frame_right_info = Frame(self.win)
self.label_cnt_face_in_database = Label(self.frame_right_info, text=str(self.existing_faces_cnt))
self.label_fps_info = Label(self.frame_right_info, text="")
self.input_name = Entry(self.frame_right_info)
self.input_name_char = ""
self.label_warning = Label(self.frame_right_info)
self.label_face_cnt = Label(self.frame_right_info, text="Faces in current frame: ")
self.log_all = Label(self.frame_right_info)
self.font_title = tkFont.Font(family='Helvetica', size=20, weight='bold')
self.font_step_title = tkFont.Font(family='Helvetica', size=15, weight='bold')
self.font_warning = tkFont.Font(family='Helvetica', size=15, weight='bold')
self.path_photos_from_camera = "data/data_faces_from_camera/"
self.current_face_dir = ""
self.font = cv2.FONT_ITALIC
# Current frame and face ROI position
self.current_frame = np.ndarray
self.face_ROI_image = np.ndarray
self.face_ROI_width_start = 0
self.face_ROI_height_start = 0
self.face_ROI_width = 0
self.face_ROI_height = 0
self.ww = 0
self.hh = 0
self.out_of_range_flag = FALSE
self.face_folder_created_flag = FALSE
# FPS
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
self.fps_show = 0
self.start_time = time.time()
self.cap = cv2.VideoCapture(0) # Get video stream from camera
# self.cap = cv2.VideoCapture("test.mp4") # Input local video
# 删除之前存的人脸数据文件夹 / Delete old face folders
def GUI_clear_data(self):
# 删除之前存的人脸数据文件夹, 删除 "/data_faces_from_camera/person_x/"... / Delete the saved folders "/data_faces_from_camera/person_x/"...
folders_rd = os.listdir(self.path_photos_from_camera)
for i in range(len(folders_rd)):
shutil.rmtree(self.path_photos_from_camera + folders_rd[i])
if os.path.isfile("data/features_all.csv"):
os.remove("data/features_all.csv")
self.label_cnt_face_in_database['text'] = "0"
self.existing_faces_cnt = 0
self.log_all["text"] = "Face images and `features_all.csv` removed!"
def GUI_get_input_name(self):
self.input_name_char = self.input_name.get()
self.create_face_folder()
self.label_cnt_face_in_database['text'] = str(self.existing_faces_cnt)
def GUI_info(self):
Label(self.frame_right_info,
text="Face register",
font=self.font_title).grid(row=0, column=0, columnspan=3, sticky=W, padx=2, pady=20)
Label(self.frame_right_info,
text="FPS: ").grid(row=1, column=0, columnspan=2, sticky=W, padx=5, pady=2)
self.label_fps_info.grid(row=1, column=2, sticky=W, padx=5, pady=2)
Label(self.frame_right_info,
text="Faces in database: ").grid(row=2, column=0, columnspan=2, sticky=W, padx=5, pady=2)
self.label_cnt_face_in_database.grid(row=2, column=2, columnspan=3, sticky=W, padx=5, pady=2)
Label(self.frame_right_info,
text="Faces in current frame: ").grid(row=3, column=0, columnspan=2, sticky=W, padx=5, pady=2)
self.label_face_cnt.grid(row=3, column=2, columnspan=3, sticky=W, padx=5, pady=2)
self.label_warning.grid(row=4, column=0, columnspan=3, sticky=W, padx=5, pady=2)
# Step 1: Clear old data
Label(self.frame_right_info,
font=self.font_step_title,
text="Step 1: Clear face photos").grid(row=5, column=0, columnspan=2, sticky=W, padx=5, pady=20)
Button(self.frame_right_info,
text='Clear',
command=self.GUI_clear_data).grid(row=6, column=0, columnspan=3, sticky=W, padx=5, pady=2)
# Step 2: Input name and create folders for face
Label(self.frame_right_info,
font=self.font_step_title,
text="Step 2: Input name").grid(row=7, column=0, columnspan=2, sticky=W, padx=5, pady=20)
Label(self.frame_right_info, text="Name: ").grid(row=8, column=0, sticky=W, padx=5, pady=0)
self.input_name.grid(row=8, column=1, sticky=W, padx=0, pady=2)
Button(self.frame_right_info,
text='Input',
command=self.GUI_get_input_name).grid(row=8, column=2, padx=5)
# Step 3: Save current face in frame
Label(self.frame_right_info,
font=self.font_step_title,
text="Step 3: Save face image").grid(row=9, column=0, columnspan=2, sticky=W, padx=5, pady=20)
Button(self.frame_right_info,
text='Save current face',
command=self.save_current_face).grid(row=10, column=0, columnspan=3, sticky=W)
# Log
self.log_all.grid(row=11, column=0, columnspan=20, sticky=W, padx=5, pady=20)
self.frame_right_info.pack()
# 新建保存人脸图像文件和数据 CSV 文件夹 / Mkdir for saving photos and csv
def pre_work_mkdir(self):
# 新建文件夹 / Create folders to save face images and csv
if os.path.isdir(self.path_photos_from_camera):
pass
else:
os.mkdir(self.path_photos_from_camera)
# 如果有之前录入的人脸, 在之前 person_x 的序号按照 person_x+1 开始录入 / Start from person_x+1
def check_existing_faces_cnt(self):
if os.listdir("data/data_faces_from_camera/"):
# 获取已录入的最后一个人脸序号 / Get the order of latest person
person_list = os.listdir("data/data_faces_from_camera/")
person_num_list = []
for person in person_list:
person_order = person.split('_')[1].split('_')[0]
person_num_list.append(int(person_order))
self.existing_faces_cnt = max(person_num_list)
# 如果第一次存储或者没有之前录入的人脸, 按照 person_1 开始录入 / Start from person_1
else:
self.existing_faces_cnt = 0
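# 例如磁盘上已有 "person_1_tom" 和 "person_2" 时, existing_faces_cnt 为 2, 下一个人会存为 person_3 或 person_3_<name>
# e.g. with "person_1_tom" and "person_2" already on disk, existing_faces_cnt is 2 and the next person is saved as person_3 or person_3_<name>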
# 更新 FPS / Update FPS of Video stream
def update_fps(self):
now = time.time()
# 每秒刷新 fps / Refresh fps per second
if str(self.start_time).split(".")[0] != str(now).split(".")[0]:
self.fps_show = self.fps
self.start_time = now
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
self.label_fps_info["text"] = str(self.fps.__round__(2))
def create_face_folder(self):
# 新建存储人脸的文件夹 / Create the folders for saving faces
self.existing_faces_cnt += 1
if self.input_name_char:
self.current_face_dir = self.path_photos_from_camera + \
"person_" + str(self.existing_faces_cnt) + "_" + \
self.input_name_char
else:
self.current_face_dir = self.path_photos_from_camera + \
"person_" + str(self.existing_faces_cnt)
os.makedirs(self.current_face_dir)
self.log_all["text"] = "\"" + self.current_face_dir + "/\" created!"
logging.info("\n%-40s %s", "新建的人脸文件夹 / Create folders:", self.current_face_dir)
self.ss_cnt = 0 # 将人脸计数器清零 / Clear the cnt of screen shots
self.face_folder_created_flag = 1 # 已创建人脸文件夹 / Face folder has been created
def save_current_face(self):
if self.face_folder_created_flag:
if self.current_frame_faces_cnt == 1:
if not self.out_of_range_flag:
self.ss_cnt += 1
# 根据人脸大小生成空的图像 / Create blank image according to the size of face detected
self.face_ROI_image = np.zeros((int(self.face_ROI_height * 2), self.face_ROI_width * 2, 3),
np.uint8)
for ii in range(self.face_ROI_height * 2):
for jj in range(self.face_ROI_width * 2):
self.face_ROI_image[ii][jj] = self.current_frame[self.face_ROI_height_start - self.hh + ii][
self.face_ROI_width_start - self.ww + jj]
self.log_all["text"] = "\"" + self.current_face_dir + "/img_face_" + str(
self.ss_cnt) + ".jpg\"" + " saved!"
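# current_frame 在 get_frame() 中已转为 RGB, 这里再交换一次通道变回 BGR, 使 cv2.imwrite 保存的颜色正确
# current_frame was converted to RGB in get_frame(), so swap the channels back to BGR here so that cv2.imwrite saves the correct colors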
self.face_ROI_image = cv2.cvtColor(self.face_ROI_image, cv2.COLOR_BGR2RGB)
cv2.imwrite(self.current_face_dir + "/img_face_" + str(self.ss_cnt) + ".jpg", self.face_ROI_image)
logging.info("%-40s %s/img_face_%s.jpg", "写入本地 / Save into:",
str(self.current_face_dir), str(self.ss_cnt) + ".jpg")
else:
self.log_all["text"] = "Please do not out of range!"
else:
self.log_all["text"] = "No face in current frame!"
else:
self.log_all["text"] = "Please run step 2!"
def get_frame(self):
try:
if self.cap.isOpened():
ret, frame = self.cap.read()
return ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
except:
print("Error: No video input!!!")
# 获取人脸 / Main process of face detection and saving
def process(self):
ret, self.current_frame = self.get_frame()
faces = detector(self.current_frame, 0)
# Get frame
if ret:
self.update_fps()
self.label_face_cnt["text"] = str(len(faces))
# 检测到人脸 / Face detected
if len(faces) != 0:
# 矩形框 / Show the ROI of faces
for k, d in enumerate(faces):
self.face_ROI_width_start = d.left()
self.face_ROI_height_start = d.top()
# 计算矩形框大小 / Compute the size of rectangle box
self.face_ROI_height = (d.bottom() - d.top())
self.face_ROI_width = (d.right() - d.left())
self.hh = int(self.face_ROI_height / 2)
self.ww = int(self.face_ROI_width / 2)
# 判断人脸矩形框是否超出 480x640 / If the size of ROI > 480x640
if (d.right() + self.ww) > 640 or (d.bottom() + self.hh > 480) or (d.left() - self.ww < 0) or (
d.top() - self.hh < 0):
self.label_warning["text"] = "OUT OF RANGE"
self.label_warning['fg'] = 'red'
self.out_of_range_flag = TRUE
color_rectangle = (255, 0, 0)
else:
self.out_of_range_flag = FALSE
self.label_warning["text"] = ""
color_rectangle = (255, 255, 255)
self.current_frame = cv2.rectangle(self.current_frame,
tuple([d.left() - self.ww, d.top() - self.hh]),
tuple([d.right() + self.ww, d.bottom() + self.hh]),
color_rectangle, 2)
self.current_frame_faces_cnt = len(faces)
img = Image.fromarray(self.current_frame)
# Convert image to PhotoImage
imgtk = ImageTk.PhotoImage(image=img)
self.label.imgtk = imgtk
self.label.configure(image=imgtk)
self.win.after(20, self.process)
def run(self):
self.pre_work_mkdir()
self.check_existing_faces_cnt()
self.GUI_info()
self.process()
self.win.mainloop()
def main():
logging.basicConfig(level=logging.INFO)
Face_Register_con = Face_Register()
Face_Register_con.run()
if __name__ == '__main__':
main()
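The live preview above relies on Tkinter's after() loop: process() grabs a frame, converts it to a PIL ImageTk.PhotoImage, pushes it into the Label, and reschedules itself every 20 ms. A stripped-down sketch of just that loop (no face detection, camera index 0 assumed):

from tkinter import Tk, Label
from PIL import Image, ImageTk
import cv2

win = Tk()
label = Label(win)
label.pack()
cap = cv2.VideoCapture(0)

def show_frame():
    ret, frame = cap.read()
    if ret:
        # OpenCV 返回 BGR, Tk 显示需要 RGB / OpenCV gives BGR, convert to RGB for display
        imgtk = ImageTk.PhotoImage(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
        label.imgtk = imgtk          # keep a reference so the image is not garbage collected
        label.configure(image=imgtk)
    win.after(20, show_frame)        # reschedule, ~50 fps upper bound

show_frame()
win.mainloop()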

View File

@ -1,6 +1,3 @@
# OpenCV 调用摄像头 / Call the camera with OpenCV
# 默认调用笔记本摄像头 / Use the laptop camera by default
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
@ -34,7 +31,7 @@ cap = cv2.VideoCapture(0)
18. cv2.CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
"""
# The default shape of camera will be 640x480 in Windows or Ubuntu
# The default size of frame from camera will be 640x480 in Windows or Ubuntu
# So we will not set "cap.set" here, it doesn't work
# cap.set(propId=cv2.CAP_PROP_FRAME_WIDTH, value=cap.get(cv2.CAP_PROP_FRAME_WIDTH))
@ -84,4 +81,4 @@ while cap.isOpened():
cap.release()
# 删除建立的所有窗口 / Delete all the windows created
cv2.destroyAllWindows()
cv2.destroyAllWindows()
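Since cap.set() is often ignored by the backend, the actual frame size can simply be queried instead, for example:

import cv2

cap = cv2.VideoCapture(0)
# 查询摄像头实际输出的分辨率 / Query the resolution the camera actually delivers
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(w, h)   # typically 640 480
cap.release()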

Binary image files changed (previews not shown): new file introduction/face_reco.png added; several large screenshots of roughly 1.1-1.5 MiB each were removed or replaced with smaller ones of roughly 74 KiB-358 KiB.
View File

@ -1,3 +1,5 @@
dlib==19.17.0
numpy==1.15.1
scikit-image==0.14.0
numpy==1.21.3
scikit-image==0.18.3
pandas==1.3.4
opencv-python==4.5.4.58
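A quick sanity check that the installed versions match the list above (a sketch; note that building dlib via pip usually also requires CMake and a C++ compiler on the system):

import dlib, cv2, numpy, pandas, skimage
print(dlib.__version__, cv2.__version__, numpy.__version__, pandas.__version__, skimage.__version__)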

test_tkinter.py Normal file
View File