1 Commits

Author SHA1 Message Date
0351cc0f1a Update face_reco_from_camera.py 2019-04-26 15:37:30 +08:00
34 changed files with 630 additions and 1197 deletions

View File

@ -4,7 +4,7 @@
<content url="file://$MODULE_DIR$"> <content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/data" isTestSource="false" /> <sourceFolder url="file://$MODULE_DIR$/data" isTestSource="false" />
</content> </content>
<orderEntry type="jdk" jdkName="Python 3.7" jdkType="Python SDK" /> <orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" /> <orderEntry type="sourceFolder" forTests="false" />
</component> </component>
<component name="TestRunnerService"> <component name="TestRunnerService">

View File

@ -12,7 +12,6 @@
<option name="ignoredErrors"> <option name="ignoredErrors">
<list> <list>
<option value="N806" /> <option value="N806" />
<option value="N802" />
</list> </list>
</option> </option>
</inspection_tool> </inspection_tool>

2
.idea/misc.xml generated
View File

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<project version="4"> <project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" /> <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6" project-jdk-type="Python SDK" />
</project> </project>

357
.idea/workspace.xml generated
View File

@ -2,11 +2,24 @@
<project version="4"> <project version="4">
<component name="ChangeListManager"> <component name="ChangeListManager">
<list default="true" id="e58b655a-3a9b-4001-b4da-39e07ab46629" name="Default Changelist" comment=""> <list default="true" id="e58b655a-3a9b-4001-b4da-39e07ab46629" name="Default Changelist" comment="">
<change beforePath="$PROJECT_DIR$/.idea/Dlib_face_recognition_from_camera.iml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/Dlib_face_recognition_from_camera.iml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/misc.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" /> <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/README.rst" beforeDir="false" afterPath="$PROJECT_DIR$/README.rst" afterDir="false" />
<change beforePath="$PROJECT_DIR$/data/data_dlib/dlib_face_recognition_resnet_model_v1.dat" beforeDir="false" afterPath="$PROJECT_DIR$/data/data_dlib/dlib_face_recognition_resnet_model_v1.dat" afterDir="false" />
<change beforePath="$PROJECT_DIR$/data/data_dlib/shape_predictor_5_face_landmarks.dat" beforeDir="false" afterPath="$PROJECT_DIR$/data/data_dlib/shape_predictor_5_face_landmarks.dat" afterDir="false" />
<change beforePath="$PROJECT_DIR$/data/data_dlib/shape_predictor_68_face_landmarks.dat" beforeDir="false" afterPath="$PROJECT_DIR$/data/data_dlib/shape_predictor_68_face_landmarks.dat" afterDir="false" />
<change beforePath="$PROJECT_DIR$/face_reco_from_camera.py" beforeDir="false" afterPath="$PROJECT_DIR$/face_reco_from_camera.py" afterDir="false" /> <change beforePath="$PROJECT_DIR$/face_reco_from_camera.py" beforeDir="false" afterPath="$PROJECT_DIR$/face_reco_from_camera.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/features_extraction_to_csv.py" beforeDir="false" afterPath="$PROJECT_DIR$/features_extraction_to_csv.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/get_faces_from_camera.py" beforeDir="false" afterPath="$PROJECT_DIR$/get_faces_from_camera.py" afterDir="false" /> <change beforePath="$PROJECT_DIR$/get_faces_from_camera.py" beforeDir="false" afterPath="$PROJECT_DIR$/get_faces_from_camera.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/how_to_use_camera.py" beforeDir="false" afterPath="$PROJECT_DIR$/how_to_use_camera.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/Dlib_Face_recognition_by_coneypo.pptx" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/Dlib_Face_recognition_by_coneypo.pptx" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/face_reco_single_person.png" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/face_reco_single_person.png" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/face_reco_single_person_customize_name.png" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/face_reco_single_person_customize_name.png" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/face_reco_two_people.png" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/face_reco_two_people.png" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/face_reco_two_people_in_database.png" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/face_reco_two_people_in_database.png" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/get_face_from_camera.png" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/get_face_from_camera.png" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/get_face_from_camera_out_of_range.png" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/get_face_from_camera_out_of_range.png" afterDir="false" />
<change beforePath="$PROJECT_DIR$/introduction/overview.png" beforeDir="false" afterPath="$PROJECT_DIR$/introduction/overview.png" afterDir="false" />
<change beforePath="$PROJECT_DIR$/requirements.txt" beforeDir="false" afterPath="$PROJECT_DIR$/requirements.txt" afterDir="false" />
</list> </list>
<option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" /> <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
<option name="SHOW_DIALOG" value="false" /> <option name="SHOW_DIALOG" value="false" />
@ -14,6 +27,45 @@
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" /> <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" /> <option name="LAST_RESOLUTION" value="IGNORE" />
</component> </component>
<component name="FileEditorManager">
<leaf SIDE_TABS_SIZE_LIMIT_KEY="300">
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/README.rst">
<provider selected="true" editor-type-id="restructured-text-editor" />
</entry>
</file>
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/get_faces_from_camera.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="309">
<caret line="69" selection-start-line="69" selection-end-line="69" />
</state>
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="false">
<entry file="file://$PROJECT_DIR$/features_extraction_to_csv.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="248">
<caret line="73" column="36" selection-start-line="73" selection-start-column="36" selection-end-line="73" selection-end-column="36" />
</state>
</provider>
</entry>
</file>
<file pinned="false" current-in-tab="true">
<entry file="file://$PROJECT_DIR$/face_reco_from_camera.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="489">
<caret line="160" column="23" lean-forward="true" selection-start-line="160" selection-start-column="23" selection-end-line="160" selection-end-column="23" />
<folding>
<element signature="e#230#264#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
</file>
</leaf>
</component>
<component name="FileTemplateManagerImpl"> <component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES"> <option name="RECENT_TEMPLATES">
<list> <list>
@ -21,20 +73,80 @@
</list> </list>
</option> </option>
</component> </component>
<component name="FindInProjectRecents">
<findStrings>
<find>path_photos_from_camera</find>
<find>path_csv_from_photos</find>
<find>facerec</find>
<find>img</find>
<find>feature_mean_list_personX</find>
<find>feature_list_personX</find>
<find>feature</find>
<find>features_list_personX</find>
<find>feature_mean_personX</find>
<find>data_csvs</find>
<find>features_known_arr</find>
<find>with</find>
</findStrings>
<replaceStrings>
<replace>face_rec</replace>
<replace>img_rd</replace>
<replace>descriptor_mean_list_personX</replace>
<replace>features_list_personX</replace>
<replace>features_mean_personX</replace>
</replaceStrings>
</component>
<component name="Git.Settings"> <component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" /> <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component> </component>
<component name="ProjectId" id="1Tq7xXTTl7R3HeMqxP7UMMKZMeC" /> <component name="IdeDocumentHistory">
<option name="CHANGED_PATHS">
<list>
<option value="$PROJECT_DIR$/requirements.txt" />
<option value="$PROJECT_DIR$/get_features_into_CSV.py" />
<option value="$PROJECT_DIR$/get_origin.py" />
<option value="$PROJECT_DIR$/test.py" />
<option value="$PROJECT_DIR$/features_extraction_to_csv.py" />
<option value="$PROJECT_DIR$/get_faces_from_camera.py" />
<option value="$PROJECT_DIR$/README.rst" />
<option value="$PROJECT_DIR$/face_reco_from_camera.py" />
</list>
</option>
</component>
<component name="ProjectFrameBounds" extendedState="6">
<option name="x" value="-281" />
<option name="y" value="574" />
<option name="width" value="1910" />
<option name="height" value="741" />
</component>
<component name="ProjectLevelVcsManager" settingsEditedManually="true" /> <component name="ProjectLevelVcsManager" settingsEditedManually="true" />
<component name="ProjectViewState"> <component name="ProjectView">
<option name="hideEmptyMiddlePackages" value="true" /> <navigator proportions="" version="1">
<option name="showExcludedFiles" value="true" /> <foldersAlwaysOnTop value="true" />
<option name="showLibraryContents" value="true" /> </navigator>
<panes>
<pane id="ProjectPane">
<subPane>
<expand>
<path>
<item name="Dlib_face_recognition_from_camera" type="b2602c69:ProjectViewProjectNode" />
<item name="Dlib_face_recognition_from_camera" type="462c0819:PsiDirectoryNode" />
</path>
<path>
<item name="Dlib_face_recognition_from_camera" type="b2602c69:ProjectViewProjectNode" />
<item name="Dlib_face_recognition_from_camera" type="462c0819:PsiDirectoryNode" />
<item name="data" type="462c0819:PsiDirectoryNode" />
</path>
</expand>
<select />
</subPane>
</pane>
<pane id="Scope" />
</panes>
</component> </component>
<component name="PropertiesComponent"> <component name="PropertiesComponent">
<property name="SHARE_PROJECT_CONFIGURATION_FILES" value="true" /> <property name="SHARE_PROJECT_CONFIGURATION_FILES" value="true" />
<property name="last_opened_file_path" value="$PROJECT_DIR$" /> <property name="last_opened_file_path" value="/media/con/Ubuntu 18.0/Face_Recognition" />
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
</component> </component>
<component name="RunDashboard"> <component name="RunDashboard">
<option name="ruleStates"> <option name="ruleStates">
@ -48,50 +160,8 @@
</list> </list>
</option> </option>
</component> </component>
<component name="RunManager" selected="Python.face_descriptor_from_camera"> <component name="RunManager" selected="Python.face_reco_from_camera">
<configuration name="face_descriptor_compute" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> <configuration name="face_reco_from_camera" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/face_descriptor_compute.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="face_descriptor_from_camera" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/face_descriptor_from_camera.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="face_reco_from_camera" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Dlib_face_recognition_from_camera" /> <module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" /> <option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" /> <option name="PARENT_ENVS" value="true" />
@ -154,20 +224,62 @@
<option name="INPUT_FILE" value="" /> <option name="INPUT_FILE" value="" />
<method v="2" /> <method v="2" />
</configuration> </configuration>
<configuration name="get_origin" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/get_origin.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="test" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="Dlib_face_recognition_from_camera" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<list> <list>
<item itemvalue="Python.face_descriptor_compute" />
<item itemvalue="Python.face_descriptor_from_camera" />
<item itemvalue="Python.face_reco_from_camera" /> <item itemvalue="Python.face_reco_from_camera" />
<item itemvalue="Python.features_extraction_to_csv" /> <item itemvalue="Python.features_extraction_to_csv" />
<item itemvalue="Python.get_faces_from_camera" /> <item itemvalue="Python.get_faces_from_camera" />
<item itemvalue="Python.get_origin" />
<item itemvalue="Python.test" />
</list> </list>
<recent_temporary> <recent_temporary>
<list> <list>
<item itemvalue="Python.face_descriptor_from_camera" />
<item itemvalue="Python.face_reco_from_camera" /> <item itemvalue="Python.face_reco_from_camera" />
<item itemvalue="Python.features_extraction_to_csv" /> <item itemvalue="Python.features_extraction_to_csv" />
<item itemvalue="Python.get_faces_from_camera" /> <item itemvalue="Python.get_faces_from_camera" />
<item itemvalue="Python.face_descriptor_compute" /> <item itemvalue="Python.test" />
<item itemvalue="Python.get_origin" />
</list> </list>
</recent_temporary> </recent_temporary>
</component> </component>
@ -184,56 +296,28 @@
</task> </task>
<servers /> <servers />
</component> </component>
<component name="Vcs.Log.Tabs.Properties"> <component name="ToolWindowManager">
<option name="TAB_STATES"> <frame x="0" y="27" width="1920" height="988" extended-state="6" />
<map> <editor active="true" />
<entry key="MAIN"> <layout>
<value> <window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.2090813" />
<State> <window_info id="Structure" order="1" weight="0.25" />
<option name="COLUMN_ORDER" /> <window_info id="Favorites" order="2" side_tool="true" />
</State> <window_info anchor="bottom" id="Message" order="0" />
</value> <window_info anchor="bottom" id="Find" order="1" />
</entry> <window_info anchor="bottom" id="Run" order="2" visible="true" weight="0.25686976" />
</map> <window_info anchor="bottom" id="Debug" order="3" weight="0.39952996" />
</option> <window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
</component> <window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
<component name="WindowStateProjectService"> <window_info anchor="bottom" id="TODO" order="6" />
<state width="1897" height="194" key="GridCell.Tab.0.bottom" timestamp="1587297625581"> <window_info anchor="bottom" id="Version Control" order="7" weight="0.32983682" />
<screen x="0" y="27" width="1920" height="993" /> <window_info anchor="bottom" id="Terminal" order="8" weight="0.28434888" />
</state> <window_info anchor="bottom" id="Event Log" order="9" side_tool="true" />
<state width="1897" height="194" key="GridCell.Tab.0.bottom/0.27.1920.993@0.27.1920.993" timestamp="1587297625581" /> <window_info anchor="bottom" id="Python Console" order="10" />
<state width="1897" height="194" key="GridCell.Tab.0.center" timestamp="1587297625579"> <window_info anchor="right" id="Commander" order="0" weight="0.4" />
<screen x="0" y="27" width="1920" height="993" /> <window_info anchor="right" id="Ant Build" order="1" weight="0.25" />
</state> <window_info anchor="right" content_ui="combo" id="Hierarchy" order="2" weight="0.25" />
<state width="1897" height="194" key="GridCell.Tab.0.center/0.27.1920.993@0.27.1920.993" timestamp="1587297625579" /> </layout>
<state width="1897" height="194" key="GridCell.Tab.0.left" timestamp="1587297625578">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1897" height="194" key="GridCell.Tab.0.left/0.27.1920.993@0.27.1920.993" timestamp="1587297625578" />
<state width="1897" height="194" key="GridCell.Tab.0.right" timestamp="1587297625580">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1897" height="194" key="GridCell.Tab.0.right/0.27.1920.993@0.27.1920.993" timestamp="1587297625580" />
<state width="1485" height="299" key="GridCell.Tab.1.bottom" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.bottom/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state width="1485" height="299" key="GridCell.Tab.1.center" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.center/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state width="1485" height="299" key="GridCell.Tab.1.left" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.left/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state width="1485" height="299" key="GridCell.Tab.1.right" timestamp="1587263908422">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state width="1485" height="299" key="GridCell.Tab.1.right/0.27.1920.993@0.27.1920.993" timestamp="1587263908422" />
<state x="759" y="251" width="672" height="678" key="search.everywhere.popup" timestamp="1587264669499">
<screen x="0" y="27" width="1920" height="993" />
</state>
<state x="759" y="251" width="672" height="678" key="search.everywhere.popup/0.27.1920.993@0.27.1920.993" timestamp="1587264669499" />
</component> </component>
<component name="XDebuggerManager"> <component name="XDebuggerManager">
<breakpoint-manager> <breakpoint-manager>
@ -246,4 +330,63 @@
</default-breakpoints> </default-breakpoints>
</breakpoint-manager> </breakpoint-manager>
</component> </component>
<component name="editorHistoryManager">
<entry file="file://$PROJECT_DIR$/use_camera.py" />
<entry file="file://$PROJECT_DIR$/patch" />
<entry file="file://$PROJECT_DIR$/README.md" />
<entry file="file://$PROJECT_DIR$/data/data_csvs_from_camera/person_2.csv" />
<entry file="file://$PROJECT_DIR$/data/data_faces_from_camera/person_6/img_face_1.jpg" />
<entry file="file://$PROJECT_DIR$/introduction/face_reco_single_person_custmize_name.png" />
<entry file="file://$PROJECT_DIR$/data/data_csvs_from_camera/person_1.csv" />
<entry file="file://$PROJECT_DIR$/get_features_into_CSV.py" />
<entry file="file://$PROJECT_DIR$/get_origin.py" />
<entry file="file://$PROJECT_DIR$/test.py" />
<entry file="file://$PROJECT_DIR$/requirements.txt">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="36">
<caret line="2" column="16" selection-start-line="2" selection-start-column="16" selection-end-line="2" selection-end-column="16" />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/introduction/face_reco_two_people_in_database.png">
<provider selected="true" editor-type-id="images" />
</entry>
<entry file="file://$PROJECT_DIR$/how_to_use_camera.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="486">
<caret line="27" column="13" selection-start-line="27" selection-start-column="13" selection-end-line="27" selection-end-column="13" />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/data/features_all.csv">
<provider selected="true" editor-type-id="csv-text-editor" />
</entry>
<entry file="file://$PROJECT_DIR$/get_faces_from_camera.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="309">
<caret line="69" selection-start-line="69" selection-end-line="69" />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/features_extraction_to_csv.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="248">
<caret line="73" column="36" selection-start-line="73" selection-start-column="36" selection-end-line="73" selection-end-column="36" />
</state>
</provider>
</entry>
<entry file="file://$PROJECT_DIR$/README.rst">
<provider selected="true" editor-type-id="restructured-text-editor" />
</entry>
<entry file="file://$PROJECT_DIR$/face_reco_from_camera.py">
<provider selected="true" editor-type-id="text-editor">
<state relative-caret-position="489">
<caret line="160" column="23" lean-forward="true" selection-start-line="160" selection-start-column="23" selection-end-line="160" selection-end-column="23" />
<folding>
<element signature="e#230#264#0" expanded="true" />
</folding>
</state>
</provider>
</entry>
</component>
</project> </project>

21
LICENSE
View File

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2020 coneypo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -11,13 +11,13 @@ Detect and recognize single/multi-faces from camera;
#. 摄像头人脸录入 / Face register #. 摄像头人脸录入 / Face register
.. image:: introduction/face_register.png .. image:: introduction/get_face_from_camera.png
:align: center :align: center
请不要离摄像头过近,人脸超出摄像头范围时会有 "OUT OF RANGE" 提醒 / 请不要离摄像头过近,人脸超出摄像头范围时会有 "OUT OF RANGE" 提醒 /
Please do not be too close to the camera, or you can't save faces with "OUT OF RANGE" warning; Please do not too close to the camera, or you can't save faces with "OUT OF RANGE" warning;
.. image:: introduction/face_register_warning.png .. image:: introduction/get_face_from_camera_out_of_range.png
:align: center :align: center
#. 提取特征建立人脸数据库 / Generate database from images captured #. 提取特征建立人脸数据库 / Generate database from images captured
@ -25,79 +25,49 @@ Detect and recognize single/multi-faces from camera;
当单张人脸 / When single-face: 当单张人脸 / When single-face:
.. image:: introduction/face_reco_single.png .. image:: introduction/face_reco_single_person.png
:align: center
利用 OT 对于单张人脸追踪/ Use OT to track, which can improve FPS from 1.x to 20.x:
.. image:: introduction/face_reco_single_ot.png
:align: center :align: center
当多张人脸 / When multi-faces: 当多张人脸 / When multi-faces:
.. image:: introduction/face_reco_multi.png 一张已录入人脸 + 未录入 unknown 人脸 / 1x known face + 1x unknown face:
.. image:: introduction/face_reco_two_people.png
:align: center :align: center
利用 OT 来实现 / When multi-faces with OT: 同时识别多张已录入人脸 / multi-faces recognition at the same time:
.. image:: introduction/face_reco_multi_ot.png .. image:: introduction/face_reco_two_people_in_database.png
:align: center :align: center
定制显示名字, 可以写中文 / Customize names:
.. image:: introduction/face_reco_with_name.png
:align: center
** 关于精度 / About accuracy: ** 关于精度 / About accuracy:
* When using a distance threshold of ``0.6``, the dlib model obtains an accuracy of ``99.38%`` on the standard LFW face recognition benchmark. * When using a distance threshold of ``0.6``, the dlib model obtains an accuracy of ``99.38%`` on the standard LFW face recognition benchmark.
** 关于算法 / About algorithm
* 基于 Residual Neural Network / 残差网络的 CNN 模型;
* This model is a ResNet network with 29 conv layers. It's essentially a version of the ResNet-34 network from the paper Deep Residual Learning for Image Recognition by He, Zhang, Ren, and Sun with a few layers removed and the number of filters per layer reduced by half.
Overview Overview
******** ********
此项目中人脸识别的实现流程 (no OT, 每一帧都进行检测+识别) / The design of this repo: 此项目中人脸识别的实现流程 / The design of this repo:
.. image:: introduction/overview.png .. image:: introduction/overview.png
:align: center :align: center
实现流程(with OT, 初始帧进行检测+识别,后续帧检测+质心跟踪) / The design of this repo:
.. image:: introduction/overview_with_ot.png
:align: center
如果利用 OT 来跟踪,可以大大提高 FPS, 因为做识别时候需要提取特征描述子的耗时很多;
Steps Steps
***** *****
#. 安装依赖库 / Install some python packages if needed #. 下载源码 / Download zip from website or via GitHub Desktop in windows, or git clone in Ubuntu
.. code-block:: bash
pip3 install opencv-python
pip3 install scikit-image
pip3 install dlib
#. 下载源码 / Download zip from website or via GitHub Desktop in windows, or git clone repo in Ubuntu
.. code-block:: bash .. code-block:: bash
git clone https://github.com/coneypo/Dlib_face_recognition_from_camera git clone https://github.com/coneypo/Dlib_face_recognition_from_camera
#. 进行人脸信息采集录入 / Register faces #. 进行 face register / 人脸信息采集录入
.. code-block:: bash .. code-block:: bash
python3 get_face_from_camera.py python3 get_face_from_camera.py
#. 提取所有录入人脸数据存入 "features_all.csv" / Features extraction and save into "features_all.csv" #. 提取所有录入人脸数据存入 features_all.csv / Features extraction and save into features_all.csv
.. code-block:: bash .. code-block:: bash
@ -109,12 +79,6 @@ Steps
python3 face_reco_from_camera.py python3 face_reco_from_camera.py
#. 或者利用 OT 算法,调用摄像头进行实时人脸识别/ Real-time face recognition with OT
.. code-block:: bash
python3 face_reco_from_camera_ot_single_person.py
python3 face_reco_from_camera_ot_multi_people.py
About Source Code About Source Code
***************** *****************
@ -124,27 +88,34 @@ Repo 的 tree / 树状图:
:: ::
. .
├── get_faces_from_camera.py # Step 1. Face register ├── get_faces_from_camera.py # Step1. Faces register
├── features_extraction_to_csv.py # Step 2. Feature extraction ├── features_extraction_to_csv.py # Step2. Features extraction
├── face_reco_from_camera.py # Step 3. Face recognizer ├── face_reco_from_camera.py # Step3. Faces recognition
├── face_reco_from_camera_ot_single_person.py # Step 3. Face recognizer with OT for single person ├── how_to_use_camera.py # Use the default camera by opencv
├── face_reco_from_camera_ot_multi_people.py # Step 3. Face recognizer with OT for multi people
├── face_descriptor_from_camera.py # Face descriptor computation
├── how_to_use_camera.py # Use the default camera by opencv
├── data ├── data
│   ├── data_dlib # Dlib's model │   ├── data_dlib # Dlib's model
│   │   ├── dlib_face_recognition_resnet_model_v1.dat │   │   ├── dlib_face_recognition_resnet_model_v1.dat
│   │   ├── shape_predictor_5_face_landmarks.dat
│   │   └── shape_predictor_68_face_landmarks.dat │   │   └── shape_predictor_68_face_landmarks.dat
│   ├── data_faces_from_camera # Face images captured from camera (will generate after step 1) │   ├── data_faces_from_camera # Face images captured from camera (will generate after step 1)
│   │   ├── person_1 │   │   ├── person_1
│   │   │   ├── img_face_1.jpg │   │   │   ├── img_face_1.jpg
│   │   │   └── img_face_2.jpg │   │   │   └── img_face_2.jpg
│   │   └── person_2 │   │   └── person_2
│   │   └── img_face_1.jpg │   │   └── img_face_1.jpg
│   │   └── img_face_2.jpg │   │   └── img_face_2.jpg
│   └── features_all.csv # CSV to save all the features of known faces (will generate after step 2) │   └── features_all.csv # CSV to save all the features of known faces (will generate after step 2)
├── introduction # Some files for readme.rst
│   ├── Dlib_Face_recognition_by_coneypo.pptx
│   ├── face_reco_single_person_customize_name.png
│   ├── face_reco_single_person.png
│   ├── face_reco_two_people_in_database.png
│   ├── face_reco_two_people.png
│   ├── get_face_from_camera_out_of_range.png
│   ├── get_face_from_camera.png
│   └── overview.png
├── README.rst ├── README.rst
└── requirements.txt # Some python packages needed └── requirements.txt # Some python packages needed
用到的 Dlib 相关模型函数: 用到的 Dlib 相关模型函数:
@ -157,22 +128,15 @@ Repo 的 tree / 树状图:
faces = detector(img_gray, 0) faces = detector(img_gray, 0)
#. Dlib 人脸 landmark 特征点检测器, output: <class 'dlib.dlib.full_object_detection'>, #. Dlib 人脸测器, output: <class 'dlib.dlib.full_object_detection'>
will use shape_predictor_68_face_landmarks.dat
.. code-block:: python .. code-block:: python
# This is trained on the ibug 300-W dataset (https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/) predictor = dlib.shape_predictor("data/data_dlib/shape_predictor_5_face_landmarks.dat")
# Also note that this model file is designed for use with dlib's HOG face detector.
# That is, it expects the bounding boxes from the face detector to be aligned a certain way, the way dlib's HOG face detector does it.
# It won't work as well when used with a face detector that produces differently aligned boxes,
# such as the CNN based mmod_human_face_detector.dat face detector.
predictor = dlib.shape_predictor("data/data_dlib/shape_predictor_68_face_landmarks.dat")
shape = predictor(img_rd, faces[i]) shape = predictor(img_rd, faces[i])
#. Dlib 特征描述子 Face recognition model, the object maps human faces into 128D vectors #. 特征描述子 Face recognition model, the object maps human faces into 128D vectors
.. code-block:: python .. code-block:: python
@ -206,14 +170,6 @@ Python 源码介绍如下:
* 将捕获到的人脸数据和之前存的人脸数据进行对比计算欧式距离, 由此判断是否是同一个人; * 将捕获到的人脸数据和之前存的人脸数据进行对比计算欧式距离, 由此判断是否是同一个人;
#. face_reco_from_camera_ot_single_person/multi_people.py:
区别于 face_reco_from_camera.py (对每一帧都进行检测+识别),只会对初始帧做检测+识别,对后续帧做检测+质心跟踪;
#. (optional) face_descriptor_from_camera.py
调用摄像头进行实时特征描述子计算; / Real-time face descriptor computation;
More More
**** ****
@ -228,15 +184,14 @@ Tips:
#. 人脸录入的时候先建文件夹再保存图片, 先 ``N````S`` / Press ``N`` before ``S`` #. 人脸录入的时候先建文件夹再保存图片, 先 ``N````S`` / Press ``N`` before ``S``
#. 关于人脸识别卡顿 FPS 低问题, 原因是特征描述子提取很费时间, 光跑 face_descriptor_from_camera.py 中 face_reco_model.compute_face_descriptor 在 CPU: i7-8700K 得到的最终 FPS: 5~6 (检测在 0.03s, 特征描述子提取在 0.158s, 和已知人脸进行遍历对比在 0.003s 左右), 所以主要提取特征时候耗资源, 可以用 OT 去做追踪,而不是对每一帧都做检测+识别
可以访问我的博客获取本项目的更详细介绍,如有问题可以邮件联系我 / 可以访问我的博客获取本项目的更详细介绍,如有问题可以邮件联系我 /
For more details, please refer to my blog (in chinese) or mail to me : For more details, please refer to my blog (in chinese) or mail to me :
* Blog: https://www.cnblogs.com/AdaminXie/p/9010298.html * Blog: https://www.cnblogs.com/AdaminXie/p/9010298.html
* 关于 OT 部分的更新在 Blog: https://www.cnblogs.com/AdaminXie/p/13566269.html
* Mail: coneypo@foxmail.com ( Dlib 相关 repo 问题请联系 @foxmail 而不是 @intel ) * Mail: coneypo@foxmail.com ( Dlib 相关 repo 问题请联系 @foxmail 而不是 @intel )
仅限于交流学习, 商业合作勿扰;
Thanks for your support. Thanks for your support.

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 88 KiB

View File

@ -1,75 +0,0 @@
# 摄像头实时人脸特征描述子计算 / Real-time face descriptor compute
import dlib # 人脸识别的库 Dlib
import cv2 # 图像处理的库 OpenCV
import time
# 1. Dlib 正向人脸检测器
detector = dlib.get_frontal_face_detector()
# 2. Dlib 人脸 landmark 特征点检测器
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# 3. Dlib Resnet 人脸识别模型,提取 128D 的特征矢量
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
class Face_Descriptor:
def __init__(self):
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
def update_fps(self):
now = time.time()
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
def run(self):
cap = cv2.VideoCapture(0)
cap.set(3, 480)
self.process(cap)
cap.release()
cv2.destroyAllWindows()
def process(self, stream):
while stream.isOpened():
flag, img_rd = stream.read()
k = cv2.waitKey(1)
faces = detector(img_rd, 0)
font = cv2.FONT_HERSHEY_SIMPLEX
# 检测到人脸
if len(faces) != 0:
for face in faces:
face_shape = predictor(img_rd, face)
face_desc = face_reco_model.compute_face_descriptor(img_rd, face_shape)
# 添加说明
cv2.putText(img_rd, "Face Descriptor", (20, 40), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(len(faces)), (20, 140), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "S: Save current face", (20, 400), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
# 按下 'q' 键退出
if k == ord('q'):
break
self.update_fps()
cv2.namedWindow("camera", 1)
cv2.imshow("camera", img_rd)
def main():
Face_Descriptor_con = Face_Descriptor()
Face_Descriptor_con.run()
if __name__ == '__main__':
main()

View File

@ -1,186 +1,166 @@
# Copyright (C) 2020 coneypo # 摄像头实时人脸识别
# SPDX-License-Identifier: MIT # Real-time face recognition
# Author: coneypo # Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie # Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera # GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# 人脸识别 / Real-time face detection and recognition from images # Created at 2018-05-11
# Updated at 2019-04-09
import dlib import dlib # 人脸处理的库 Dlib
import numpy as np import numpy as np # 数据处理的库 numpy
import cv2 import cv2 # 图像处理的库 OpenCv
import pandas as pd import pandas as pd # 数据处理的库 Pandas
import os
import time
from PIL import Image, ImageDraw, ImageFont
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib # 人脸识别模型,提取128D的特征矢量
# face recognition model, the object maps human faces into 128D vectors
# Refer this tutorial: http://dlib.net/python/index.html#dlib.face_recognition_model_v1
facerec = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
# 计算两个128D向量间的欧式距离
# compute the e-distance between two 128D features
def return_euclidean_distance(feature_1, feature_2):
feature_1 = np.array(feature_1)
feature_2 = np.array(feature_2)
dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))
return dist
# 处理存放所有人脸特征的 csv
path_features_known_csv = "data/features_all.csv"
csv_rd = pd.read_csv(path_features_known_csv, header=None)
# 用来存放所有录入人脸特征的数组
# the array to save the features of faces in the database
features_known_arr = []
# 读取已知人脸数据
# print known faces
for i in range(csv_rd.shape[0]):
features_someone_arr = []
for j in range(0, len(csv_rd.ix[i, :])):
features_someone_arr.append(csv_rd.ix[i, :][j])
features_known_arr.append(features_someone_arr)
print("Faces in Database:", len(features_known_arr))
# Dlib 检测器和预测器
# The detector and predictor will be used
detector = dlib.get_frontal_face_detector() detector = dlib.get_frontal_face_detector()
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat') predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# Dlib Resnet 人脸识别模型,提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor # 创建 cv2 摄像头对象
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat") # cv2.VideoCapture(0) to use the default camera of PC,
# and you can use local video name by use cv2.VideoCapture(filename)
cap = cv2.VideoCapture(0)
# cap.set(propId, value)
# 设置视频参数,propId 设置的视频参数,value 设置的参数值
cap.set(3, 480)
class Face_Recognizer: # cap.isOpened() 返回 true/false 检查初始化是否成功
def __init__(self): # when the camera is open
self.feature_known_list = [] # 用来存放所有录入人脸特征的数组 / Save the features of faces in the database while cap.isOpened():
self.name_known_list = [] # 存储录入人脸名字 / Save the name of faces in the database
self.current_frame_face_cnt = 0 # 存储当前摄像头中捕获到的人脸数 / Counter for faces in current frame flag, img_rd = cap.read()
self.current_frame_feature_list = [] # 存储当前摄像头中捕获到的人脸特征 / Features of faces in current frame kk = cv2.waitKey(1)
self.current_frame_name_position_list = [] # 存储当前摄像头中捕获到的所有人脸的名字坐标 / Positions of faces in current frame
self.current_frame_name_list = [] # 存储当前摄像头中捕获到的所有人脸的名字 / Names of faces in current frame
# Update FPS # 取灰度
self.fps = 0 img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)
self.frame_start_time = 0
# 从 "features_all.csv" 读取录入人脸特征 / Get known faces from "features_all.csv" # 人脸数 faces
def get_face_database(self): faces = detector(img_gray, 0)
if os.path.exists("data/features_all.csv"):
path_features_known_csv = "data/features_all.csv" # 待会要写的字体 font to write later
csv_rd = pd.read_csv(path_features_known_csv, header=None) font = cv2.FONT_HERSHEY_COMPLEX
for i in range(csv_rd.shape[0]):
features_someone_arr = [] # 存储当前摄像头中捕获到的所有人脸的坐标/名字
for j in range(0, 128): # the list to save the positions and names of current faces captured
if csv_rd.iloc[i][j] == '': pos_namelist = []
features_someone_arr.append('0') name_namelist = []
# 按下 q 键退出
# press 'q' to exit
if kk == ord('q'):
break
else:
# 检测到人脸 when face detected
if len(faces) != 0:
# 获取当前捕获到的图像的所有人脸的特征,存储到 features_cap_arr
# get the features captured and save into features_cap_arr
features_cap_arr = []
for i in range(len(faces)):
shape = predictor(img_rd, faces[i])
features_cap_arr.append(facerec.compute_face_descriptor(img_rd, shape))
# 遍历捕获到的图像中所有的人脸
# traversal all the faces in the database
for k in range(len(faces)):
print("##### camera person", k+1, "#####")
# 让人名跟随在矩形框的下方
# 确定人名的位置坐标
# 先默认所有人不认识,是 unknown
# set the default names of faces with "unknown"
name_namelist.append("unknown")
# 每个捕获人脸的名字坐标 the positions of faces captured
pos_namelist.append(tuple([faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top())/4)]))
# 对于某张人脸,遍历所有存储的人脸特征
# for every faces detected, compare the faces in the database
e_distance_list = []
for i in range(len(features_known_arr)):
# 如果 person_X 数据不为空
if str(features_known_arr[i][0]) != '0.0':
print("with person", str(i + 1), "the e distance: ", end='')
e_distance_tmp = return_euclidean_distance(features_cap_arr[k], features_known_arr[i])
print(e_distance_tmp)
e_distance_list.append(e_distance_tmp)
else: else:
features_someone_arr.append(csv_rd.iloc[i][j]) # 空数据 person_X
self.feature_known_list.append(features_someone_arr) e_distance_list.append(999999999)
self.name_known_list.append("Person_"+str(i+1)) # Find the one with minimum e distance
print("Faces in Database:", len(self.feature_known_list)) similar_person_num = e_distance_list.index(min(e_distance_list))
return 1 print("Minimum e distance with person", int(similar_person_num)+1)
else:
print('##### Warning #####', '\n')
print("'features_all.csv' not found!")
print(
"Please run 'get_faces_from_camera.py' and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'",
'\n')
print('##### End Warning #####')
return 0
# 计算两个128D向量间的欧式距离 / Compute the e-distance between two 128D features if min(e_distance_list) < 0.4:
@staticmethod # 在这里修改 person_1, person_2 ... 的名字
def return_euclidean_distance(feature_1, feature_2): # 可以在这里改称 Jack, Tom and others
feature_1 = np.array(feature_1) # Here you can modify the names shown on the camera
feature_2 = np.array(feature_2) name_namelist[k] = str("Person "+str(int(similar_person_num)+1))\
dist = np.sqrt(np.sum(np.square(feature_1 - feature_2))) .replace("Person 1", "Sherry")\
return dist .replace("Person 2", "Jack")\
.replace("Person 3", "Ronnie")\
.replace("Person 4", "Terry")\
.replace("Person 5", "Wilson")
# print("May be person "+str(int(similar_person_num)+1))
else:
print("Unknown person")
# 更新 FPS / Update FPS of Video stream # 矩形框
def update_fps(self): # draw rectangle
now = time.time() for kk, d in enumerate(faces):
self.frame_time = now - self.frame_start_time # 绘制矩形框
self.fps = 1.0 / self.frame_time cv2.rectangle(img_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]), (0, 255, 255), 2)
self.frame_start_time = now print('\n')
def draw_note(self, img_rd): # 在人脸框下面写人脸名字
font = cv2.FONT_ITALIC # write names under rectangle
for i in range(len(faces)):
cv2.putText(img_rd, name_namelist[i], pos_namelist[i], font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Face Recognizer", (20, 40), font, 1, (255, 255, 255), 1, cv2.LINE_AA) print("Faces in camera now:", name_namelist, "\n")
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.current_frame_face_cnt), (20, 140), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
def draw_name(self, img_rd): cv2.putText(img_rd, "Press 'q': Quit", (20, 450), font, 0.8, (84, 255, 159), 1, cv2.LINE_AA)
# 在人脸框下面写人脸名字 / Write names under rectangle cv2.putText(img_rd, "Face Recognition", (20, 40), font, 1, (0, 0, 0), 1, cv2.LINE_AA)
font = ImageFont.truetype("simsun.ttc", 30) cv2.putText(img_rd, "Faces: " + str(len(faces)), (20, 100), font, 1, (0, 0, 255), 1, cv2.LINE_AA)
img = Image.fromarray(cv2.cvtColor(img_rd, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
for i in range(self.current_frame_face_cnt):
# cv2.putText(img_rd, self.current_frame_name_list[i], self.current_frame_name_position_list[i], font, 0.8, (0, 255, 255), 1, cv2.LINE_AA)
draw.text(xy=self.current_frame_name_position_list[i], text=self.current_frame_name_list[i], font=font)
img_with_name = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
return img_with_name
# 修改显示人名 / Show names in chinese # 窗口显示 show with opencv
def show_chinese_name(self): cv2.imshow("camera", img_rd)
# Default known name: person_1, person_2, person_3
if self.current_frame_face_cnt >= 1:
self.name_known_list[0] ='张三'.encode('utf-8').decode()
# self.name_known_list[1] ='李四'.encode('utf-8').decode()
# self.name_known_list[2] ='xx'.encode('utf-8').decode()
# self.name_known_list[3] ='xx'.encode('utf-8').decode()
# self.name_known_list[4] ='xx'.encode('utf-8').decode()
# 处理获取的视频流,进行人脸识别 / Face detection and recognition from input video stream # 释放摄像头 release camera
def process(self): cap.release()
# 1. 读取存放所有人脸特征的 csv / Get faces known from "features.all.csv"
if self.get_face_database():
print(">>> Frame start") # 删除建立的窗口 delete all the windows
img_rd = cv2.imread("data/data_faces_for_test/test_faces_1.jpg") cv2.destroyAllWindows()
faces = detector(img_rd, 1)
self.draw_note(img_rd)
# 2. 检测到人脸 / Face detected in current frame
if len(faces) != 0:
# 3. 获取当前捕获到的图像的所有人脸的特征 / Compute the face descriptors for faces in current frame
for i in range(len(faces)):
shape = predictor(img_rd, faces[i])
self.current_frame_feature_list.append(face_reco_model.compute_face_descriptor(img_rd, shape))
# 4. 遍历捕获到的图像中所有的人脸 / Traversal all the faces in the database
for k in range(len(faces)):
print(">>>>>> For face", k+1, " in camera")
# 先默认所有人不认识,是 unknown / Set the default names of faces with "unknown"
self.current_frame_name_list.append("unknown")
# 每个捕获人脸的名字坐标 / Positions of faces captured
self.current_frame_name_position_list.append(tuple(
[faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
# 5. 对于某张人脸,遍历所有存储的人脸特征
# For every faces detected, compare the faces in the database
current_frame_e_distance_list = []
for i in range(len(self.feature_known_list)):
# 如果 person_X 数据不为空
if str(self.feature_known_list[i][0]) != '0.0':
print(" >>> With person", str(i + 1), ", the e distance: ", end='')
e_distance_tmp = self.return_euclidean_distance(self.current_frame_feature_list[k],
self.feature_known_list[i])
print(e_distance_tmp)
current_frame_e_distance_list.append(e_distance_tmp)
else:
# 空数据 person_X
current_frame_e_distance_list.append(999999999)
# 6. 寻找出最小的欧式距离匹配 / Find the one with minimum e distance
similar_person_num = current_frame_e_distance_list.index(min(current_frame_e_distance_list))
print(" >>> Minimum e distance with ", self.name_known_list[similar_person_num], ": ", min(current_frame_e_distance_list))
if min(current_frame_e_distance_list) < 0.4:
self.current_frame_name_list[k] = self.name_known_list[similar_person_num]
print(" >>> Face recognition result: " + str(self.name_known_list[similar_person_num]))
else:
print(" >>> Face recognition result: Unknown person")
# 矩形框 / Draw rectangle
for kk, d in enumerate(faces):
# 绘制矩形框
cv2.rectangle(img_rd, tuple([d.left(), d.top()]), tuple([d.right(), d.bottom()]),
(0, 255, 255), 2)
self.current_frame_face_cnt = len(faces)
img_rd = self.draw_name(img_rd)
print(">>>>>> Faces in camera now:", self.current_frame_name_list)
cv2.imshow("camera", img_rd)
cv2.waitKey(0)
print(">>> Frame ends\n\n")
def main():
Face_Recognizer_con = Face_Recognizer()
Face_Recognizer_con.process()
if __name__ == '__main__':
main()

View File

@ -1,285 +0,0 @@
# Copyright (C) 2020 coneypo
# SPDX-License-Identifier: MIT
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# 利用 OT 人脸追踪, 实时人脸识别 / Real-time face detection and recognition via OT for single face
import dlib
import numpy as np
import cv2
import os
import pandas as pd
import time
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# Dlib Resnet 人脸识别模型,提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
class Face_Recognizer:
def __init__(self):
self.font = cv2.FONT_ITALIC
# For FPS
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
# cnt for frame
self.frame_cnt = 0
# 用来存放所有录入人脸特征的数组 / Save the features of faces in the database
self.features_known_list = []
# 存储录入人脸名字 / Save the name of faces in the database
self.name_known_list = []
# 用来存储上一帧和当前帧 ROI 的质心坐标 / List to save centroid positions of ROI in frame N-1 and N
self.last_frame_centroid_list = []
self.current_frame_centroid_list = []
# 用来存储上一帧和当前帧检测出目标的名字 / List to save names of objects in frame N-1 and N
self.last_frame_names_list = []
self.current_frame_face_name_list = []
# 上一帧和当前帧中人脸数的计数器 / cnt for faces in frame N-1 and N
self.last_frame_face_cnt = 0
self.current_frame_face_cnt = 0
# 用来存放进行识别时候对比的欧氏距离 / Save the e-distance for faceX when recognizing
self.current_frame_face_X_e_distance_list = []
# 存储当前摄像头中捕获到的所有人脸的坐标名字 / Save the positions and names of current faces captured
self.current_frame_face_position_list = []
# 存储当前摄像头中捕获到的人脸特征 / Save the features of people in current frame
self.current_frame_face_features_list = []
# e distance between centroid of ROI in last and current frame
self.last_current_frame_centroid_e_distance = 0
# 从 "features_all.csv" 读取录入人脸特征 / Get known faces from "features_all.csv"
def get_face_database(self):
if os.path.exists("data/features_all.csv"):
path_features_known_csv = "data/features_all.csv"
csv_rd = pd.read_csv(path_features_known_csv, header=None)
for i in range(csv_rd.shape[0]):
features_someone_arr = []
for j in range(0, 128):
if csv_rd.iloc[i][j] == '':
features_someone_arr.append('0')
else:
features_someone_arr.append(csv_rd.iloc[i][j])
self.features_known_list.append(features_someone_arr)
self.name_known_list.append("Person_" + str(i + 1))
print("Faces in Database:", len(self.features_known_list))
return 1
else:
print('##### Warning #####', '\n')
print("'features_all.csv' not found!")
print(
"Please run 'get_faces_from_camera.py' and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'",
'\n')
print('##### End Warning #####')
return 0
# 获取处理之后 stream 的帧数 / Get the fps of video stream
def update_fps(self):
now = time.time()
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
# 计算两个128D向量间的欧式距离 / Compute the e-distance between two 128D features
@staticmethod
def return_euclidean_distance(feature_1, feature_2):
feature_1 = np.array(feature_1)
feature_2 = np.array(feature_2)
dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))
return dist
# / Use centroid tracker to link face_x in current frame with person_x in last frame
def centroid_tracker(self):
for i in range(len(self.current_frame_centroid_list)):
e_distance_current_frame_person_x_list = []
# For object 1 in current_frame, compute e-distance with object 1/2/3/4/... in last frame
for j in range(len(self.last_frame_centroid_list)):
self.last_current_frame_centroid_e_distance = self.return_euclidean_distance(
self.current_frame_centroid_list[i], self.last_frame_centroid_list[j])
e_distance_current_frame_person_x_list.append(
self.last_current_frame_centroid_e_distance)
last_frame_num = e_distance_current_frame_person_x_list.index(
min(e_distance_current_frame_person_x_list))
self.current_frame_face_name_list[i] = self.last_frame_face_name_list[last_frame_num]
# 生成的 cv2 window 上面添加说明文字 / putText on cv2 window
def draw_note(self, img_rd):
# 添加说明 / Add some statements
cv2.putText(img_rd, "Face recognizer with OT", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.current_frame_face_cnt), (20, 130), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
for i in range(len(self.current_frame_face_name_list)):
cv2.putText(img_rd, "Face " + str(i + 1), tuple(
[int(self.current_frame_centroid_list[i][0]), int(self.current_frame_centroid_list[i][1])]), self.font,
0.8, (255, 190, 0),
1,
cv2.LINE_AA)
# 处理获取的视频流,进行人脸识别 / Face detection and recognition wit OT from input video stream
def process(self, stream):
# 1. 读取存放所有人脸特征的 csv / Get faces known from "features.all.csv"
if self.get_face_database():
while stream.isOpened():
self.frame_cnt += 1
print(">>> Frame " + str(self.frame_cnt) + " starts")
flag, img_rd = stream.read()
kk = cv2.waitKey(1)
# 2. 检测人脸 / Detect faces for frame X
faces = detector(img_rd, 0)
if self.current_frame_face_name_list == ['Person_2', 'Person_2']:
break
# Update cnt for faces in frames
self.last_frame_face_cnt = self.current_frame_face_cnt
self.current_frame_face_cnt = len(faces)
# Update the face name list in last frame
self.last_frame_face_name_list = self.current_frame_face_name_list[:]
# update frame centroid list
self.last_frame_centroid_list = self.current_frame_centroid_list
self.current_frame_centroid_list = []
print(" >>> current_frame_face_cnt: ", self.current_frame_face_cnt)
# 2.1. if cnt not changes
if self.current_frame_face_cnt == self.last_frame_face_cnt:
print(" >>> scene 1: 当前帧和上一帧相比没有发生人脸数变化 / no faces cnt changes in this frame!!!")
self.current_frame_face_position_list = []
if self.current_frame_face_cnt != 0:
# 2.1.1 Get ROI positions
for k, d in enumerate(faces):
self.current_frame_face_position_list.append(tuple(
[faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
self.current_frame_centroid_list.append(
[int(faces[k].left() + faces[k].right()) / 2,
int(faces[k].top() + faces[k].bottom()) / 2])
# 计算矩形框大小 / Compute the size of rectangle box
height = (d.bottom() - d.top())
width = (d.right() - d.left())
hh = int(height / 2)
ww = int(width / 2)
cv2.rectangle(img_rd,
tuple([d.left() - ww, d.top() - hh]),
tuple([d.right() + ww, d.bottom() + hh]),
(255, 255, 255), 2)
# multi-faces in current frames, use centroid tracker to track
if self.current_frame_face_cnt != 1:
self.centroid_tracker()
for i in range(self.current_frame_face_cnt):
# 6.2 write names under ROI
cv2.putText(img_rd, self.current_frame_face_name_list[i],
self.current_frame_face_position_list[i], self.font, 0.8, (0, 255, 255), 1,
cv2.LINE_AA)
# 2.2 if cnt of faces changes, 0->1 or 1->0 or ...
else:
print(" >>> scene 2: 当前帧和上一帧相比人脸数发生变化 / Faces cnt changes in this frame")
self.current_frame_face_position_list = []
self.current_frame_face_X_e_distance_list = []
# 2.2.1 face cnt decrease: 1->0, 2->1, ...
if self.current_frame_face_cnt == 0:
print(" >>> scene 2.1 人脸消失, 当前帧中没有人脸 / No guy in this frame!!!")
# clear list of names and features
self.current_frame_face_name_list = []
self.current_frame_face_features_list = []
# 2.2.2 face cnt increase: 0->1, 0->2, ..., 1->2, ...
else:
print(" >>> scene 2.2 出现人脸,进行人脸识别 / Do face recognition for people detected in this frame")
self.current_frame_face_name_list = []
for i in range(len(faces)):
shape = predictor(img_rd, faces[i])
self.current_frame_face_features_list.append(
face_reco_model.compute_face_descriptor(img_rd, shape))
self.current_frame_face_name_list.append("unknown")
# 2.2.2.1 遍历捕获到的图像中所有的人脸 / Traversal all the faces in the database
for k in range(len(faces)):
print(" >>> For face " + str(k+1) + " in current frame:")
self.current_frame_centroid_list.append(
[int(faces[k].left() + faces[k].right()) / 2,
int(faces[k].top() + faces[k].bottom()) / 2])
self.current_frame_face_X_e_distance_list = []
# 2.2.2.2 每个捕获人脸的名字坐标 / Positions of faces captured
self.current_frame_face_position_list.append(tuple(
[faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
# 2.2.2.3 对于某张人脸,遍历所有存储的人脸特征
# For every faces detected, compare the faces in the database
for i in range(len(self.features_known_list)):
# 如果 person_X 数据不为空
if str(self.features_known_list[i][0]) != '0.0':
print(" >>> with person", str(i + 1), "the e distance: ", end='')
e_distance_tmp = self.return_euclidean_distance(
self.current_frame_face_features_list[k],
self.features_known_list[i])
print(e_distance_tmp)
self.current_frame_face_X_e_distance_list.append(e_distance_tmp)
else:
# 空数据 person_X
self.current_frame_face_X_e_distance_list.append(999999999)
# 2.2.2.4 寻找出最小的欧式距离匹配 / Find the one with minimum e distance
similar_person_num = self.current_frame_face_X_e_distance_list.index(
min(self.current_frame_face_X_e_distance_list))
if min(self.current_frame_face_X_e_distance_list) < 0.4:
self.current_frame_face_name_list[k] = self.name_known_list[similar_person_num]
print(" >>> recognition result for face " + str(k+1) +": "+ self.name_known_list[similar_person_num])
else:
print(" >>> recognition result for face " + str(k + 1) + ": " + "unknown")
# 3. 生成的窗口添加说明文字 / Add note on cv2 window
self.draw_note(img_rd)
# 4. 按下 'q' 键退出 / Press 'q' to exit
if kk == ord('q'):
break
self.update_fps()
cv2.namedWindow("camera", 1)
cv2.imshow("camera", img_rd)
print(">>> Frame ends\n\n")
def run(self):
cap = cv2.VideoCapture(0)
self.process(cap)
cap.release()
cv2.destroyAllWindows()
def main():
Face_Recognizer_con = Face_Recognizer()
Face_Recognizer_con.run()
if __name__ == '__main__':
main()

View File

@ -1,247 +0,0 @@
# Copyright (C) 2020 coneypo
# SPDX-License-Identifier: MIT
# Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com
# 利用 OT 对于单张人脸追踪, 实时人脸识别 / Real-time face detection and recognition via OT for single face
import dlib
import numpy as np
import cv2
import os
import pandas as pd
import time
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib
detector = dlib.get_frontal_face_detector()
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
# Dlib Resnet 人脸识别模型,提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
class Face_Recognizer:
def __init__(self):
self.font = cv2.FONT_ITALIC
# For FPS
self.frame_time = 0
self.frame_start_time = 0
self.fps = 0
# cnt for frame
self.frame_cnt = 0
# 用来存储所有录入人脸特征的数组 / Save the features of faces in the database
self.features_known_list = []
# 用来存储录入人脸名字 / Save the name of faces in the database
self.name_known_list = []
# 用来存储上一帧和当前帧 ROI 的质心坐标 / List to save centroid positions of ROI in frame N-1 and N
self.last_frame_centroid_list = []
self.current_frame_centroid_list = []
# 用来存储上一帧和当前帧检测出目标的名字 / List to save names of objects in frame N-1 and N
self.last_frame_names_list = []
self.current_frame_face_names_list = []
# 上一帧和当前帧中人脸数的计数器 / cnt for faces in frame N-1 and N
self.last_frame_faces_cnt = 0
self.current_frame_face_cnt = 0
# 用来存放进行识别时候对比的欧氏距离 / Save the e-distance for faceX when recognizing
self.current_frame_face_X_e_distance_list = []
# 存储当前摄像头中捕获到的所有人脸的坐标名字 / Save the positions and names of current faces captured
self.current_frame_face_position_list = []
# 存储当前摄像头中捕获到的人脸特征 / Save the features of people in current frame
self.current_frame_face_features_list = []
# 从 "features_all.csv" 读取录入人脸特征 / Get known faces from "features_all.csv"
def get_face_database(self):
if os.path.exists("data/features_all.csv"):
path_features_known_csv = "data/features_all.csv"
csv_rd = pd.read_csv(path_features_known_csv, header=None)
for i in range(csv_rd.shape[0]):
features_someone_arr = []
for j in range(0, 128):
if csv_rd.iloc[i][j] == '':
features_someone_arr.append('0')
else:
features_someone_arr.append(csv_rd.iloc[i][j])
self.features_known_list.append(features_someone_arr)
self.name_known_list.append("Person_" + str(i + 1))
print("Faces in Database:", len(self.features_known_list))
return 1
else:
print('##### Warning #####', '\n')
print("'features_all.csv' not found!")
print(
"Please run 'get_faces_from_camera.py' and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'",
'\n')
print('##### End Warning #####')
return 0
# 计算两个128D向量间的欧式距离 / Compute the e-distance between two 128D features
# 更新 FPS / Update FPS of Video stream
def update_fps(self):
now = time.time()
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
# 计算两个128D向量间的欧式距离 / Compute the e-distance between two 128D features
@staticmethod
def return_euclidean_distance(feature_1, feature_2):
feature_1 = np.array(feature_1)
feature_2 = np.array(feature_2)
dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))
return dist
# 生成的 cv2 window 上面添加说明文字 / putText on cv2 window
def draw_note(self, img_rd):
# 添加说明 / Add some statements
cv2.putText(img_rd, "Face Recognizer with OT (one person)", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
    # 处理获取的视频流,进行人脸识别 / Face detection and recognition wit OT from input video stream
    def process(self, stream):
        """Per-frame recognition loop with a simple tracking shortcut.

        Reads frames from ``stream`` until 'q' is pressed. The expensive 128-D
        descriptor + database matching runs only when the face count changes
        from 0 to 1; while the count is stable between frames the previously
        recognised name is reused and only the box/label are redrawn.

        Scene logic per frame (count transition of detected faces):
          1->1 / 0->0: unchanged -> reuse cached name and redraw.
          1->0:        face left -> clear cached names and features.
          0->1:        new face  -> compute descriptor, match by e-distance.

        :param stream: an opened ``cv2.VideoCapture``-like object
        """
        # 1. 读取存放所有人脸特征的 csv / Get faces known from "features.all.csv"
        if self.get_face_database():
            while stream.isOpened():
                self.frame_cnt += 1
                print(">>> Frame " + str(self.frame_cnt) + " starts")
                flag, img_rd = stream.read()
                kk = cv2.waitKey(1)
                # 2. 检测人脸 / Detect faces for frame X
                faces = detector(img_rd, 0)
                # Update cnt for faces in frames
                self.last_frame_faces_cnt = self.current_frame_face_cnt
                self.current_frame_face_cnt = len(faces)
                print(" >>> current_frame_face_cnt: ", self.current_frame_face_cnt)
                # 2.1 If cnt not changes, 1->1 or 0->0
                if self.current_frame_face_cnt == self.last_frame_faces_cnt:
                    print(" >>> scene 1: 当前帧和上一帧相比没有发生人脸数变化 / no faces cnt changes in this frame!!!")
                    # One face in this frame
                    if self.current_frame_face_cnt != 0:
                        # 2.1.1 Get ROI positions
                        for k, d in enumerate(faces):
                            # 计算矩形框大小 / Compute the size of rectangle box
                            height = (d.bottom() - d.top())
                            width = (d.right() - d.left())
                            hh = int(height / 2)
                            ww = int(width / 2)
                            # draw a box enlarged by half the face size on each side
                            cv2.rectangle(img_rd,
                                          tuple([d.left() - ww, d.top() - hh]),
                                          tuple([d.right() + ww, d.bottom() + hh]),
                                          (255, 255, 255), 2)
                            # NOTE(review): assigning position_list[k] assumes an
                            # entry already exists from the earlier 0->1 transition;
                            # verify counts can only change through the 2.2 branch
                            self.current_frame_face_position_list[k] = tuple(
                                [faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)])
                            print(" >>> self.current_frame_face_names_list[k]: ",
                                  self.current_frame_face_names_list[k])
                            print(" >>> self.current_frame_face_position_list[k]: ",
                                  self.current_frame_face_position_list[k])
                            # 2.1.2 写名字 / Write names under ROI
                            cv2.putText(img_rd, self.current_frame_face_names_list[k],
                                        self.current_frame_face_position_list[k], self.font, 0.8, (0, 255, 255), 1,
                                        cv2.LINE_AA)
                # 2.2 if cnt of faces changes, 0->1 or 1->0
                else:
                    print(" >>> scene 2: 当前帧和上一帧相比人脸数发生变化 / Faces cnt changes in this frame")
                    self.current_frame_face_position_list = []
                    self.current_frame_face_X_e_distance_list = []
                    # 2.2.1 face cnt: 1->0, no faces in this frame
                    if self.current_frame_face_cnt == 0:
                        print(" >>> scene 2.1 人脸消失, 当前帧中没有人脸 / no guy in this frame!!!")
                        # clear list of names and
                        self.current_frame_face_names_list = []
                        self.current_frame_face_features_list = []
                    # 2.2.2 face cnt: 0->1, get the new face
                    elif self.current_frame_face_cnt == 1:
                        print(" >>> scene 2.2 出现人脸,进行人脸识别 / Get person in this frame and do face recognition")
                        self.current_frame_face_names_list = []
                        # NOTE(review): the features list is cleared only in the
                        # 1->0 branch above; this append relies on every 0->1
                        # being preceded by a 1->0 — confirm counts cannot jump
                        for i in range(len(faces)):
                            shape = predictor(img_rd, faces[i])
                            self.current_frame_face_features_list.append(
                                face_reco_model.compute_face_descriptor(img_rd, shape))
                        # 2.2.2.1 遍历捕获到的图像中所有的人脸 / Traversal all the faces in the database
                        for k in range(len(faces)):
                            self.current_frame_face_names_list.append("unknown")
                            # 2.2.2.2 每个捕获人脸的名字坐标 / Positions of faces captured
                            self.current_frame_face_position_list.append(tuple(
                                [faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]))
                            # 2.2.2.3 对于某张人脸,遍历所有存储的人脸特征
                            # For every faces detected, compare the faces in the database
                            for i in range(len(self.features_known_list)):
                                # 如果 person_X 数据不为空 / skip all-zero (empty) database entries
                                if str(self.features_known_list[i][0]) != '0.0':
                                    print(" >>> with person", str(i + 1), "the e distance: ", end='')
                                    e_distance_tmp = self.return_euclidean_distance(
                                        self.current_frame_face_features_list[k],
                                        self.features_known_list[i])
                                    print(e_distance_tmp)
                                    self.current_frame_face_X_e_distance_list.append(e_distance_tmp)
                                else:
                                    # 空数据 person_X / huge placeholder keeps list indices aligned with person numbers
                                    self.current_frame_face_X_e_distance_list.append(999999999)
                            # 2.2.2.4 寻找出最小的欧式距离匹配 / Find the one with minimum e distance
                            similar_person_num = self.current_frame_face_X_e_distance_list.index(min(self.current_frame_face_X_e_distance_list))
                            # 0.4 is the distance threshold below which the face is
                            # considered the same person as the database entry
                            if min(self.current_frame_face_X_e_distance_list) < 0.4:
                                self.current_frame_face_names_list[k] = self.name_known_list[similar_person_num]
                                print(" >>> recognition result for face " + str(k + 1) + ": " +
                                      self.name_known_list[similar_person_num])
                            else:
                                print(" >>> recognition result for face " + str(k + 1) + ": " + "unknown")
                # 3. 生成的窗口添加说明文字 / Add note on cv2 window
                self.draw_note(img_rd)
                if kk == ord('q'):
                    break
                self.update_fps()
                cv2.namedWindow("camera", 1)
                cv2.imshow("camera", img_rd)
                print(">>> Frame ends\n\n")
def run(self):
cap = cv2.VideoCapture(0)
self.process(cap)
cap.release()
cv2.destroyAllWindows()
def main():
    # 实例化识别器并启动 / Instantiate the recognizer and start the camera loop
    recognizer = Face_Recognizer()
    recognizer.run()


if __name__ == '__main__':
    main()

View File

@ -1,64 +1,66 @@
# Copyright (C) 2020 coneypo # 从人脸图像文件中提取人脸特征存入 CSV
# SPDX-License-Identifier: MIT # Features extraction from images and save into features_all.csv
# Author: coneypo # Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie # Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera # GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com # Mail: coneypo@foxmail.com
# 从人脸图像文件中提取人脸特征存入 "features_all.csv" / Extract features from images and save into "features_all.csv" # Created at 2018-05-11
# Updated at 2019-04-04
import cv2
import os import os
import dlib import dlib
from skimage import io from skimage import io
import csv import csv
import numpy as np import numpy as np
# 要读取人脸图像文件的路径 / Path of cropped faces # 要读取人脸图像文件的路径
path_images_from_camera = "data/data_faces_from_camera/" path_images_from_camera = "data/data_faces_from_camera/"
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib # Dlib 正向人脸检测器
detector = dlib.get_frontal_face_detector() detector = dlib.get_frontal_face_detector()
# Dlib 人脸 landmark 特征点检测器 / Get face landmarks # Dlib 人脸预测器
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat') predictor = dlib.shape_predictor("data/data_dlib/shape_predictor_5_face_landmarks.dat")
# Dlib Resnet 人脸识别模型,提取 128D 的特征矢量 / Use Dlib resnet50 model to get 128D face descriptor # Dlib 人脸识别模型
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat") # Face recognition model, the object maps human faces into 128D vectors
face_rec = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")
# 返回单张图像的 128D 特征 / Return 128D features for single image # 返回单张图像的 128D 特征
# Input: path_img <class 'str'>
# Output: face_descriptor <class 'dlib.vector'>
def return_128d_features(path_img): def return_128d_features(path_img):
img_rd = io.imread(path_img) img_rd = io.imread(path_img)
faces = detector(img_rd, 1) img_gray = cv2.cvtColor(img_rd, cv2.COLOR_BGR2RGB)
faces = detector(img_gray, 1)
print("%-40s %-20s" % ("检测到人脸的图像 / Image with faces detected:", path_img), '\n') print("%-40s %-20s" % ("检测到人脸的图像 / image with faces detected:", path_img), '\n')
# 因为有可能截下来的人脸再去检测,检测不出来人脸了, 所以要确保是 检测到人脸的人脸图像拿去算特征 # 因为有可能截下来的人脸再去检测,检测不出来人脸了
# For photos of faces saved, we need to make sure that we can detect faces from the cropped images # 所以要确保是 检测到人脸的人脸图像 拿去算特征
if len(faces) != 0: if len(faces) != 0:
shape = predictor(img_rd, faces[0]) shape = predictor(img_gray, faces[0])
face_descriptor = face_reco_model.compute_face_descriptor(img_rd, shape) face_descriptor = face_rec.compute_face_descriptor(img_gray, shape)
else: else:
face_descriptor = 0 face_descriptor = 0
print("no face") print("no face")
return face_descriptor return face_descriptor
# 返回 personX 的 128D 特征均值 / Return the mean value of 128D face descriptor for person X # 将文件夹中照片特征提取出来, 写入 CSV
# Input: path_faces_personX <class 'str'>
# Output: features_mean_personX <class 'numpy.ndarray'>
def return_features_mean_personX(path_faces_personX): def return_features_mean_personX(path_faces_personX):
features_list_personX = [] features_list_personX = []
photos_list = os.listdir(path_faces_personX) photos_list = os.listdir(path_faces_personX)
if photos_list: if photos_list:
for i in range(len(photos_list)): for i in range(len(photos_list)):
# 调用 return_128d_features() 得到 128D 特征 / Get 128D features for single image of personX # 调用return_128d_features()得到128d特征
print("%-40s %-20s" % ("正在读的人脸图像 / Reading image:", path_faces_personX + "/" + photos_list[i])) print("%-40s %-20s" % ("正在读的人脸图像 / image to read:", path_faces_personX + "/" + photos_list[i]))
features_128d = return_128d_features(path_faces_personX + "/" + photos_list[i]) features_128d = return_128d_features(path_faces_personX + "/" + photos_list[i])
# 遇到没有检测出人脸的图片跳过 / Jump if no face detected from image # print(features_128d)
# 遇到没有检测出人脸的图片跳过
if features_128d == 0: if features_128d == 0:
i += 1 i += 1
else: else:
@ -66,17 +68,17 @@ def return_features_mean_personX(path_faces_personX):
else: else:
print("文件夹内图像文件为空 / Warning: No images in " + path_faces_personX + '/', '\n') print("文件夹内图像文件为空 / Warning: No images in " + path_faces_personX + '/', '\n')
# 计算 128D 特征的均值 / Compute the mean # 计算 128D 特征的均值
# personX 的 N 张图像 x 128D -> 1 x 128D # personX 的 N 张图像 x 128D -> 1 x 128D
if features_list_personX: if features_list_personX:
features_mean_personX = np.array(features_list_personX).mean(axis=0) features_mean_personX = np.array(features_list_personX).mean(axis=0)
else: else:
features_mean_personX = np.zeros(128, dtype=int, order='C') features_mean_personX = '0'
print(type(features_mean_personX))
return features_mean_personX return features_mean_personX
# 获取已录入的最后一个人脸序号 / Get the order of latest person # 获取已录入的最后一个人脸序号 / get the num of latest person
person_list = os.listdir("data/data_faces_from_camera/") person_list = os.listdir("data/data_faces_from_camera/")
person_num_list = [] person_num_list = []
for person in person_list: for person in person_list:
@ -87,8 +89,8 @@ with open("data/features_all.csv", "w", newline="") as csvfile:
writer = csv.writer(csvfile) writer = csv.writer(csvfile)
for person in range(person_cnt): for person in range(person_cnt):
# Get the mean/average features of face/personX, it will be a list with a length of 128D # Get the mean/average features of face/personX, it will be a list with a length of 128D
print(path_images_from_camera + "person_" + str(person + 1)) print(path_images_from_camera + "person_"+str(person+1))
features_mean_personX = return_features_mean_personX(path_images_from_camera + "person_" + str(person + 1)) features_mean_personX = return_features_mean_personX(path_images_from_camera + "person_"+str(person+1))
writer.writerow(features_mean_personX) writer.writerow(features_mean_personX)
print("特征均值 / The mean of features:", list(features_mean_personX)) print("特征均值 / The mean of features:", list(features_mean_personX))
print('\n') print('\n')

View File

@ -1,188 +1,196 @@
# Copyright (C) 2020 coneypo # 进行人脸录入 / face register
# SPDX-License-Identifier: MIT # 录入多张人脸 / support multi-faces
# Author: coneypo # Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie # Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera # GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
# Mail: coneypo@foxmail.com # Mail: coneypo@foxmail.com
# 进行人脸录入 / Face register # Created at 2018-05-11
# Updated at 2019-04-12
import dlib import dlib # 人脸处理的库 Dlib
import numpy as np import numpy as np # 数据处理的库 Numpy
import cv2 import cv2 # 图像处理的库 OpenCv
import os
import shutil
import time
# Dlib 正向人脸检测器 / Use frontal face detector of Dlib import os # 读写文件
import shutil # 读写文件
# Dlib 正向人脸检测器 / frontal face detector
detector = dlib.get_frontal_face_detector() detector = dlib.get_frontal_face_detector()
# Dlib 68 点特征预测器 / 68 points features predictor
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')
class Face_Register: # OpenCv 调用摄像头 use camera
def __init__(self): cap = cv2.VideoCapture(0)
self.path_photos_from_camera = "data/data_faces_from_camera/"
self.font = cv2.FONT_ITALIC
self.existing_faces_cnt = 0 # 已录入的人脸计数器 / cnt for counting saved faces # 设置视频参数 set camera
self.ss_cnt = 0 # 录入 personX 人脸时图片计数器 / cnt for screen shots cap.set(3, 480)
self.current_frame_faces_cnt = 0 # 录入人脸计数器 / cnt for counting faces in current frame
self.save_flag = 1 # 之后用来控制是否保存图像的 flag / The flag to control if save # 人脸截图的计数器 the counter for screen shoot
self.press_n_flag = 0 # 之后用来检查是否先按 'n' 再按 's' / The flag to check if press 'n' before 's' cnt_ss = 0
# FPS # 存储人脸的文件夹 the folder to save faces
self.frame_time = 0 current_face_dir = ""
self.frame_start_time = 0
self.fps = 0
# 新建保存人脸图像文件和数据CSV文件夹 / Make dir for saving photos and csv # 保存 faces images 的路径 the directory to save images of faces
def pre_work_mkdir(self): path_photos_from_camera = "data/data_faces_from_camera/"
# 新建文件夹 / Create folders to save faces images and csv
if os.path.isdir(self.path_photos_from_camera):
pass
else:
os.mkdir(self.path_photos_from_camera)
# 删除之前存的人脸数据文件夹 / Delete the old data of faces
def pre_work_del_old_face_folders(self):
# 删除之前存的人脸数据文件夹, 删除 "/data_faces_from_camera/person_x/"...
folders_rd = os.listdir(self.path_photos_from_camera)
for i in range(len(folders_rd)):
shutil.rmtree(self.path_photos_from_camera+folders_rd[i])
if os.path.isfile("data/features_all.csv"):
os.remove("data/features_all.csv")
# 如果有之前录入的人脸, 在之前 person_x 的序号按照 person_x+1 开始录入 / Start from person_x+1 # 新建保存人脸图像文件和数据CSV文件夹
def check_existing_faces_cnt(self): # mkdir for saving photos and csv
if os.listdir("data/data_faces_from_camera/"): def pre_work_mkdir():
# 获取已录入的最后一个人脸序号 / Get the order of latest person
person_list = os.listdir("data/data_faces_from_camera/")
person_num_list = []
for person in person_list:
person_num_list.append(int(person.split('_')[-1]))
self.existing_faces_cnt = max(person_num_list)
# 如果第一次存储或者没有之前录入的人脸, 按照 person_1 开始录入 / Start from person_1 # 新建文件夹 / make folders to save faces images and csv
else: if os.path.isdir(path_photos_from_camera):
self.existing_faces_cnt = 0 pass
else:
os.mkdir(path_photos_from_camera)
# 获取处理之后 stream 的帧数 / Update FPS of video stream
def update_fps(self):
now = time.time()
self.frame_time = now - self.frame_start_time
self.fps = 1.0 / self.frame_time
self.frame_start_time = now
# 生成的 cv2 window 上面添加说明文字 / PutText on cv2 window pre_work_mkdir()
def draw_note(self, img_rd):
# 添加说明 / Add some notes
cv2.putText(img_rd, "Face Register", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1,
cv2.LINE_AA)
cv2.putText(img_rd, "Faces: " + str(self.current_frame_faces_cnt), (20, 140), self.font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "N: Create face folder", (20, 350), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "S: Save current face", (20, 400), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
# 获取人脸 / Main process of face detection and saving
def process(self, stream):
# 1. 新建储存人脸图像文件目录 / Create folders to save photos
self.pre_work_mkdir()
# 2. 删除 "/data/data_faces_from_camera" 中已有人脸图像文件 / Uncomment if want to delete the saved faces and start from person_1 ##### optional/可选, 默认关闭 #####
if os.path.isdir(self.path_photos_from_camera): # 删除之前存的人脸数据文件夹
self.pre_work_del_old_face_folders() # delete the old data of faces
def pre_work_del_old_face_folders():
# 删除之前存的人脸数据文件夹
# 删除 "/data_faces_from_camera/person_x/"...
folders_rd = os.listdir(path_photos_from_camera)
for i in range(len(folders_rd)):
shutil.rmtree(path_photos_from_camera+folders_rd[i])
# 3. 检查 "/data/data_faces_from_camera" 中已有人脸文件 if os.path.isfile("data/features_all.csv"):
self.check_existing_faces_cnt() os.remove("data/features_all.csv")
while stream.isOpened(): # 这里在每次程序录入之前, 删掉之前存的人脸数据
flag, img_rd = stream.read() # Get camera video stream # 如果这里打开,每次进行人脸录入的时候都会删掉之前的人脸图像文件夹 person_1/,person_2/,person_3/...
kk = cv2.waitKey(1) # If enable this function, it will delete all the old data in dir person_1/,person_2/,/person_3/...
faces = detector(img_rd, 0) # Use Dlib face detector # pre_work_del_old_face_folders()
##################################
# 4. 按下 'n' 新建存储人脸的文件夹 / Press 'n' to create the folders for saving faces
if kk == ord('n'):
self.existing_faces_cnt += 1
current_face_dir = self.path_photos_from_camera + "person_" + str(self.existing_faces_cnt)
os.makedirs(current_face_dir)
print('\n')
print("新建的人脸文件夹 / Create folders: ", current_face_dir)
self.ss_cnt = 0 # 将人脸计数器清零 / Clear the cnt of screen shots # 如果有之前录入的人脸 / if the old folders exists
self.press_n_flag = 1 # 已经按下 'n' / Pressed 'n' already # 在之前 person_x 的序号按照 person_x+1 开始录入 / start from person_x+1
if os.listdir("data/data_faces_from_camera/"):
# 获取已录入的最后一个人脸序号 / get the num of latest person
person_list = os.listdir("data/data_faces_from_camera/")
person_num_list = []
for person in person_list:
person_num_list.append(int(person.split('_')[-1]))
person_cnt = max(person_num_list)
# 5. 检测到人脸 / Face detected # 如果第一次存储或者没有之前录入的人脸, 按照 person_1 开始录入
if len(faces) != 0: # start from person_1
# 矩形框 / Show the ROI of faces else:
for k, d in enumerate(faces): person_cnt = 0
# 计算矩形框大小 / Compute the size of rectangle box
height = (d.bottom() - d.top())
width = (d.right() - d.left())
hh = int(height/2)
ww = int(width/2)
# 6. 判断人脸矩形框是否超出 480x640 / If the size of ROI > 480x640 # 之后用来控制是否保存图像的 flag / the flag to control if save
if (d.right()+ww) > 640 or (d.bottom()+hh > 480) or (d.left()-ww < 0) or (d.top()-hh < 0): save_flag = 1
cv2.putText(img_rd, "OUT OF RANGE", (20, 300), self.font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
color_rectangle = (0, 0, 255) # 之后用来检查是否先按 'n' 再按 's' / the flag to check if press 'n' before 's'
save_flag = 0 press_n_flag = 0
if kk == ord('s'):
print("请调整位置 / Please adjust your position") while cap.isOpened():
flag, img_rd = cap.read()
# print(img_rd.shape)
# It should be 480 height * 640 width
kk = cv2.waitKey(1)
img_gray = cv2.cvtColor(img_rd, cv2.COLOR_RGB2GRAY)
# 人脸数 faces
faces = detector(img_gray, 0)
# 待会要写的字体 / font to write
font = cv2.FONT_HERSHEY_COMPLEX
# 按下 'n' 新建存储人脸的文件夹 / press 'n' to create the folders for saving faces
if kk == ord('n'):
person_cnt += 1
current_face_dir = path_photos_from_camera + "person_" + str(person_cnt)
os.makedirs(current_face_dir)
print('\n')
print("新建的人脸文件夹 / Create folders: ", current_face_dir)
cnt_ss = 0 # 将人脸计数器清零 / clear the cnt of faces
press_n_flag = 1 # 已经按下 'n' / have pressed 'n'
# 检测到人脸 / if face detected
if len(faces) != 0:
# 矩形框 / show the rectangle box
for k, d in enumerate(faces):
# 计算矩形大小
# we need to compute the width and height of the box
# (x,y), (宽度width, 高度height)
pos_start = tuple([d.left(), d.top()])
pos_end = tuple([d.right(), d.bottom()])
# 计算矩形框大小 / compute the size of rectangle box
height = (d.bottom() - d.top())
width = (d.right() - d.left())
hh = int(height/2)
ww = int(width/2)
# 设置颜色 / the color of rectangle of faces detected
color_rectangle = (255, 255, 255)
# 判断人脸矩形框是否超出 480x640
if (d.right()+ww) > 640 or (d.bottom()+hh > 480) or (d.left()-ww < 0) or (d.top()-hh < 0):
cv2.putText(img_rd, "OUT OF RANGE", (20, 300), font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
color_rectangle = (0, 0, 255)
save_flag = 0
if kk == ord('s'):
print("请调整位置 / Please adjust your position")
else:
color_rectangle = (255, 255, 255)
save_flag = 1
cv2.rectangle(img_rd,
tuple([d.left() - ww, d.top() - hh]),
tuple([d.right() + ww, d.bottom() + hh]),
color_rectangle, 2)
# 根据人脸大小生成空的图像 / create blank image according to the size of face detected
im_blank = np.zeros((int(height*2), width*2, 3), np.uint8)
if save_flag:
# 按下 's' 保存摄像头中的人脸到本地 / press 's' to save faces into local images
if kk == ord('s'):
# 检查有没有先按'n'新建文件夹 / check if you have pressed 'n'
if press_n_flag:
cnt_ss += 1
for ii in range(height*2):
for jj in range(width*2):
im_blank[ii][jj] = img_rd[d.top()-hh + ii][d.left()-ww + jj]
cv2.imwrite(current_face_dir + "/img_face_" + str(cnt_ss) + ".jpg", im_blank)
print("写入本地 / Save into:", str(current_face_dir) + "/img_face_" + str(cnt_ss) + ".jpg")
else: else:
color_rectangle = (255, 255, 255) print("请在按 'S' 之前先按 'N' 来建文件夹 / Please press 'N' before 'S'")
save_flag = 1
cv2.rectangle(img_rd, # 显示人脸数 / show the numbers of faces detected
tuple([d.left() - ww, d.top() - hh]), cv2.putText(img_rd, "Faces: " + str(len(faces)), (20, 100), font, 0.8, (0, 255, 0), 1, cv2.LINE_AA)
tuple([d.right() + ww, d.bottom() + hh]),
color_rectangle, 2)
# 7. 根据人脸大小生成空的图像 / Create blank image according to the size of face detected # 添加说明 / add some statements
img_blank = np.zeros((int(height*2), width*2, 3), np.uint8) cv2.putText(img_rd, "Face Register", (20, 40), font, 1, (0, 0, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "N: New face folder", (20, 350), font, 0.8, (0, 0, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "S: Save current face", (20, 400), font, 0.8, (0, 0, 0), 1, cv2.LINE_AA)
cv2.putText(img_rd, "Q: Quit", (20, 450), font, 0.8, (0, 0, 0), 1, cv2.LINE_AA)
if save_flag: # 按下 'q' 键退出 / press 'q' to exit
# 8. 按下 's' 保存摄像头中的人脸到本地 / Press 's' to save faces into local images if kk == ord('q'):
if kk == ord('s'): break
# 检查有没有先按'n'新建文件夹 / Check if you have pressed 'n'
if self.press_n_flag:
self.ss_cnt += 1
for ii in range(height*2):
for jj in range(width*2):
img_blank[ii][jj] = img_rd[d.top()-hh + ii][d.left()-ww + jj]
cv2.imwrite(current_face_dir + "/img_face_" + str(self.ss_cnt) + ".jpg", img_blank)
print("写入本地 / Save into:", str(current_face_dir) + "/img_face_" + str(self.ss_cnt) + ".jpg")
else:
print("请先按 'N' 来建文件夹, 按 'S' / Please press 'N' and press 'S'")
self.current_frame_faces_cnt = len(faces) # 如果需要摄像头窗口大小可调 / uncomment this line if you want the camera window is resizeable
# cv2.namedWindow("camera", 0)
# 9. 生成的窗口添加说明文字 / Add note on cv2 window cv2.imshow("camera", img_rd)
self.draw_note(img_rd)
# 10. 按下 'q' 键退出 / Press 'q' to exit # 释放摄像头 / release camera
if kk == ord('q'): cap.release()
break
# 11. Update FPS cv2.destroyAllWindows()
self.update_fps()
cv2.namedWindow("camera", 1)
cv2.imshow("camera", img_rd)
def run(self):
cap = cv2.VideoCapture(0)
self.process(cap)
cap.release()
cv2.destroyAllWindows()
def main():
Face_Register_con = Face_Register()
Face_Register_con.run()
if __name__ == '__main__':
main()

View File

@ -1,3 +1,6 @@
# OpenCv 调用摄像头
# 默认调用笔记本摄像头
# Author: coneypo # Author: coneypo
# Blog: http://www.cnblogs.com/AdaminXie # Blog: http://www.cnblogs.com/AdaminXie
# GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera # GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera
@ -9,31 +12,7 @@ cap = cv2.VideoCapture(0)
# cap.set(propId, value) # cap.set(propId, value)
# 设置视频参数: propId - 设置的视频参数, value - 设置的参数值 # 设置视频参数: propId - 设置的视频参数, value - 设置的参数值
""" cap.set(3, 480)
0. cv2.CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
1. cv2.CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
2. cv2.CAP_PROP_POS_AVI_RATIO Relative position of the video file
3. cv2.CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
4. cv2.CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
5. cv2.CAP_PROP_FPS Frame rate.
6. cv2.CAP_PROP_FOURCC 4-character code of codec.
7. cv2.CAP_PROP_FRAME_COUNT Number of frames in the video file.
8. cv2.CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
9. cv2.CAP_PROP_MODE Backend-specific value indicating the current capture mode.
10. cv2.CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
11. cv2.CAP_PROP_CONTRAST Contrast of the image (only for cameras).
12. cv2.CAP_PROP_SATURATION Saturation of the image (only for cameras).
13. cv2.CAP_PROP_HUE Hue of the image (only for cameras).
14. cv2.CAP_PROP_GAIN Gain of the image (only for cameras).
15. cv2.CAP_PROP_EXPOSURE Exposure (only for cameras).
16. cv2.CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
17. cv2.CAP_PROP_WHITE_BALANCE Currently unsupported
18. cv2.CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
"""
# The default size of frame from camera will be 640x480 in Windows or Ubuntu
# So we will not set "cap.set" here, it doesn't work
# cap.set(propId=cv2.CAP_PROP_FRAME_WIDTH, value=cap.get(cv2.CAP_PROP_FRAME_WIDTH))
# cap.isOpened() 返回 true/false, 检查摄像头初始化是否成功 # cap.isOpened() 返回 true/false, 检查摄像头初始化是否成功
print(cap.isOpened()) print(cap.isOpened())
@ -59,11 +38,6 @@ print(cap.isOpened())
while cap.isOpened(): while cap.isOpened():
ret_flag, img_camera = cap.read() ret_flag, img_camera = cap.read()
print("height: ", img_camera.shape[0])
print("width: ", img_camera.shape[1])
print('\n')
cv2.imshow("camera", img_camera) cv2.imshow("camera", img_camera)
# 每帧数据延时 1ms, 延时为0, 读取的是静态帧 # 每帧数据延时 1ms, 延时为0, 读取的是静态帧

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 157 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 184 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.5 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 358 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 360 KiB

BIN
introduction/face_reco_single_person.png Normal file → Executable file

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.1 MiB

After

Width:  |  Height:  |  Size: 428 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 457 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 499 KiB

BIN
introduction/face_reco_two_people_in_database.png Normal file → Executable file

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.5 MiB

After

Width:  |  Height:  |  Size: 425 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 161 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 345 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 332 KiB

BIN
introduction/get_face_from_camera.png Normal file → Executable file

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

After

Width:  |  Height:  |  Size: 416 KiB

BIN
introduction/get_face_from_camera_out_of_range.png Normal file → Executable file

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 MiB

After

Width:  |  Height:  |  Size: 433 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 267 KiB

After

Width:  |  Height:  |  Size: 324 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 74 KiB

View File

@ -1,3 +1,3 @@
31231dlib==19.17.0 dlib==19.17.0
numpy==1.15.1 numpy==1.15.1
scikit-image==0.14.0 scikit-image==0.14.0