Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Python 3.5 is required by the legacy TensorFlow 1.x / facenet code in this repo.
FROM python:3.5-slim

# curl                                     : download the pre-trained model, det[1-3].npy and requirements.txt
# g++, libglib2.0-0, libsm6, libxrender1,
# libxext6                                 : needed to build/run grpcio and other pip packages
# libgl1                                   : runtime dependency of the cv2 (OpenCV) package
# The apt caches are removed in the same layer to keep the image small.
RUN apt-get update \
    && apt-get install -y curl g++ libgl1 libglib2.0-0 libsm6 libxrender1 libxext6 \
    && rm -rf /var/cache/apt/archives /var/lib/apt/lists/*

# Keep the app out of the filesystem root; all later relative paths resolve here.
WORKDIR /app

# Download the pre-trained facenet model, the MTCNN weight files and requirements.txt.
# -f fails the build on HTTP errors instead of saving an error page as the file;
# the [1-3] range is quoted so curl (not the shell) expands it into det1..det3.npy.
RUN curl -fL -O https://github.com/barisgecer/facegan/raw/master/facenet_model/20170511-185253.pb \
    && curl -fL -O "https://github.com/davidsandberg/facenet/raw/master/src/align/det[1-3].npy" \
    && curl -fL -O https://github.com/davidsandberg/facenet/raw/master/requirements.txt

RUN python3 -m pip install --no-cache-dir -r requirements.txt

# misc.imresize was removed from scipy releases newer than 1.1.0, so pin scipy back.
RUN pip uninstall -y scipy && pip install --no-cache-dir scipy==1.1.0

COPY . .

# Align the training images and train the classifier at image-build time so the
# container starts straight into recognition.
RUN python3 Make_aligndata_git.py && python3 Make_classifier_git.py

ENTRYPOINT ["python3"]
CMD ["realtime_facenet_git.py"]
6 changes: 3 additions & 3 deletions Make_aligndata_git.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,20 +13,20 @@
import random
from time import sleep

output_dir_path = '/..Path to output folder../'
output_dir_path = './output_dir' # Path to output folder
output_dir = os.path.expanduser(output_dir_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)

datadir = '/..Path to human img data folder../'
datadir = './data' # Path to human img data folder
dataset = facenet.get_dataset(datadir)

print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, './Path to det1.npy,..')
pnet, rnet, onet = detect_face.create_mtcnn(sess, './') # './Path to det1.npy,..' as the second argument

minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # thresholds for the three detection stages
Expand Down
6 changes: 3 additions & 3 deletions Make_classifier_git.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,14 @@

with tf.Session() as sess:

datadir = '/..Path to align face data../'
datadir = './output_dir' # Path to aligned face data
dataset = facenet.get_dataset(datadir)
paths, labels = facenet.get_image_paths_and_labels(dataset)
print('Number of classes: %d' % len(dataset))
print('Number of images: %d' % len(paths))

print('Loading feature extraction model')
modeldir = '/..Path to Pre-trained model../20170512-110547/20170512-110547.pb'
modeldir = './20170511-185253.pb' # Pre-trained model's path
facenet.load_model(modeldir)

images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
Expand All @@ -48,7 +48,7 @@
feed_dict = {images_placeholder: images, phase_train_placeholder: False}
emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)

classifier_filename = '/..Path to save classifier../my_classifier.pkl'
classifier_filename = './classifier/my_classifier.pkl' # Path to classifier
classifier_filename_exp = os.path.expanduser(classifier_filename)

# Train classifier
Expand Down
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,5 +20,7 @@ Real-time face recognition program using Google's facenet.
</br>Your own classifier is a ~.pkl file that loads the previously mentioned pre-trained model ('[20170511-185253.pb](https://drive.google.com/file/d/0B5MzpY9kBtDVOTVnU3NIaUdySFE/edit)') and embeds the face for each person.<br/>All of these can be obtained by running 'Make_classifier.py'.<br/>
* Finally, we load our own 'my_classifier.pkl' obtained above, then open the camera and start recognition.
</br> (Note that, look carefully at the paths of files and folders in all .py)
### Docker
* First, clone this repo and change current directory to its directory on your device. Next, put the images of each person in separate directories with the names that you want to be shown for each person inside the '*data*' directory; for example, Daehyun's photos are in: '*./data/Daehyun*' and Byeonggil's are in: '*./data/Byeonggil*'. Then, you can run it in two ways: <br/>1. Build the Docker image by running: <br/>`docker build -t "your_image_name":"your_image_tag" .` <br/>After the build is done, run: <br/>`docker run --device /dev/video0:/dev/video0 "your_image_name":"your_image_tag"` <br/>2. Run: <br/>`docker compose up`<br/> (Note that Windows doesn't give camera access to Docker containers. So, you can just run this container on Linux.)
## Result
<img src="https://github.com/bearsprogrammer/real-time-deep-face-recogniton/blob/master/realtime_demo_pic.jpg" width="60%">
<img src="https://github.com/bearsprogrammer/real-time-deep-face-recogniton/blob/master/realtime_demo_pic.jpg" width="60%">
4 changes: 4 additions & 0 deletions classifier/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Ignore everything in this folder (generated classifier artifacts)...
*
# ...except this file, so the empty directory is kept under version control.
!.gitignore
4 changes: 4 additions & 0 deletions data/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Ignore everything in this folder (user-supplied training images)...
*
# ...except this file, so the empty directory is kept under version control.
!.gitignore
2 changes: 1 addition & 1 deletion detect_face.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def load(self, data_path, session, ignore_missing=False):
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path, encoding='latin1').item() #pylint: disable=no-member
data_dict = np.load(data_path, encoding='latin1', allow_pickle=True).item() #pylint: disable=no-member

for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
Expand Down
5 changes: 5 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Compose service for the real-time face recognition app.
services:
  real-time-deep-face-recognition:
    # Build the image from the Dockerfile in this directory.
    build: .
    # Pass the host webcam through to the container (Linux only;
    # Windows does not expose camera devices to Docker containers).
    devices:
      - /dev/video0:/dev/video0
4 changes: 4 additions & 0 deletions output_dir/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Ignore everything in this folder (generated aligned-face output)...
*
# ...except this file, so the empty directory is kept under version control.
!.gitignore
8 changes: 4 additions & 4 deletions realtime_facenet_git.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, './Path to det1.npy,..')
pnet, rnet, onet = detect_face.create_mtcnn(sess, './') # './Path to det1.npy,..' as the second argument

minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # thresholds for the three detection stages
Expand All @@ -36,18 +36,18 @@
image_size = 182
input_image_size = 160

HumanNames = ['Human_a','Human_b','Human_c','...','Human_h'] #train human name
HumanNames = [Human for Human in os.listdir("./data")] #training human name

print('Loading feature extraction model')
modeldir = '/..Path to pre-trained model../20170512-110547/20170512-110547.pb'
modeldir = './20170511-185253.pb' # Pre-trained model's path
facenet.load_model(modeldir)

images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]

classifier_filename = '/..Path to classifier model../my_classifier.pkl'
classifier_filename = './classifier/my_classifier.pkl' # Classifier Path
classifier_filename_exp = os.path.expanduser(classifier_filename)
with open(classifier_filename_exp, 'rb') as infile:
(model, class_names) = pickle.load(infile)
Expand Down