diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..ad35721
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,12 @@
+FROM python:3.5-slim
+# curl: fetches the pre-trained model, det[1-3].npy and requirements.txt; g++, libglib2.0-0, libsm6, libxrender1, libxext6: build requirements for grpcio (pulled in via pip); libgl1: runtime requirement of the cv2 package.
+RUN apt-get update && apt-get install -y --no-install-recommends curl g++ libgl1 libglib2.0-0 libsm6 libxrender1 libxext6 && rm -rf /var/cache/apt/archives /var/lib/apt/lists/*
+WORKDIR /app
+# Download the pre-trained model, the required .npy files and requirements.txt; -f makes the build fail on HTTP errors instead of silently saving an error page as the file.
+RUN curl -fL -O https://github.com/barisgecer/facegan/raw/master/facenet_model/20170511-185253.pb && curl -fL -O https://github.com/davidsandberg/facenet/raw/master/src/align/det[1-3].npy && curl -fL -O https://github.com/davidsandberg/facenet/raw/master/requirements.txt
+# scipy is re-pinned to 1.1.0 in the same layer because misc.imresize was removed from scipy releases newer than 1.1.0.
+RUN python3 -m pip install --no-cache-dir -r requirements.txt && python3 -m pip install --no-cache-dir scipy==1.1.0
+COPY . .
+RUN python3 Make_aligndata_git.py && python3 Make_classifier_git.py
+ENTRYPOINT ["python3"]
+CMD ["realtime_facenet_git.py"]
diff --git a/Make_aligndata_git.py b/Make_aligndata_git.py
index e77d94c..6768cb1 100644
--- a/Make_aligndata_git.py
+++ b/Make_aligndata_git.py
@@ -13,12 +13,12 @@
import random
from time import sleep
-output_dir_path = '/..Path to output folder../'
+output_dir_path = './output_dir' # Path to output folder
output_dir = os.path.expanduser(output_dir_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
-datadir = '/..Path to human img data folder../'
+datadir = './data' # Path to human img data folder
dataset = facenet.get_dataset(datadir)
print('Creating networks and loading parameters')
@@ -26,7 +26,7 @@
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
- pnet, rnet, onet = detect_face.create_mtcnn(sess, './Path to det1.npy,..')
+ pnet, rnet, onet = detect_face.create_mtcnn(sess, './') # './Path to det1.npy,..' as the second argument
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
diff --git a/Make_classifier_git.py b/Make_classifier_git.py
index 705a914..189cb5c 100644
--- a/Make_classifier_git.py
+++ b/Make_classifier_git.py
@@ -18,14 +18,14 @@
with tf.Session() as sess:
- datadir = '/..Path to align face data../'
+ datadir = './output_dir' # Path to aligned face data
dataset = facenet.get_dataset(datadir)
paths, labels = facenet.get_image_paths_and_labels(dataset)
print('Number of classes: %d' % len(dataset))
print('Number of images: %d' % len(paths))
print('Loading feature extraction model')
- modeldir = '/..Path to Pre-trained model../20170512-110547/20170512-110547.pb'
+ modeldir = './20170511-185253.pb' # Pre-trained model's path
facenet.load_model(modeldir)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
@@ -48,7 +48,7 @@
feed_dict = {images_placeholder: images, phase_train_placeholder: False}
emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
- classifier_filename = '/..Path to save classifier../my_classifier.pkl'
+ classifier_filename = './classifier/my_classifier.pkl' # Path to classifier
classifier_filename_exp = os.path.expanduser(classifier_filename)
# Train classifier
diff --git a/README.md b/README.md
index f4bc29e..0f6beeb 100644
--- a/README.md
+++ b/README.md
@@ -20,5 +20,7 @@ Real-time face recognition program using Google's facenet.
Your own classifier is a ~.pkl file that loads the previously mentioned pre-trained model ('[20170511-185253.pb](https://drive.google.com/file/d/0B5MzpY9kBtDVOTVnU3NIaUdySFE/edit)') and embeds the face for each person.
All of these can be obtained by running 'Make_classifier.py'.
* Finally, we load our own 'my_classifier.pkl' obtained above and then open the sensor and start recognition.
(Note that, look carefully at the paths of files and folders in all .py)
+### Docker
+* First, clone this repo and change your current directory to the cloned directory. Next, put the images of each person in a separate directory, named as you want that person to be labeled, inside the '*data*' directory; for example, Daehyun's photos go in '*./data/Daehyun*' and Byeonggil's in '*./data/Byeonggil*'. Then, you can run it in two ways:
1. Build the Docker image by running:
`docker build -t "your_image_name":"your_image_tag" .`
After the build is done, run:
`docker run --device /dev/video0:/dev/video0 "your_image_name":"your_image_tag`
2. Run:
`docker compose up`.
(Note that Windows doesn’t give camera access to Docker containers. So, you can just run this container on Linux.)
## Result
-
+
\ No newline at end of file
diff --git a/classifier/.gitignore b/classifier/.gitignore
new file mode 100644
index 0000000..6b3a2a1
--- /dev/null
+++ b/classifier/.gitignore
@@ -0,0 +1,4 @@
+# Ignore every file in this folder
+*
+# Except this one
+!.gitignore
\ No newline at end of file
diff --git a/data/.gitignore b/data/.gitignore
new file mode 100644
index 0000000..6b3a2a1
--- /dev/null
+++ b/data/.gitignore
@@ -0,0 +1,4 @@
+# Ignore every file in this folder
+*
+# Except this one
+!.gitignore
\ No newline at end of file
diff --git a/detect_face.py b/detect_face.py
index 619f42f..a3f2aa1 100644
--- a/detect_face.py
+++ b/detect_face.py
@@ -82,7 +82,7 @@ def load(self, data_path, session, ignore_missing=False):
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
- data_dict = np.load(data_path, encoding='latin1').item() #pylint: disable=no-member
+ data_dict = np.load(data_path, encoding='latin1', allow_pickle=True).item() #pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..c0cca7e
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,5 @@
+services:
+ real-time-deep-face-recognition:
+ build: .
+ devices:
+ - /dev/video0:/dev/video0
\ No newline at end of file
diff --git a/output_dir/.gitignore b/output_dir/.gitignore
new file mode 100644
index 0000000..6b3a2a1
--- /dev/null
+++ b/output_dir/.gitignore
@@ -0,0 +1,4 @@
+# Ignore every file in this folder
+*
+# Except this one
+!.gitignore
\ No newline at end of file
diff --git a/realtime_facenet_git.py b/realtime_facenet_git.py
index 0376cfe..e67bee7 100644
--- a/realtime_facenet_git.py
+++ b/realtime_facenet_git.py
@@ -25,7 +25,7 @@
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
- pnet, rnet, onet = detect_face.create_mtcnn(sess, './Path to det1.npy,..')
+ pnet, rnet, onet = detect_face.create_mtcnn(sess, './') # './Path to det1.npy,..' as the second argument
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
@@ -36,10 +36,10 @@
image_size = 182
input_image_size = 160
- HumanNames = ['Human_a','Human_b','Human_c','...','Human_h'] #train human name
+ HumanNames = [Human for Human in os.listdir("./data")] #training human name
print('Loading feature extraction model')
- modeldir = '/..Path to pre-trained model../20170512-110547/20170512-110547.pb'
+ modeldir = './20170511-185253.pb' # Pre-trained model's path
facenet.load_model(modeldir)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
@@ -47,7 +47,7 @@
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]
- classifier_filename = '/..Path to classifier model../my_classifier.pkl'
+ classifier_filename = './classifier/my_classifier.pkl' # Classifier Path
classifier_filename_exp = os.path.expanduser(classifier_filename)
with open(classifier_filename_exp, 'rb') as infile:
(model, class_names) = pickle.load(infile)