
Commit 11b1e14

fix integrated system
1 parent 709a297 commit 11b1e14

File tree

    boss_input.py
    boss_train.py
    capture.py → camera_reader.py
    image_show.py

4 files changed: +67 −85 lines changed

boss_input.py

+6 −6

@@ -4,8 +4,10 @@
 import numpy as np
 import cv2
 
+IMAGE_SIZE = 64
 
-def resize_with_pad(image, height, width):
+
+def resize_with_pad(image, height=IMAGE_SIZE, width=IMAGE_SIZE):
 
     def get_padding_size(image):
         h, w, _ = image.shape
@@ -24,7 +26,7 @@ def get_padding_size(image):
         return top, bottom, left, right
 
     top, bottom, left, right = get_padding_size(image)
-    BLACK = [0,0,0]
+    BLACK = [0, 0, 0]
     constant = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value=BLACK)
 
     resized_image = cv2.resize(constant, (height, width))
@@ -45,10 +47,8 @@ def traverse_dir(path):
                 image = read_image(abs_path)
                 images.append(image)
                 labels.append(path)
-    return images, labels
-
 
-IMAGE_SIZE = 32
+    return images, labels
 
 
 def read_image(file_path):
@@ -60,8 +60,8 @@
 
 def extract_data(path):
     images, labels = traverse_dir(path)
-    #images = np.array([np.reshape(image, -1) for image in images])
     images = np.array(images)
     dic = dict([(label, i) for i, label in enumerate(set(labels))])
     labels = np.array([dic[label] for label in labels])
+
     return images, labels
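For reference, a minimal sketch of how the padded resize behaves after this change, assuming resize_with_pad returns the resized array (as its use in Model.predict suggests); the 200×300 dummy frame is illustrative only:

    import numpy as np

    from boss_input import resize_with_pad, IMAGE_SIZE

    # A non-square dummy frame; real callers pass frames read with cv2.imread.
    frame = np.zeros((200, 300, 3), dtype=np.uint8)

    # Pads the shorter side with black pixels, then resizes to the new
    # default IMAGE_SIZE x IMAGE_SIZE (64x64) instead of the old 32x32.
    square = resize_with_pad(frame)
    print(square.shape)  # expected: (64, 64, 3)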

boss_train.py

+19 −18

@@ -3,7 +3,6 @@
 import random
 
 import numpy as np
-import tensorflow as tf
 from sklearn.cross_validation import train_test_split
 from keras.preprocessing.image import ImageDataGenerator
 from keras.models import Sequential
@@ -14,7 +13,7 @@
 from keras.models import load_model
 from keras import backend as K
 
-from boss_input import extract_data, resize_with_pad
+from boss_input import extract_data, resize_with_pad, IMAGE_SIZE
 
 
 class Dataset(object):
@@ -25,7 +24,7 @@ def __init__(self):
         self.Y_train = None
         self.Y_test = None
 
-    def read(self, img_rows=32, img_cols=32, img_channels=3, nb_classes=2):
+    def read(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE, img_channels=3, nb_classes=2):
         images, labels = extract_data('./data/')
         labels = np.reshape(labels, [-1])
         # numpy.reshape
@@ -141,11 +140,12 @@ def load(self, file_path=FILE_PATH):
         self.model = load_model(file_path)
 
     def predict(self, image):
-        #result = self.model.predict_proba(image)
-        if image.shape != (1, 3, 32, 32):
-            image = resize_with_pad(image, 32, 32)
-            image = image.reshape((1, 3, 32, 32))
+        if image.shape != (1, 3, IMAGE_SIZE, IMAGE_SIZE):
+            image = resize_with_pad(image)
+            image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))
         result = self.model.predict_classes(image)
+        # result = self.model.predict_proba(image)
+
         return result[0]
 
     def evaluate(self, dataset):
@@ -154,21 +154,21 @@ def evaluate(self, dataset):
 
 if __name__ == '__main__':
 
-    #dataset = Dataset()
-    #dataset.read()
-    """
+    dataset = Dataset()
+    dataset.read()
+
     model = Model()
     model.build_model(dataset)
     model.train(dataset, nb_epoch=10)
     model.save()
-    """
+
     model = Model()
     model.load()
-    # model.evaluate(dataset)
-    # for image, label in zip(dataset.X_test, dataset.Y_test):
-    # model.predict(image.reshape(1, 3, 32, 32))
-    # print(label)
-
+    model.evaluate(dataset)
+    for image, label in zip(dataset.X_test, dataset.Y_test):
+        model.predict(image.reshape(1, 3, IMAGE_SIZE, IMAGE_SIZE))
+        print(label)
+    """
     import cv2
     import os
 
@@ -178,7 +178,8 @@ def evaluate(self, dataset):
         if file_name.endswith('.jpg'):
             print(file_name)
             image = cv2.imread('./data/boss/' + file_name)
-            image = resize_with_pad(image, 32, 32)
-            image = image.reshape((1, 3, 32, 32))
+            image = resize_with_pad(image)
+            image = image.reshape((1, 3, IMAGE_SIZE, IMAGE_SIZE))
             result = model.predict(image)
             print(result)
+    """

capture.py → camera_reader.py

+41 −43

@@ -1,6 +1,4 @@
 # -*- coding:utf-8 -*-
-# Program that finds faces in the webcam feed, draws a white box around them and saves the result
-
 import threading
 from datetime import datetime
 import cv2
@@ -68,46 +66,46 @@ def run(self):
         cv2.destroyAllWindows()
 """
 
+if __name__ == '__main__':
+    cap = cv2.VideoCapture(1)
+    cascade_path = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
+    model = Model()
+    model.load()
+    while True:
+        ret, frame = cap.read()
+        # Convert to grayscale
+        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 
-cap = cv2.VideoCapture(1)
-cascade_path = "/usr/local/opt/opencv/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"
-model = Model()
-model.load()
-while True:
-    ret, frame = cap.read()
-    # Convert to grayscale
-    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
-    # Load the cascade classifier features
-    cascade = cv2.CascadeClassifier(cascade_path)
-
-    # Run object detection (face detection)
-    facerect = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(10, 10))
-    if len(facerect) > 0:
-        print('face detected')
-        color = (255, 255, 255)  # white
-        for rect in facerect:
-            # Rectangle around the detected face
-            #cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), color, thickness=2)
-
-            x, y = rect[0:2]
-            width, height = rect[2:4]
-            image = frame[y-50: y + height, x: x + width + 50]
-            cv2.imwrite('test.jpg', image)
-            result = model.predict(image)
-            print(result)
-            if result == 1:  # boss
-                print('Boss is approaching')
-                show_image()
-            else:
-                print('Not boss')
-
-    # Wait 10 msec for a key press
-    k = cv2.waitKey(100)
-    # Exit when the Esc key is pressed
-    if k == 27:
-        break
+        # Load the cascade classifier features
+        cascade = cv2.CascadeClassifier(cascade_path)
 
-# End the capture
-cap.release()
-cv2.destroyAllWindows()
+        # Run object detection (face detection)
+        facerect = cascade.detectMultiScale(frame_gray, scaleFactor=1.2, minNeighbors=3, minSize=(10, 10))
+        if len(facerect) > 0:
+            print('face detected')
+            color = (255, 255, 255)  # white
+            for rect in facerect:
+                # Rectangle around the detected face
+                #cv2.rectangle(frame, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), color, thickness=2)
+
+                x, y = rect[0:2]
+                width, height = rect[2:4]
+                image = frame[y-50: y + height, x: x + width + 50]
+                cv2.imwrite('test.jpg', image)
+                result = model.predict(image)
+                print(result)
+                if result == 1:  # boss
+                    print('Boss is approaching')
+                    show_image()
+                else:
+                    print('Not boss')
+
+        # Wait 10 msec for a key press
+        k = cv2.waitKey(100)
+        # Exit when the Esc key is pressed
+        if k == 27:
+            break
+
+    # End the capture
+    cap.release()
+    cv2.destroyAllWindows()
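One caveat in the loop above: the crop frame[y-50: y + height, x: x + width + 50] can yield an empty or wrong slice when a face sits near the top or right edge of the frame, because a negative start index is interpreted from the end of the array. A sketch of a bounds-clamped variant, not part of this commit; crop_face is a hypothetical helper:

    def crop_face(frame, rect, top_margin=50, right_margin=50):
        # rect is (x, y, width, height) as returned by detectMultiScale.
        x, y, width, height = rect
        frame_h, frame_w = frame.shape[:2]
        # Clamp the expanded box to the frame so the slice never starts
        # below zero or runs past the right edge.
        y0 = max(y - top_margin, 0)
        x1 = min(x + width + right_margin, frame_w)
        return frame[y0:y + height, x:x1]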

image_show.py

+1 −18

@@ -1,27 +1,10 @@
 # -*- coding: utf-8 -*-
 import sys
 
-import cv2
 from PyQt4 import QtGui
 
 
 def show_image(image_path='s_pycharm.jpg'):
-    """
-    # Load the image
-    RGB = 1
-    img = cv2.imread(image_path, RGB)
-
-    # Show the image
-    cv2.imshow('img', img)
-
-    # Keep the image displayed until a key is pressed
-    # First argument: time to wait for a key event; 0: forever, >0: wait that many milliseconds
-    cv2.waitKey(0)
-
-    # Destroy all created windows
-    cv2.destroyAllWindows()
-    """
-
     app = QtGui.QApplication(sys.argv)
     pixmap = QtGui.QPixmap(image_path)
     screen = QtGui.QLabel()
@@ -31,4 +14,4 @@ def show_image(image_path='s_pycharm.jpg'):
 
 
 if __name__ == '__main__':
-    show_image()
+    show_image()
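For completeness, camera_reader.py consumes this module as a plain function call; the import line below is an assumption, since it is not shown in this diff:

    from image_show import show_image

    # Opens the default 's_pycharm.jpg' in a PyQt4 window, which is what
    # camera_reader.py triggers when the classifier reports the boss.
    show_image()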
