-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_with_drone.py
156 lines (120 loc) · 4.4 KB
/
test_with_drone.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
# -*- coding:utf-8 -*-
import tensorflow as tf
from keras.models import Model
from models import c3d_model
from keras.layers import Dense,Dropout,Conv3D,Input,MaxPool3D,Flatten,Activation, GlobalAveragePooling3D, ZeroPadding3D
import numpy as np
import cv2
import h5py
from keras.models import load_model
from keras.utils import multi_gpu_model
from keras.optimizers import SGD,Adam
from keras.models import model_from_json
'''
Load an already-trained model's weights and test it on video captured from a drone.
1. input = (128,171,16,3) numpy array
2. output = model test result
'''
#
# def video2batch(filePath):
# videoIn = cv2.VideoCapture(filePath)
#
# IMG_WIDTH = 171
# IMG_HEIGHT = 128
# TOTAL_FRAME = int(videoIn.get(cv2.CAP_PROP_FRAME_COUNT)) # number of frames in given video
#
# batches = []
# frames = []
# while videoIn.isOpened():
# if len(frames) < 16:
# ret, frame = videoIn.read()
# try:
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# frame = cv2.resize(frame, (IMG_WIDTH, IMG_HEIGHT))
# frames.append(frame)
# except:
# pass
# else:
# batches.append(frames)
#             frames = []
#
# if len(batches) == TOTAL_FRAME // 16:
# break
#
# batches = np.array(batches).astype(np.float32)
# # batches[..., 0] -= 99.9
# # batches[..., 1] -= 92.1
# # batches[..., 2] -= 82.6
# # batches[..., 0] /= 65.8
# # batches[..., 1] /= 62.3
# # batches[..., 2] /= 60.3
# batches = np.transpose(batches, (0, 2, 3, 1, 4))
# # print(batches.shape)
# return batches
def videoSeg(filePath):
    """Read a video and segment it into non-overlapping 16-frame clips for C3D.

    Parameters
    ----------
    filePath : str
        Path to the input video file.

    Returns
    -------
    numpy.ndarray
        Array of shape (num_clips, 128, 171, 16, 3): each frame is converted
        BGR->RGB and resized to 171x128, frames are grouped into consecutive
        16-frame clips, and the temporal axis is moved to position 3 to match
        the model's expected input layout. Trailing frames that do not fill a
        complete 16-frame clip are discarded. An empty/unreadable video yields
        an array with zero clips.
    """
    IMG_WIDTH = 171
    IMG_HEIGHT = 128
    videoIn = cv2.VideoCapture(filePath)

    frames = []
    while videoIn.isOpened():
        ret, frame = videoIn.read()
        # Stop when decoding fails or the stream ends. This replaces the
        # original check against CAP_PROP_FRAME_COUNT metadata, which
        # (a) dropped the final decoded frame (off-by-one) and (b) made the
        # reshape below fail whenever the true frame count was an exact
        # multiple of 16.
        if not ret or frame is None:
            break
        # NOTE: cv2.resize takes (width, height) — the reverse of numpy's
        # (rows, cols) convention.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (IMG_WIDTH, IMG_HEIGHT))
        frames.append(frame)
    videoIn.release()

    # Truncate to the number of frames actually read, not the container's
    # (sometimes inaccurate) frame-count metadata.
    num_clips = len(frames) // 16
    res = np.array(frames[:num_clips * 16]).reshape(-1, 16, IMG_HEIGHT, IMG_WIDTH, 3)
    # Reorder (clip, time, H, W, C) -> (clip, H, W, time, C), matching the
    # original output layout.
    res = np.moveaxis(res, 1, 3)
    return res
def modelPredict(weightPath, modelPath, videoPath='/home/pirl/Downloads/fight(4).mp4'):
    """Load a serialized C3D model and run violence detection on a video.

    Parameters
    ----------
    weightPath : str
        Path to the HDF5 file holding the trained weights.
    modelPath : str
        Path to the JSON file holding the model architecture.
    videoPath : str, optional
        Path to the video to classify. Defaults to the original hard-coded
        drone test clip, so existing callers are unaffected.

    Side effects
    ------------
    Prints the model summary and, for each 16-frame clip in the video,
    "violence detected!" or "Normal".
    """
    # Rebuild the architecture from its JSON description; the context
    # manager guarantees the file handle is closed even if parsing fails.
    with open(modelPath, 'r') as json_file:
        model = model_from_json(json_file.read())
    # by_name=True loads weights layer-by-layer, tolerating layers that
    # exist only in one of architecture/weights.
    model.load_weights(weightPath, by_name=True)
    optimizer = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999)
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    model.summary()

    # Segment the video into 16-frame clips and classify each one.
    res = videoSeg(videoPath)
    pred = model.predict(res, verbose=1, batch_size=16)
    # Two-column output: column 0 = normal score, column 1 = violence score.
    for scores in pred:
        if scores[0] < scores[1]:
            print("violence detected!")
        else:
            print("Normal")
# Guard the script entry point so importing this module does not trigger
# model loading and inference as a side effect.
if __name__ == "__main__":
    modelPredict(
        weightPath='/home/pirl/PycharmProjects/cnnTest/FinalWeightJson/weights_c3d(lr=0.0001,ADAM,binary,epoch10).h5',
        modelPath='/home/pirl/PycharmProjects/cnnTest/FinalWeightJson/model(lr=0.0001,ADAM,binary,epoch10).json',
    )