diff --git a/BCJA.py b/BCJA.py
index 3d437b6..bcad058 100644
--- a/BCJA.py
+++ b/BCJA.py
@@ -1,12 +1,13 @@
 #
-# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-# /\ _ `\ /\ _ _ _\/\_ _ _ \/\ _ _ `\
-# \ \ \_\ \_\ \ \_ _ /\/_ _ _/\ \ \ \ \ \
-# \ \ _ _ `\ \ \ _ \ \ \ \ \_ _\ \
-# \ \ \_ _\ \ \ \_ _ _ /\`\_\/ \ \ _ _ \
-# \ \_ _ _ _\ \_ _ _ _\ \_ _ _ _\ \_\_ /\_\
-# \/_ _ _ _/\/_ _ _ _/\/_ _ _ _/\/_/ \/_/
-#
+# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+# /\ _ `\ /\ _ _ _\ /\_ _ _ \ /\ _ _ `\
+# \ \ \_\ \_ \ \ \_ _ / \/_ _ _/\ \ \ \ \ \ \
+# \ \ _ _ `\ \ \ \ \ \ \ \ \ \_ _\ \
+# \ \ \_ _\ \ \ \ \_ _ _ \`\_\/ \ \ \ _ _ \
+# \ \_ _ _ _\ \ \_ _ _ _\ \_ _ _ _\ \ \_\_ /\_ \
+# \/_ _ _ _/ \_/_ _ _ _/ \/_ _ _/ \ _/_/ \ /
+
+
 # ================================================
 # Library used to detect facial features, gestures
 # and other thigs, finally used to detect and cla-
@@ -123,4 +124,4 @@ def gaze_direction(video_capture):
 
 def head_pose(video_capture):
     predictor = getcascades.facial_landmarks()
-    head.face_pose(video_capture,predictor)
\ No newline at end of file
+    head.face_pose(video_capture,predictor)