#!/usr/bin/env python3

"""
Computer vision workshop template - filled in!

NOTE: This wasn't a workshop on proper Python program structure.

Prepared for "Exatel Security Days Programming Workshop" @07.06.2019
Author: Tomasz bla Fortuna.
License: Apache
"""

import sys
from time import time

import cv2
import numpy as np


class Servo:
    """
    Servo control using PWM on a Raspberry Pi.
    """

    def __init__(self, servo_pin=18):
        self.servo_pin = servo_pin
        try:
            import pigpio
            self.pi = pigpio.pi()
        except ImportError:
            print("No pigpio - simulating SERVO")
            self.pi = None

    def set(self, value):
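        # pigpio interprets the value as a pulse width in microseconds
        # (roughly 500-2500; 0 switches the servo off).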
        if self.pi is not None:
            self.pi.set_servo_pulsewidth(self.servo_pin, value)


servo = Servo()


class Capture:
    """
    Read data from a camera or from a file.
    """
    def __init__(self, filename=None, camera=None, size=None):
        """
        Pass filename when reading from a file, or camera when reading from a
        camera. Don't use both.

        Pass size if you want to force the capture resolution.
        """
        self.filename = filename
        self.camera = camera

        if camera is not None:
            self.capture = cv2.VideoCapture(camera)

            if size is not None:
                width, height = size
                self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
                self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
                print("Set size to %dx%d" % (width, height))

        elif filename is not None:
            self.capture = cv2.VideoCapture(filename)
        else:
            raise ValueError("You failed at thinking: pass either filename or camera")

        self.width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

        print("Initialized capturing device, size: %dx%d" % (self.width,
                                                             self.height))

    def frames(self):
        "Frame iterator - grab and yield frames."
        while True:
            (grabbed, frame) = self.capture.read()

            if frame is None:
                break

            yield frame


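# Typical use of Capture (illustrative): Capture(camera=0, size=(320, 240))
# for a webcam, or Capture(filename="recording.avi") for a prerecorded clip.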
def detect_kulka(frame, diff):
    """
    Ball (kulka) detection algorithm.

    Args:
        frame: current color frame.
        diff: difference from the averaged background.
    """
    # Erosion/dilation morphology filtering:
    # Erosion removes small artefacts, but "thins" our difference.
    # Dilation brings the "thickness" back, but not on the removed artefacts.
    kernel = np.ones((5, 5), np.uint8)
    diff = cv2.erode(diff, kernel, iterations=1)
    diff = cv2.dilate(diff, kernel, iterations=3)

    # Threshold "diff" to get a "mask" with our ball.
    status, mask = cv2.threshold(diff, 20, 255,
                                 cv2.THRESH_BINARY)

    # Convert the BGR frame to HSV to get the "Hue" channel.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Calculate a histogram of the Hue (channel 0 of HSV); the upper range
    # bound is exclusive, hence 180 for OpenCV's 0-179 hue values.
    h_hist = cv2.calcHist([hsv], channels=[0],
                          mask=mask, histSize=[6],
                          ranges=[0, 180])
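    # With histSize=[6] each bin covers 30 of those hue values; red hues wrap
    # around the ends of the range, so a red ball lands in the first and the
    # last bin.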

    # Heuristic to tell a red ball from a non-red one: red ends up at the
    # edges of the histogram, other colors in its center.
    edge = h_hist[0] + h_hist[-1]
    center = sum(h_hist) - edge

    if abs(edge - center) < 200:
        # Difference not big enough.
        print("INVALID KULKA", edge, center)
        return None
    if edge > 2000:
        # Red ball found.
        print("RED KULKA", edge, center)
        servo.set(1800)
    else:
        # Non-red ball found.
        print("NON-RED KULKA", edge, center)
        servo.set(1500)


def main_loop(filename):
    "Loop and detect."
    if '/dev/' in filename:
        # A bit hacky, but works.
        capture = Capture(camera=filename)
    else:
        capture = Capture(filename)

    # Stats
    start = time()
    frame_no = 0

    # "Motion detected" frame counter.
    det_cnt = 0

    # Previous frame (for motion detection)
    prev_frame = None

    # Averaged background (for ball-mask calculation)
    background = None

    for frame in capture.frames():
        # Let's count FPS
        frame_no += 1
        if frame_no % 10 == 0:
            took = time() - start
            print("fps: %.2f" % (frame_no / took))

        # Convert to a grey frame to speed up motion detection on the rPi:
        # later steps then work on 1 channel instead of 3.
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if prev_frame is None:
            # Initialize previous frame and background.
            prev_frame = grey
            background = grey
            continue

        # absdiff calculates the per-pixel absolute difference without the
        # wrap-around problems of plain uint8 (0-255) subtraction.
        diff = cv2.absdiff(grey, prev_frame)

        # Saturating subtraction: subtracting 30 from 20 gives 0, so this
        # removes the low-value noise from the diff.
        diff = cv2.subtract(diff, 30)

        # Calculate the total brightness of all not-filtered-out pixels on the
        # difference image.
        diff_total = diff.sum()

        if diff_total > 1000:
            # Difference exceeds the threshold - movement detected, count frames.
            det_cnt += 1
        else:
            # No movement - reset the movement counter.
            det_cnt = 0
            # And "running average" our background.
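            # (addWeighted computes 0.9 * background + 0.1 * grey, i.e. an
            # exponential moving average of the static scene.)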
            background = cv2.addWeighted(background, 0.9, grey, 0.1, 0)

        if det_cnt == 3:
            # On the 3rd moving frame we detect the ball color.

            # Calculate the difference from the background - this does a
            # better job than differencing two consecutive frames, because we
            # get the difference only where the ball IS, and not where it was
            # on the previous frame.
            diff = cv2.absdiff(background, grey)

            # That's how you can display an intermediate frame:
            # cv2.imshow("NEW DIFF", diff)

            # Call the ball detection algorithm.
            detect_kulka(frame, diff)

        prev_frame = grey

        # Remove the imshow calls if running on the rPi.
        cv2.imshow("Frame", frame)

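        # waitKey pumps the GUI event loop needed by imshow and waits up to
        # 25 ms for a key press (roughly capping the display loop at 40 fps).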
        x = cv2.waitKey(25)
        if x == ord('q'):
            break

        # You can easily save interesting frames to disk here
        # (e.g. via a cv2.VideoWriter opened earlier as "output"):
        # output.write(frame)

    # Destroy windows, if any.
    cv2.destroyAllWindows()


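# Example invocation (script name and paths are illustrative):
#   ./workshop.py /dev/video0    - live capture from a camera device
#   ./workshop.py recording.avi  - offline run on a recorded video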
if __name__ == "__main__":
    try:
        main_loop(sys.argv[1])
    except KeyboardInterrupt:
        print("Exiting on keyboard interrupt at", end=' ')