motion_detector001.py
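"""
Simple webcam motion detector.

Reads frames from the default camera, blurs each grayscale frame and compares it
against the first frame captured. The absolute difference is thresholded and dilated,
and any changed region larger than 1000 pixels is outlined with a green rectangle
on the colour frame. Press 'q' to quit.
"""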
import cv2

first_frame = None  # reference frame; stays None until the first frame has been captured
# Create a video capture object. 0 selects the default (built-in) camera;
# a video file path such as "movie.mp4" can be passed instead.
video = cv2.VideoCapture(0)
while True:
    check, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Blur with a 21x21 kernel; the trailing 0 lets OpenCV derive the standard deviation
    # from the kernel size. Smoothing removes noise, so the frame differencing below
    # reacts to real motion rather than pixel-level flicker.
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # The first frame captured becomes the static reference that every later frame is compared against.
    if first_frame is None:
        first_frame = gray
        continue  # go back to the top of the loop; skip the detection code below for this frame
    # delta_frame highlights whatever has changed relative to the reference frame.
    delta_frame = cv2.absdiff(first_frame, gray)  # absolute difference of the two blurred images
    # cv2.threshold() returns a (threshold_value, image) tuple; with THRESH_BINARY only the
    # image at index [1] is needed. Pixels differing by more than 30 are set to 255 (white).
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    # Dilate to close small black holes in the white regions; more iterations give smoother blobs.
    # No custom kernel is used, so None is passed as the second argument.
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    # Find contours on a copy so the original thresh_frame is not modified.
    # RETR_EXTERNAL keeps only the outermost contours; CHAIN_APPROX_SIMPLE compresses each contour.
    # Note: OpenCV 4.x returns (contours, hierarchy); OpenCV 3.x returned a three-element tuple.
    cnts, _ = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 1000:  # ignore regions smaller than 1000 pixels
            continue
        # For any contour of at least 1000 pixels, draw its bounding rectangle on the colour frame.
        (x, y, w, h) = cv2.boundingRect(contour)
        # (x, y) is the top-left corner and (x + w, y + h) the bottom-right;
        # (0, 255, 0) is green and 3 is the line thickness.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow("Gray Frame", gray)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    #cv2.imshow("Capturing Marty",frame)
    key = cv2.waitKey(1)  # wait 1 ms for a key press; waitKey(0) would instead block until any key is pressed
    print("Gray Frame pixel data: below...")
    print(gray)
    print("Delta Frame pixel data: below...")
    print(delta_frame)
    if key == ord('q'):
        print("Q was pressed...program stopped...")
        break
# Release the camera and close all OpenCV windows.
video.release()
cv2.destroyAllWindows()