Camera.py
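"""
Basic motion detection for an IP camera stream.

Builds the stream URL from config_test.json, optionally lets the user sketch a
mask of regions to ignore (applying it to the threshold image is currently
commented out), and diffs each frame against a reference frame to detect
motion. Once motion persists for time_thresh frames, frames are saved to a
timestamped folder and later stitched into capture.avi; a Twilio SMS can be
sent via send_text_message().
"""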
import cv2
import threading
import numpy as np
from common import Sketcher  # sketching helper, likely the OpenCV Python samples' common.py
import os
import json
from datetime import datetime
from twilio.rest import Client
from flask import Flask, request, redirect, send_from_directory  # not used in this file yet
from send_email import SendEmail

directory = ""
timestamp = datetime.now()

# User-set variables
# todo add ability for end users to set these variables
sensitivity = 500
resize = 30
time_thresh = 10
config_file_path = "C:\\Users\\pheig\\Documents\\GitHub\\security\\config_test.json"
photo = ""
date_folder = ""
file_name = ""
path = '/26.11.2020.10.53.26/'
filename = 'frame11.jpg'

def send_text_message(frame):
    # Send an SMS via Twilio using the credentials in the config file
    # Download the twilio-python library from twilio.com/docs/libraries/python
    # Find these values at https://twilio.com/user/account
    # To set up environment variables, see http://twil.io/secure
    print("Sending text message")
    with open(config_file_path) as json_data_file:
        data = json.load(json_data_file)
    twilio = data['twilio']
    account_sid = twilio['account_sid']
    auth_token = twilio['auth_token']
    client = Client(account_sid, auth_token)
    client.api.account.messages.create(
        to=twilio['phone'],
        from_=twilio['twilio_phone'],
        body="Motion Detected")

def connection_string():
    with open(config_file_path) as json_data_file:
        data = json.load(json_data_file)
    cam = data['camera']
    string = cam['url_pre'] + cam['user'] + ':' + cam['password'] + cam['url_post'] + ':' + cam['port']
    return string

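# A minimal sketch of what config_test.json is assumed to look like, inferred
# from the keys read in send_text_message() and connection_string(); every
# value below is a placeholder, not a real credential or address:
#
# {
#     "twilio": {
#         "account_sid": "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#         "auth_token": "your_auth_token",
#         "phone": "+15550001111",
#         "twilio_phone": "+15552223333"
#     },
#     "camera": {
#         "url_pre": "rtsp://",
#         "user": "camera_user",
#         "password": "camera_password",
#         "url_post": "@192.0.2.10",
#         "port": "554"
#     }
# }
#
# With those values, connection_string() would return
# "rtsp://camera_user:camera_password@192.0.2.10:554".
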
def create_mask(image):
    print('Create mask function called')
    # If a mask was created before, import it as grayscale and invert it
    if os.path.isfile('mask.png'):
        image_mask = cv2.imread('mask.png', 0)
        image_mask = cv2.bitwise_not(image_mask)
        return image_mask
    else:
        # Create an image for sketching the mask
        image_mask = image.copy()
        sketch = Sketcher('Image', [image_mask], lambda: ((255, 255, 255), 255))
        # Sketch a mask
        # todo - add a window with instructions for the end user
        while True:
            ch = cv2.waitKey()
            if ch == 27:  # ESC - exit
                break
            if ch == ord('r'):  # r - mask the image
                break
            if ch == ord(' '):  # SPACE - reset the sketched mask
                image_mask[:] = image
                sketch.show()
        # Range of the white sketch colour to keep from the drawing
        lower_white = np.array([0, 0, 255])
        upper_white = np.array([255, 255, 255])
        # Create the mask
        image_mask = cv2.inRange(image_mask, lower_white, upper_white)
        cv2.imwrite('mask.png', image_mask)
        return image_mask

def resize_frame(frame, resize):
    # Resize the image by the given percentage
    width = int(frame.shape[1] * (resize / 100.0))
    height = int(frame.shape[0] * (resize / 100.0))
    dim = (width, height)
    frame = cv2.resize(frame, dim)
    return frame

def motion(motion_count, frame, main_path):
    # Once motion has lasted longer than the set threshold, begin capturing
    global timestamp
    global directory
    global date_folder
    global file_name
    global photo
    if motion_count == time_thresh:  # motion has persisted for time_thresh consecutive frames
        now = datetime.now()
        # Seconds since motion last started
        seconds = (now - timestamp).total_seconds()
        # If it has been longer than 5 seconds since the last motion,
        # create a new capture folder
        # todo this feels messy, need to clean up the logic
        print(seconds)
        if seconds > 5:
            # If it is not the first pass, generate a video from the previous images
            if directory != "":
                threading.Thread(target=generate_video, args=()).start()
            dt_string = now.strftime("%d.%m.%Y.%H.%M.%S")
            date_folder = dt_string
            directory = main_path + dt_string
            os.mkdir(directory)
            print(directory)
        timestamp = now
    if motion_count >= time_thresh:
        # todo make this more dynamic for a user
        file = "frame" + str(motion_count) + ".jpg"
        file_name = file
        path = directory + "\\" + file
        print(path)
        photo = path
        cv2.imwrite(path, frame)

# Video generating function
def generate_video():
    image_folder = directory  # folder holding the frames captured by motion()
    video_name = 'capture.avi'
    os.chdir(image_folder)
    # Only consider the image files, ignoring anything else in the folder
    images = [img for img in os.listdir(image_folder)
              if img.endswith(".jpg") or
              img.endswith(".jpeg") or
              img.endswith(".png")]
    print(images)
    # Take the frame width and height from the first image
    frame = cv2.imread(os.path.join(image_folder, images[0]))
    height, width, layers = frame.shape
    # todo decide if end user should be able to pick fourcc format
    fourcc = cv2.VideoWriter_fourcc(*'MP42')
    video = cv2.VideoWriter(video_name, fourcc, 60.0, (width, height))
    # Append the images to the video one by one
    for image in images:
        video.write(cv2.imread(os.path.join(image_folder, image)))
    # Close any OpenCV windows and release the writer
    cv2.destroyAllWindows()
    video.release()  # releasing the generated video

# Main video feed
def show_stream():
    print("show stream")
    connection = connection_string()
    capture = cv2.VideoCapture(connection)
    while True:
        ret, frame = capture.read()
        # Check for an empty frame
        if not ret or frame is None:
            print('Empty frame')
            continue
        frame = resize_frame(frame, resize)
        cv2.imshow("Main", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()

def main():
    print("I am starting from main")
    SendEmail()
    # Start with empty arrays for the reference frame and the mask
    gray2 = np.array([])
    image_mask = np.array([])
    # Establish connection to the camera and show the live stream
    threading.Thread(target=show_stream, args=()).start()
    motion_count = 0
    main_path = "C:\\Users\\pheig\\Documents\\GitHub\\video\\"
    connection = connection_string()
    cap = cv2.VideoCapture(connection)
    while True:
        ret, frame = cap.read()
        # Check for an empty frame
        if not ret or frame is None:
            continue
        frame = resize_frame(frame, resize)
        # If the mask is empty, set the mask
        if image_mask.size == 0:
            image_mask = create_mask(frame)
        # If the resize parameter has changed after the mask was already set,
        # apply the new size to the mask
        if image_mask.shape[0:2] != frame.shape[0:2]:
            image_mask = cv2.resize(image_mask, (frame.shape[1], frame.shape[0]))
        # Convert the frame to grayscale and apply a Gaussian blur
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (25, 25), 0)
        # If the reference frame is not set yet, set it
        if gray2.size == 0:
            gray2 = gray
        # Get the difference between the reference frame and the latest frame
        deltaframe = cv2.absdiff(gray, gray2)
        threshold = cv2.threshold(deltaframe, 25, 255, cv2.THRESH_BINARY)[1]
        threshold = cv2.dilate(threshold, None)
        # threshold = cv2.bitwise_and(threshold, threshold, mask=image_mask)
        # Find the contours of the moving regions
        contour, hierarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contour:
            # Increment the frame count for motion capture
            motion_count += 1
        else:
            # Reset the motion capture count to 0
            motion_count = 0
        motion(motion_count, frame, main_path)
        # Draw rectangles around where motion is detected
        for i in contour:
            if cv2.contourArea(i) < sensitivity:
                continue
            (x, y, w, h) = cv2.boundingRect(i)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.imshow("Capturing", frame)
        # cv2.imshow("Threshold", threshold)
        # cv2.imshow("Mask", image_mask)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            send_text_message(frame)
            threading.Thread(target=generate_video, args=()).start()
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()