This repository has been archived by the owner on Dec 24, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 170
/
main.py
170 lines (147 loc) · 5.82 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
'''
VCS entry point.
'''
# pylint: disable=wrong-import-position
import sys
import time
import cv2
from dotenv import load_dotenv
# Environment variables must be loaded BEFORE importing `settings`, which
# reads them at import time — hence the wrong-import-position disable above.
load_dotenv()
import settings
from util.logger import init_logger
from util.image import take_screenshot
from util.logger import get_logger
from util.debugger import mouse_callback
from ObjectCounter import ObjectCounter
# Configure logging once at import so every module shares the same logger.
init_logger()
logger = get_logger()
def run():
    '''
    Initialize the object counter and run the main counting loop.

    All configuration is read from the ``settings`` module. The video source
    is opened (optionally retried while ``WAIT_FOR_CAPTURE`` is enabled and
    the timeout has not elapsed), then frames are processed until the source
    is exhausted or the user presses 'q'. Exits the process if the source
    cannot be opened or yields no initial frame.

    Interactive keys (debug window): 'p' pause/resume, 's' screenshot,
    'q' quit.
    '''
    video = settings.VIDEO
    wfc = settings.WAIT_FOR_CAPTURE
    wfc_timeout = settings.WAIT_FOR_CAPTURE_TIMEOUT
    waited_to_capture_for_seconds = 0
    wait_for_seconds = 10  # retry interval while waiting for the capture source

    cap = cv2.VideoCapture(video)
    while not cap.isOpened():
        logger.error('Invalid video source %s', video, extra={
            'meta': {'label': 'INVALID_VIDEO_SOURCE'},
        })
        if wfc and waited_to_capture_for_seconds < wfc_timeout:
            # wait and then try to capture again
            time.sleep(wait_for_seconds)
            waited_to_capture_for_seconds += wait_for_seconds
            cap = cv2.VideoCapture(video)
            continue
        sys.exit()

    retval, frame = cap.read()
    # BUG FIX: a source can open successfully yet deliver no frame (e.g. a
    # stalled stream). Without this guard `frame.shape` below raises
    # AttributeError on None.
    if not retval or frame is None:
        logger.error('Could not read initial frame from video source %s', video, extra={
            'meta': {'label': 'INVALID_VIDEO_SOURCE'},
        })
        cap.release()
        sys.exit()

    f_height, f_width, _ = frame.shape
    detection_interval = settings.DI
    mcdf = settings.MCDF
    mctf = settings.MCTF
    detector = settings.DETECTOR
    tracker = settings.TRACKER
    use_droi = settings.USE_DROI
    # create detection region of interest polygon (whole frame when disabled)
    droi = settings.DROI \
            if use_droi \
            else [(0, 0), (f_width, 0), (f_width, f_height), (0, f_height)]
    show_droi = settings.SHOW_DROI
    counting_lines = settings.COUNTING_LINES
    show_counts = settings.SHOW_COUNTS
    hud_color = settings.HUD_COLOR

    object_counter = ObjectCounter(frame, detector, tracker, droi, show_droi, mcdf, mctf,
                                   detection_interval, counting_lines, show_counts, hud_color)

    record = settings.RECORD
    if record:
        # initialize video object to record counting
        output_video = cv2.VideoWriter(settings.OUTPUT_VIDEO_PATH, \
                                       cv2.VideoWriter_fourcc(*'MJPG'), \
                                       30, \
                                       (f_width, f_height))

    logger.info('Processing started.', extra={
        'meta': {
            'label': 'START_PROCESS',
            'counter_config': {
                'di': detection_interval,
                'mcdf': mcdf,
                'mctf': mctf,
                'detector': detector,
                'tracker': tracker,
                'use_droi': use_droi,
                'droi': droi,
                'counting_lines': counting_lines
            },
        },
    })

    headless = settings.HEADLESS
    if not headless:
        # capture mouse events in the debug window
        cv2.namedWindow('Debug')
        cv2.setMouseCallback('Debug', mouse_callback, {'frame_width': f_width, 'frame_height': f_height})
    # hoisted out of the loop: invariant across frames
    debug_window_size = settings.DEBUG_WINDOW_SIZE

    is_paused = False
    output_frame = None
    frames_processed = 0

    try:
        # main loop
        while retval:
            k = cv2.waitKey(1) & 0xFF
            if k == ord('p'):  # pause/play loop if 'p' key is pressed
                is_paused = not is_paused
                logger.info('Loop paused/played.', extra={'meta': {'label': 'PAUSE_PLAY_LOOP', 'is_paused': is_paused}})
            if k == ord('s') and output_frame is not None:  # save frame if 's' key is pressed
                take_screenshot(output_frame)
            if k == ord('q'):  # end video loop if 'q' key is pressed
                logger.info('Loop stopped.', extra={'meta': {'label': 'STOP_LOOP'}})
                break

            if is_paused:
                time.sleep(0.5)
                continue

            _timer = cv2.getTickCount()  # set timer to calculate processing frame rate

            object_counter.count(frame)
            output_frame = object_counter.visualize()

            if record:
                output_video.write(output_frame)

            if not headless:
                resized_frame = cv2.resize(output_frame, debug_window_size)
                cv2.imshow('Debug', resized_frame)

            frames_count = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            processing_frame_rate = round(cv2.getTickFrequency() / (cv2.getTickCount() - _timer), 2)
            frames_processed += 1

            blobs = object_counter.get_blobs()
            logger.debug('Frame processed.', extra={
                'meta': {
                    'label': 'FRAME_PROCESS',
                    'frames_count': frames_count,
                    'frames_processed': frames_processed,
                    'video_frame_rate': round(cap.get(cv2.CAP_PROP_FPS), 2),
                    'video_frame_size': {'width': f_width, 'height': f_height},
                    'processing_frame_rate': processing_frame_rate,
                    # BUG FIX: live sources report a frame count of 0 (or -1),
                    # which made this division raise ZeroDivisionError; -1
                    # signals "unknown" for such sources.
                    'percentage_processed': round((frames_processed / frames_count) * 100, 2) if frames_count > 0 else -1,
                    'time_in_seconds': round(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000),
                    'blobs': blobs,
                    'blobs_count': len(blobs),
                    'counts': object_counter.get_counts(),
                },
            })

            retval, frame = cap.read()
    finally:
        # end capture, close window, close log file and video object if any
        frames_count = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        if not headless:
            cv2.destroyAllWindows()
        if record:
            output_video.release()
        logger.info('Processing ended.', extra={
            'meta': {
                'label': 'END_PROCESS',
                'counts': object_counter.get_counts(),
                'completed': frames_count - frames_processed == 0,
            },
        })
# Run the counting loop only when executed as a script (not on import).
if __name__ == '__main__':
    run()