# main.py
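"""Run ScaledYOLOv4 person detection and DeepSORT tracking over a list of videos.

For each input video, optionally writes an annotated inference video and saves
cropped person chips at a configurable interval.
"""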
import argparse
import datetime
import itertools
import math
import threading
import time
import traceback
from pathlib import Path
import cv2
import pkg_resources
from deep_sort_realtime.deepsort_tracker import DeepSort
from drawer import Drawer
from misc import draw_frame, save_chips
from scaledyolov4.scaled_yolov4 import ScaledYOLOV4
# change this for different video naming
def cam_name_func(file):
    # name = file.stem.split('_')[1]  # FRone
    # name = f"{file.parent.name.split('_')[0]}-{file.stem}"  # NDP
    name = file.stem  # testvideo
    # print(f'name: {name}')
    return name
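# With the default file.stem naming above, e.g. Path('/data/testvideo.mp4') -> 'testvideo' (illustrative path).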
parser = argparse.ArgumentParser()
parser.add_argument('--vid_list', help='Path of text file containing all video paths, 1 in each row', type=str, required=True)
parser.add_argument('--infer_fps', help='FPS for inference (use higher fps for deepsort to work better)', type=int, default=4)
parser.add_argument('--gpu_dev', help='Gpu device number to use. Default: 0', type=int, default=0)
parser.add_argument('--output_dir', help='Path of output directory', default='output')
parser.add_argument('--save_chips', help='Whether to save cropped chips', action='store_true')
parser.add_argument('--seconds', help='Number of seconds between each chip save', type=int, default=1)
parser.add_argument('--record_tracks', help='Whether to save inference video', action='store_true')
args = parser.parse_args()
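# Example invocation (file/directory names are illustrative):
#   python main.py --vid_list vids.txt --infer_fps 4 --output_dir output --save_chips --record_tracks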
seconds = args.seconds
infer_fps = args.infer_fps
classes_list = ['person']
output_dir = Path(args.output_dir)
crop_chips = args.save_chips
record_tracks = args.record_tracks
with open(args.vid_list) as f:
    input_vids = f.read().splitlines()
# print(input_vids)
od = ScaledYOLOV4(
    bgr=True,
    gpu_device=args.gpu_dev,
    # model_image_size=608,
    model_image_size=896,  # to detect mini hoomans
    # model_image_size=1280,
    # model_image_size=1536,
    max_batch_size=1,
    half=True,
    same_size=True,
    weights=pkg_resources.resource_filename('scaledyolov4', 'weights/yolov4-p6_-state.pt'),
    cfg=pkg_resources.resource_filename('scaledyolov4', 'configs/yolov4-p6.yaml'),
)
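# Note: a larger model_image_size helps detect small/distant people, at the cost of speed and GPU memory.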
drawer = Drawer(color=(255, 0, 0))
start_whole = time.time()
for filename in input_vids:
    filename = Path(filename)
    vidcap = cv2.VideoCapture(str(filename))
    tracker = DeepSort(max_age=30, nn_budget=10)
    cam_name = cam_name_func(filename)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    fps = 25 if math.isinf(fps) else fps
    # max(1, ...) guards against a frame skip of 0 (e.g. seconds=0, or infer_fps far above the video fps),
    # which would break the modulo checks in the frame loop below
    save_frame_skip = max(1, round(fps * seconds))
    vid_width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    vid_height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    infer_frame_skip = max(1, round(fps / infer_fps))
    print(f'{filename.stem} -- fps: {fps}, vid_width: {vid_width}, vid_height: {vid_height}, saving frame_skip: {save_frame_skip}, inference frame_skip: {infer_frame_skip}')
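    # e.g. fps=25, seconds=1, infer_fps=4 -> save_frame_skip=25, infer_frame_skip=6 (detect on every 6th frame)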
    if record_tracks:
        output_dir.mkdir(parents=True, exist_ok=True)
        out_track_fp = output_dir / f'{cam_name}_inference.avi'
        out_track = cv2.VideoWriter(str(out_track_fp), cv2.VideoWriter_fourcc(*'MJPG'), infer_fps, (vid_width, vid_height))
    if crop_chips:
        chips_save_dir = output_dir / f'{cam_name}_crops'
        chips_save_dir.mkdir(parents=True, exist_ok=True)
    start_vid = time.time()
    for frame_count in itertools.count():
        try:
            status, frame = vidcap.read()
            if not status:
                break
            # run detection + tracking only on inference or chip-save frames
            if (frame_count % infer_frame_skip == 0) or (frame_count % save_frame_skip == 0):
                all_detections = od.detect_get_box_in([frame], box_format='ltwh', classes=classes_list)[0]
                # print(f'all detections: {all_detections}')
                all_tracks = tracker.update_tracks(frame=frame, raw_detections=all_detections)
                # print(f'all tracks: {all_tracks}')
                # drawing/writing and chip saving run in daemon threads so disk and draw I/O do not block the detection loop
                if record_tracks:
                    threading.Thread(target=draw_frame, args=(frame, all_tracks, out_track, drawer), daemon=True).start()
                if (frame_count % save_frame_skip == 0) and crop_chips:
                    threading.Thread(target=save_chips, args=(frame, frame_count, all_tracks, chips_save_dir, cam_name), daemon=True).start()
        except Exception as e:
            traceback.print_exc()
            print(f'Error: {e}')
            print(f'Killing {cam_name}..')
            vidcap.release()
            if record_tracks:
                out_track.release()
            break  # stop processing this video after an unrecoverable error
        except KeyboardInterrupt:
            print(f'Interrupting {cam_name}..')
            vidcap.release()
            if record_tracks:
                out_track.release()
            break
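    # brief pause, presumably to let the daemon draw/save threads flush their last writes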
    time.sleep(0.1)
    vidcap.release()
    if record_tracks:
        out_track.release()
    seconds_taken = time.time() - start_vid
    time_taken = datetime.timedelta(seconds=seconds_taken)
    print(f'Time taken for {filename.stem}: {time_taken}')
    print(f'Avg FPS: {frame_count / seconds_taken}')
    print(f'Complete {cam_name}')
seconds_taken = time.time() - start_whole
time_taken = datetime.timedelta(seconds=seconds_taken)
print(f'Total time taken: {time_taken}')