|
9 | 9 |
|
10 | 10 | parser.add_argument("--input", "-iv", required=True, help="The location of the input video file") |
11 | 11 | parser.add_argument("--output", "-ov", required=True, help="The location of the output video file") |
12 | | -parser.add_argument("--config", "-cfg", required=False, help="The model configuration file") |
13 | | -parser.add_argument("--weights", "-w", required=False, help="The model weights file") |
14 | | -parser.add_argument("--classes", "-c", required=False, help="The file containing the class descriptions") |
| 12 | +parser.add_argument("--config", "-cfg", required=False, default="models/yolov4/yolov4-custom.cfg", help="The model configuration file") |
| 13 | +parser.add_argument("--weights", "-w", required=False, default="models/yolov4/yolov4-custom_4000.weights", help="The model weights file") |
| 14 | +parser.add_argument("--classes", "-c", required=False, default="models/yolov4/classes-3.names", help="The file containing the class descriptions") |
15 | 15 | args = parser.parse_args() |
16 | 16 |
|
17 | 17 | # Default usage |
|
22 | 22 | -cfg models/yolov3/cfg/yolov3_custom.cfg \ |
23 | 23 | -w models/yolov3/weights/yolov3_custom_final.weights \ |
24 | 24 | -c models/yolov3/classes.names |
| 25 | +
|
| 26 | + OR |
| 27 | +
|
| 28 | +python3 scripts/detect-trees.py -iv videos/video1.mp4 -ov videos/test.mp4 |
| 29 | +
|
25 | 30 | """ |
26 | 31 | #--------------------------------------------------------- |
27 | 32 |
|
28 | 33 | # set the VideoCapture and VideoWriter objects------------ |
29 | 34 | cap = cv2.VideoCapture(str(args.input)) |
30 | 35 |
|
31 | | -fourcc = cv2.VideoWriter_fourcc(*'DIVX') |
| 36 | +fourcc = cv2.VideoWriter_fourcc(*'mp4v') |
32 | 37 | out = cv2.VideoWriter(str(args.output),fourcc, 25.0, (800,600)) |
33 | 38 | #--------------------------------------------------------- |
34 | 39 |
|
|
44 | 49 | frameSkip = 25 |
45 | 50 | #--------------------------------------------------------- |
46 | 51 |
|
47 | | -# LOS configurations-------------------------------------- |
| 52 | +# LOS configurations (If the centroid crosses this line, count it) |
48 | 53 | line_start = (0, 500) |
49 | 54 | line_end = (800, 500) |
50 | 55 |
|
|
58 | 63 | #--------------------------------------------------------- |
59 | 64 |
|
60 | 65 | # Load the network---------------------------------------- |
61 | | -#net = cv2.dnn.readNetFromDarknet(config, weights) |
62 | 66 | net = cv2.dnn.readNetFromDarknet(config, weights) |
63 | 67 | net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV) |
64 | 68 | net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) |
|
72 | 76 | # Capture frame-by-frame |
73 | 77 | ret, frame = cap.read() |
74 | 78 | img = cv2.resize(frame,(800, 600)) |
75 | | - # Our operations on the frame come here |
76 | | - #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) |
| 79 | + |
77 | 80 | height, width= img.shape[:2] |
78 | 81 |
|
79 | 82 | blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), swapRB=True, crop=False) |
80 | 83 | net.setInput(blob) |
| 84 | + |
| 85 | + # Perform a forward pass through the net |
81 | 86 | layer_outputs = net.forward(output_layers) |
82 | 87 |
|
83 | 88 | class_ids, confidences, b_boxes = [], [], [] |
|
94 | 99 | x = int(center_x - w / 2) |
95 | 100 | y = int(center_y - h / 2) |
96 | 101 | b_boxes.append([x, y, int(w), int(h)]) |
| 102 | + |
| 103 | + # Obtain the centroids for each bounding box |
97 | 104 | centroid_x, centroid_y = utils.get_centroid(x, y, x+int(w), y+int(h)) |
98 | 105 | centroids.append([centroid_x, centroid_y]) |
99 | | - # Add a horizontal line, and count |
100 | | - |
101 | | - |
| 106 | + |
102 | 107 | confidences.append(float(confidence)) |
103 | 108 | class_ids.append(int(class_id)) |
104 | 109 |
|
|
110 | 115 | classes = [line.strip() for line in f.readlines()] |
111 | 116 | colors = np.random.uniform(0, 255, size=(len(classes), 3)) |
112 | 117 |
|
| 118 | + |
113 | 119 | for index in indices: |
114 | 120 | x, y, w, h = b_boxes[index] |
115 | 121 | cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 2) |
116 | 122 | cv2.putText(img, classes[class_ids[index]], (x - 10, y - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 2) |
117 | 123 |
|
118 | 124 | for c in centroids: |
119 | 125 | c_x, c_y = c[0], c[1] |
120 | | - cv2.circle(img, (c_x, c_y), radius=2, color=(0, 255, 255), thickness=2) |
| 126 | + cv2.circle(img, (c_x, c_y), radius=2, color=(0, 255, 255), thickness=1) |
121 | 127 | cv2.line(img, line_start, line_end, line_color, line_thickness) |
122 | 128 |
|
123 | | - #if(c_y > 500): |
| 129 | + # NOTE: this naive approach over-counts, because
| 130 | + # the same object would be counted once per frame
| 131 | + # rather than once overall
| 132 | + #if(c_y > 500): |
124 | 133 | # treeNum = treeNum + 1 |
125 | 134 |
|
126 | 135 | #cv2.putText(img, str(treeNum), (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (128, 120, 68), 2) |
127 | 136 |
|
| 137 | + # Show a red line for the distance from the centroid to the counting line (at 500 px height)
128 | 138 | cv2.line(img, (c_x, c_y), (c_x, 500), alt_line_color, line_thickness) |
| 139 | + |
129 | 140 | # Display the resulting frame |
130 | 141 | cv2.imshow('image',img) |
131 | 142 | out.write(img) |
|
0 commit comments