--- Hourly_db_schedular.py
+++ Hourly_db_schedular.py
@@ -50,7 +50,6 @@
     previous_hour = (now - timedelta(hours=1)).replace(minute=0, second=0, microsecond=0)
     current_hour = now.replace(minute=0, second=0, microsecond=0)
 
-    # SQL to fetch data
     fetch_sql = """
         SELECT eqpmn_id, COUNT(*) AS flooding_cnt
         FROM flooding_detect_event
@@ -61,7 +60,6 @@
     rows = cursor.fetchall()
     df = pd.DataFrame(rows, columns=['eqpmn_id', 'flooding_cnt'])
 
-    # Insert results into flooding_anals_event_data_hr
     insert_sql = """
         INSERT INTO flooding_anals_event_data_hr (clct_dt, eqpmn_id, flooding_cnt)
         VALUES (%s, %s, %s);
@@ -71,7 +69,7 @@
         cursor.execute(insert_sql, (previous_hour, row['eqpmn_id'], row['flooding_cnt']))
     conn.commit()
 
-# Scheduler configuration
+
 scheduler = BackgroundScheduler()
 scheduler.add_job(func=fetch_and_update, trigger='cron', minute=5)
 scheduler.start()
+++ yoloseg/inference_gpu_.py
@@ -0,0 +1,298 @@
+import cv2
+import numpy as np
+import random
+import onnxruntime as ort
+from config_files.yolo_config import CLASS_NAME, CLASS_NUM
+from typing import List, Tuple
+
+
+class Inference:
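+    """ONNX Runtime wrapper for yolo-v8-seg style inference.
+
+    Decodes boxes and per-class scores from the detection head, applies NMS,
+    and assembles binary masks from the prototype masks of the mask head.
+    """
+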
+    def __init__(self, onnx_model_path, model_input_shape, classes_txt_file, run_with_cuda):
+        self.model_path = onnx_model_path
+        self.model_shape = model_input_shape
+        self.classes_path = classes_txt_file
+        self.cuda_enabled = run_with_cuda
+        self.letter_box_for_square = True
+        self.model_score_threshold = 0.3
+        self.model_nms_threshold = 0.6
+        self.classes = []
+        self.session = None
+
+        self.load_onnx_network()
+        self.load_classes_from_file()
+
+    def sigmoid(self, x):
+        return 1 / (1 + np.exp(-x))
+
+    def run_inference(self, input_image):
+        model_input = input_image
+        if self.letter_box_for_square and self.model_shape[0] == self.model_shape[1]:
+            model_input = self.format_to_square(model_input)
+
+        blob = cv2.dnn.blobFromImage(model_input, 1.0 / 255.0, self.model_shape, (0, 0, 0), True, False)
+
+        # Prepare input data as a dictionary
+        inputs = {self.session.get_inputs()[0].name: blob}
+        # Run model
+        outputs = self.session.run(None, inputs)
+        outputs_bbox = outputs[0]
+        outputs_mask = outputs[1]
+
+        detections = self.process_detections(outputs_bbox, model_input)
+        mask_maps = self.process_mask_output(detections, outputs_mask, model_input.shape)
+
+        return detections, mask_maps
+
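+    # Note on preprocessing: blobFromImage above rescales pixel values by
+    # 1/255, resizes to model_shape, swaps BGR -> RGB (swapRB=True) without
+    # cropping, and returns a float32 NCHW tensor of shape (1, 3, H, W),
+    # which is the input layout this ONNX session expects.
+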
+    def load_onnx_network(self):
+        # Set up the ONNX Runtime session with the appropriate device settings
+        try:
+            if self.cuda_enabled:
+                providers = [('CUDAExecutionProvider', {'device_id': 0})]
+            else:
+                providers = ['CPUExecutionProvider']
+
+            self.session = ort.InferenceSession(self.model_path, providers=providers)
+            print(f"Running on {'CUDA' if self.cuda_enabled else 'CPU'}")
+            print(f"Model loaded successfully. Input name: {self.session.get_inputs()[0].name}")
+        except Exception as e:
+            print(f"Failed to load the ONNX model: {e}")
+            self.session = None
+
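+    # Note: if the CUDA execution provider is unavailable at runtime,
+    # onnxruntime falls back to CPUExecutionProvider with a console warning;
+    # self.session.get_providers() reports which providers were registered.
+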
+    def load_classes_from_file(self):
+        with open(self.classes_path, 'r') as f:
+            self.classes = f.read().strip().split('\n')
+
+    def format_to_square(self, source):
+        col, row = source.shape[1], source.shape[0]
+        max_side = max(col, row)
+        result = np.zeros((max_side, max_side, 3), dtype=np.uint8)
+        result[0:row, 0:col] = source
+        return result
+
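+    # Example: a 1280x720 frame becomes a 1280x1280 canvas with the source in
+    # the top-left corner and zero padding below, so the square resize inside
+    # blobFromImage does not distort the aspect ratio.
+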
+    def process_detections(self, outputs_bbox, model_input):
+        # Assumes outputs_bbox is already in the (x, y, w, h, class_scores..., mask_coeffs...) format
+        x_factor = model_input.shape[1] / self.model_shape[0]
+        y_factor = model_input.shape[0] / self.model_shape[1]
+
+        class_ids = []
+        confidences = []
+        mask_coefficients = []
+        boxes = []
+
+        for detection in outputs_bbox[0].T:
+            # This segmentation model uses the YOLACT architecture to predict masks.
+            # The output tensor for yolo-v8-seg is B x [X, Y, W, H, C1, C2, ..., P1, ..., P32] x 8400,
+            # where C{n} is the confidence score for each class
+            # and P{n} is the coefficient for each proto mask (32 by default).
+            scores_classification = detection[4:4 + CLASS_NUM]
+            scores_segmentation = detection[4 + CLASS_NUM:]
+            class_id = np.argmax(scores_classification, axis=0)
+            confidence = scores_classification[class_id]
+
+            thres = self.model_score_threshold
+            w_thres = 40
+            h_thres = 40
+
+            x, y, w, h = detection[:4]
+            # Skip boxes that are too small; this application does not need to
+            # detect small areas.
+            if w < w_thres or h < h_thres:
+                continue
+
+            if confidence > thres:
+                left = int((x - 0.5 * w) * x_factor)
+                top = int((y - 0.5 * h) * y_factor)
+                width = int(w * x_factor)
+                height = int(h * y_factor)
+
+                boxes.append([left, top, width, height])
+                confidences.append(float(confidence))
+                mask_coefficients.append(scores_segmentation)
+                class_ids.append(class_id)
+
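+        # cv2.dnn.NMSBoxes returns the indices of the boxes kept after
+        # non-maximum suppression; older OpenCV builds return an (N, 1) array
+        # rather than a flat one, hence the int() coercion below.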
+        indices = cv2.dnn.NMSBoxes(boxes, confidences, self.model_score_threshold, self.model_nms_threshold)
+
+        detections = []
+        for i in indices:
+            idx = int(i)
+            result = {
+                'class_id': class_ids[idx],
+                'confidence': confidences[idx],
+                'mask_coefficients': np.array(mask_coefficients[idx]),
+                'box': boxes[idx],
+                'class_name': self.classes[class_ids[idx]],
+                'color': (random.randint(100, 255), random.randint(100, 255), random.randint(100, 255))
+            }
+            detections.append(result)
+
+        return detections
+
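+    # Worked example of the detection layout (assuming CLASS_NUM == 1 and the
+    # default 32 proto masks): outputs_bbox has shape (1, 4 + 1 + 32, 8400) =
+    # (1, 37, 8400). After the .T above, each `detection` is a 37-vector
+    # [x, y, w, h, C1, P1, ..., P32], so detection[4:5] is the class score and
+    # detection[5:] holds the 32 coefficients used by process_mask_output().
+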
+    def process_mask_output(self, detections, proto_masks, image_shape):
+        if not detections:
+            return []
+
+        batch_size, num_protos, proto_height, proto_width = proto_masks.shape
+        full_masks = np.zeros((len(detections), image_shape[0], image_shape[1]), dtype=np.float32)
+
+        for idx, det in enumerate(detections):
+            box = det['box']
+
+            x1, y1, w, h = self.adjust_box_coordinates(box, (image_shape[0], image_shape[1]))
+
+            if w <= 1 or h <= 1:
+                continue
+
+            # Get the corresponding mask coefficients for this detection
+            coeffs = det["mask_coefficients"]
+
+            # Compute the linear combination of proto masks.
+            # Batched inputs are not supported yet; to add them, start here by
+            # iterating over the batch instead of hardcoding proto_masks[0].
+            mask = np.tensordot(coeffs, proto_masks[0], axes=[0, 0])  # dot product along the prototype axis
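+            # Equivalently: mask[i, j] = sum_k coeffs[k] * proto_masks[0][k, i, j],
+            # a per-pixel linear combination of the 32 prototype masks that
+            # yields a single (proto_height, proto_width) logit map.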
+
+            # Resize the mask to the bounding box size, then normalize with sigmoid
+            resized_mask = cv2.resize(mask, (w, h))
+            resized_mask = self.sigmoid(resized_mask)
+
+            # Threshold to create a binary mask
+            final_mask = (resized_mask > 0.5).astype(np.uint8)
+
+            # Place the mask at the corresponding location on a full-sized mask image
+            full_mask = np.zeros((image_shape[0], image_shape[1]), dtype=np.uint8)
+            full_mask[y1:y1 + h, x1:x1 + w] = final_mask
+
+            # Combine the mask with the masks of other detections
+            full_masks[idx] = full_mask
+
+        all_mask = full_masks.sum(axis=0)
+        all_mask = np.clip(all_mask, 0, 1)
+        # Append a dimension so that cv2 can treat ``all_mask`` as an image.
+        # This works here because there is only a single class, ``water_body``;
+        # if that is not the case, this part must be modified.
+        all_mask = all_mask.reshape((image_shape[0], image_shape[1], 1))
+        return all_mask.astype(np.uint8)
+
+    def adjust_box_coordinates(self, box: List[int], image_shape: Tuple[int, int]) -> Tuple[int, int, int, int]:
+        """
+        Adjusts bounding box coordinates to ensure they lie within image boundaries.
+        """
+        x1, y1, w, h = box
+        x2, y2 = x1 + w, y1 + h
+
+        # Clamp coordinates to image boundaries
+        x1 = max(0, x1)
+        y1 = max(0, y1)
+        x2 = min(image_shape[1], x2)
+        y2 = min(image_shape[0], y2)
+
+        # Recalculate width and height
+        w = x2 - x1
+        h = y2 - y1
+
+        return x1, y1, w, h
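+
+    # Example: with image_shape == (640, 640), the box [-10, 600, 50, 80] is
+    # clamped to x1 = 0, y1 = 600, w = 40, h = 40, so the slice
+    # full_mask[y1:y1 + h, x1:x1 + w] above always stays inside the image.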
+
+
+def overlay_mask(image, mask, color=(0, 255, 0), alpha=0.5):
+    """
+    Overlays a mask onto an image using a specified color and transparency level.
+
+    Parameters:
+        image (np.ndarray): The original image.
+        mask (np.ndarray): The mask to overlay. Must be the same size as the image.
+        color (tuple): The color for the mask overlay in BGR format (default is green).
+        alpha (float): Transparency factor for the mask; 0 is fully transparent, 1 is opaque.
+
+    Returns:
+        np.ndarray: The image with the overlay.
+    """
+    assert 0 <= alpha <= 1, f"Error! Invalid alpha value; it must be a float between 0 and 1 inclusive, given alpha: {alpha}"
+
+    # Ensure the mask is binary
+    mask = (mask > 0).astype(np.uint8)  # Convert the mask to binary if it is not already
+
+    # Create an overlay with the same size as the image, colored only in the mask area
+    overlay = np.zeros_like(image, dtype=np.uint8)
+    overlay[mask == 1] = color
+
+    # Blend the overlay with the whole image: masked pixels are tinted toward
+    # `color`, while unmasked pixels are dimmed by a factor of (1 - alpha)
+    return cv2.addWeighted(src1=overlay, alpha=alpha, src2=image, beta=1 - alpha, gamma=0)
+
+
+def test():
+    import time
+
+    # Paths to the ONNX model and the classes text file
+    model_path = 'yoloseg/weight/best.onnx'
+    classes_txt_file = 'config_files/yolo_config.txt'
+    # image_path = 'yoloseg/img3.jpg'
+    image_path = 'testing.png'
+
+    model_input_shape = (640, 640)
+    inference_engine = Inference(
+        onnx_model_path=model_path,
+        model_input_shape=model_input_shape,
+        classes_txt_file=classes_txt_file,
+        run_with_cuda=True
+    )
+
+    # Load an image
+    img = cv2.imread(image_path)
+    if img is None:
+        print("Error loading image")
+        return
+    img = cv2.resize(img, model_input_shape)
+
+    # Run inference and time it over several iterations
+    for i in range(10):
+        t1 = time.time()
+        detections, mask_maps = inference_engine.run_inference(img)
+        t2 = time.time()
+        print(t2 - t1)
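+        # The first iteration typically includes CUDA warm-up and memory
+        # allocation, so the later timings are more representative.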
+
+    # Display detection results
+    for detection in detections:
+        x, y, w, h = detection['box']
+        class_name = detection['class_name']
+        confidence = detection['confidence']
+        cv2.rectangle(img, (x, y), (x + w, y + h), detection['color'], 2)
+        label = f"{class_name}: {confidence:.2f}"
+        cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, detection['color'], 2)
+
+    # Show the image
+    # cv2.imshow('Detections', img)
+    # cv2.waitKey(0)
+    # cv2.destroyAllWindows()
+
+    # To display segmentation maps as well, additional handling is needed.
+    # Example for displaying the first mask channel if available:
+    if len(mask_maps) != 0:
+        seg_image = overlay_mask(img, mask_maps[:, :, 0], color=(0, 255, 0), alpha=0.3)
+        cv2.imshow("segmentation", seg_image)
+        cv2.waitKey(0)
+        cv2.destroyAllWindows()
+
+
+if __name__ == "__main__":
+    test()
\ No newline at end of file
+++ yoloseg/weight/best.pt
This file is too big to display. |