1. code cleanup of inference_gpu_.py; inference_.py renamed to inference_cpu_.py 2. streaming_url_updator.py CORS fix 3. working DB INSERT of postprocess_draft.py
@cfcadeeb2c3b9e6939aa5b917b50d0891825cdfa
--- inference_endpoint.py
+++ inference_endpoint.py
... | ... | @@ -11,7 +11,7 @@ |
11 | 11 |
from requests_toolbelt import MultipartEncoder |
12 | 12 |
import base64 |
13 | 13 |
|
14 |
-from yoloseg.inference_ import Inference, overlay_mask |
|
14 |
+from yoloseg.inference_gpu_ import Inference, overlay_mask |
|
15 | 15 |
from config_files.endpoints import POSTPROCESS_ENDPOINT |
16 | 16 |
|
17 | 17 |
# from config_files import API_ENDPOINT_MAIN |
--- postprocess_draft.py
+++ postprocess_draft.py
... | ... | @@ -3,19 +3,19 @@ |
3 | 3 |
from flask_restx import Api, Resource, fields |
4 | 4 |
import os |
5 | 5 |
import psycopg2 |
6 |
-from datetime import datetime |
|
7 |
-from yoloseg.inference_ import Inference, overlay_mask |
|
8 |
-import cv2 |
|
9 | 6 |
import time |
10 | 7 |
import base64 |
11 | 8 |
import json |
9 |
+import cv2 |
|
12 | 10 |
import requests |
13 | 11 |
import typing |
14 | 12 |
from requests_toolbelt import MultipartEncoder |
15 | 13 |
|
16 |
-# from config_files import API_ENDPOINT_MAIN |
|
17 | 14 |
|
18 | 15 |
debug = True |
16 |
+ |
|
17 |
+with open('config_files/MAIN_DB_ENDPOINT.json', 'r') as file: |
|
18 |
+ db_config = json.load(file) |
|
19 | 19 |
|
20 | 20 |
app = Flask(__name__) |
21 | 21 |
api = Api(app, version='1.0', title='CCTV Image Upload API', |
... | ... | @@ -75,7 +75,6 @@ |
75 | 75 |
|
76 | 76 |
|
77 | 77 |
def __setitem__(self, key, value): |
78 |
- print(self.sources.keys) |
|
79 | 78 |
if key not in self.sources: |
80 | 79 |
self.sources[key] = { |
81 | 80 |
"status_counts": [], |
... | ... | @@ -100,7 +99,7 @@ |
100 | 99 |
def __call__(self): |
101 | 100 |
return self.sources |
102 | 101 |
|
103 |
- def add_status(self, source, status): |
|
102 |
+ def add_status(self, source, status, image, seg_image): |
|
104 | 103 |
assert status in ["OK", "FAIL"],\ |
105 | 104 |
f"Invalid status was given!, status must be one of 'OK' or 'FAIL', but given '{status}'!" |
106 | 105 |
|
... | ... | @@ -150,6 +149,8 @@ |
150 | 149 |
self.sources[source]["last_send_before"] += 1 |
151 | 150 |
|
152 | 151 |
if flag_send_event: |
152 |
+ self.sources[source]["most_recent_image"] = image |
|
153 |
+ self.sources[source]["most_recent_seg_image"] = seg_image |
|
153 | 154 |
self.send_event(source) |
154 | 155 |
|
155 | 156 |
# alert alarms only once |
... | ... | @@ -189,6 +190,17 @@ |
189 | 190 |
normal_mode_thres=8, |
190 | 191 |
normal_mode_check_past_n=12, |
191 | 192 |
) |
193 |
+ |
|
194 |
+ |
|
195 |
+def get_base64_encoded_image_from_file_binary(image): |
|
196 |
+ image = np.frombuffer(image, np.uint8) |
|
197 |
+ image = cv2.imdecode(image, cv2.IMREAD_COLOR) |
|
198 |
+ _, image = cv2.imencode('.jpg', image) |
|
199 |
+ image = image.tobytes() |
|
200 |
+ image = base64.b64encode(image) |
|
201 |
+ return image |
|
202 |
+ |
|
203 |
+ |
|
192 | 204 |
@ns.route('/postprocess', ) |
193 | 205 |
class PostProcesser(Resource): |
194 | 206 |
def __init__(self, *args, **kargs): |
... | ... | @@ -211,6 +223,7 @@ |
211 | 223 |
@ns.response(400, 'Validation Error') |
212 | 224 |
def post(self): |
213 | 225 |
try: |
226 |
+ # Gathering values |
|
214 | 227 |
self.image_type = request.headers.get('Content-Type') |
215 | 228 |
self.cctv_name = base64.b64decode(request.headers.get('x-cctv-name', '')).decode('UTF-8') |
216 | 229 |
self.time_sent = request.headers.get('x-time-sent', '') |
... | ... | @@ -230,6 +243,8 @@ |
230 | 243 |
self.area_percent = float(self.area_percent) |
231 | 244 |
except (TypeError, ValueError) as e: |
232 | 245 |
raise ValueError(f"Invalid value for x-area-percentage: {self.area_percent}") |
246 |
+ |
|
247 |
+ # gathering files |
|
233 | 248 |
self.image = request.files.get('image') |
234 | 249 |
self.mask = request.files.get('mask') |
235 | 250 |
self.seg_image = request.files.get('seg_mask') |
... | ... | @@ -240,15 +255,18 @@ |
240 | 255 |
self.mask.save(f"network_test/mask_p{time.time()}.png") |
241 | 256 |
self.seg_image.save(f"network_test/seg_p{time.time()}.png") |
242 | 257 |
|
258 |
+ image_b64 = get_base64_encoded_image_from_file_binary(self.image) |
|
259 |
+ seg_image_b64 = get_base64_encoded_image_from_file_binary(self.seg_image.read()) |
|
260 |
+ |
|
243 | 261 |
self.time_sent = time.time() |
244 | 262 |
|
245 | 263 |
self.cctv_info = { |
246 | 264 |
'cctv_name': self.cctv_name, |
247 | 265 |
'cctv_latitude': self.cctv_latitude, |
248 | 266 |
'cctv_longitude': self.cctv_longitude, |
249 |
- 'source_frame': self.image, |
|
250 |
- 'frame_mask': self.mask, |
|
251 |
- 'seg_frame': self.seg_image, |
|
267 |
+ 'source_frame': image_b64, |
|
268 |
+ # 'frame_mask': self.mask, |
|
269 |
+ 'seg_frame': seg_image_b64, |
|
252 | 270 |
'time_sent': self.time_sent |
253 | 271 |
} |
254 | 272 |
# if self.cctv_name in memory: |
... | ... | @@ -257,7 +275,9 @@ |
257 | 275 |
except : |
258 | 276 |
pass |
259 | 277 |
pass_fail = self.pass_fail() |
260 |
- memory.add_status(self.cctv_name, pass_fail) |
|
278 |
+ |
|
279 |
+ memory.add_status(self.cctv_name, pass_fail, image_b64, seg_image_b64) |
|
280 |
+ |
|
261 | 281 |
if debug: |
262 | 282 |
print(memory()) |
263 | 283 |
|
--- postprocessing.py
+++ postprocessing.py
... | ... | @@ -3,7 +3,7 @@ |
3 | 3 |
from flask_restx import Api, Resource, fields |
4 | 4 |
import os |
5 | 5 |
from datetime import datetime |
6 |
-from yoloseg.inference_ import Inference, overlay_mask |
|
6 |
+from yoloseg.inference_cpu_ import Inference, overlay_mask |
|
7 | 7 |
import cv2 |
8 | 8 |
import time |
9 | 9 |
import base64 |
--- streaming_url_updator.py
+++ streaming_url_updator.py
... | ... | @@ -3,14 +3,15 @@ |
3 | 3 |
|
4 | 4 |
from flask import Flask |
5 | 5 |
from flask_restx import Api |
6 |
+from flask_cors import CORS |
|
6 | 7 |
from apscheduler.schedulers.background import BackgroundScheduler |
7 | 8 |
from apscheduler.triggers.interval import IntervalTrigger |
8 | 9 |
|
9 |
-API_ENDPOINT = "" |
|
10 |
+API_ENDPOINT = "http://165.229.169.148:8080/EquipmentUrlChanger.json" |
|
10 | 11 |
|
11 | 12 |
app = Flask(__name__) |
12 | 13 |
print("ITS API Updater START") |
13 |
- |
|
14 |
+CORS(app) |
|
14 | 15 |
api = Api(app, |
15 | 16 |
version='0.1', |
16 | 17 |
title="monitoring", |
... | ... | @@ -25,8 +26,9 @@ |
25 | 26 |
df = df.drop("roadsectionid", axis=1) |
26 | 27 |
df = df.drop("cctvresolution", axis=1) |
27 | 28 |
df = df.drop("filecreatetime", axis=1) |
28 |
- payload = df.T.to_json() |
|
29 |
- requests.post(API_ENDPOINT, json=payload) |
|
29 |
+ payload = df.T.to_json(force_ascii=False) |
|
30 |
+ respond = requests.post(API_ENDPOINT, json=payload) |
|
31 |
+ print(respond) |
|
30 | 32 |
|
31 | 33 |
url_list_sender() |
32 | 34 |
scheduler = BackgroundScheduler() |
--- yoloseg/inference_.py
+++ yoloseg/inference_cpu_.py
No changes |
--- yoloseg/inference_gpu_.py
+++ yoloseg/inference_gpu_.py
... | ... | @@ -36,24 +36,11 @@ |
36 | 36 |
# Prepare input data as a dictionary |
37 | 37 |
inputs = {self.session.get_inputs()[0].name: blob} |
38 | 38 |
# Run model |
39 |
- t1 = time.time() |
|
40 | 39 |
outputs = self.session.run(None, inputs) |
41 |
- t2 = time.time() |
|
42 |
- print("model infer :") |
|
43 |
- print(t2-t1) |
|
44 | 40 |
outputs_bbox = outputs[0] |
45 | 41 |
outputs_mask = outputs[1] |
46 |
- t1 = time.time() |
|
47 | 42 |
detections = self.process_detections(outputs_bbox, model_input) |
48 |
- t2 = time.time() |
|
49 |
- print("bbox :") |
|
50 |
- print(t2-t1) |
|
51 |
- t1 = time.time() |
|
52 | 43 |
mask_maps = self.process_mask_output(detections, outputs_mask, model_input.shape) |
53 |
- t2 = time.time() |
|
54 |
- print("mask :") |
|
55 |
- print(t2-t1) |
|
56 |
- |
|
57 | 44 |
return detections, mask_maps |
58 | 45 |
|
59 | 46 |
def load_onnx_network(self): |
... | ... | @@ -89,7 +76,7 @@ |
89 | 76 |
|
90 | 77 |
t1 = time.time() |
91 | 78 |
# Assuming outputs_bbox is an array with shape (N, 4+CLASS_NUM+32) where N is the number of detections |
92 |
- # Example outputs_bbox.shape -> (8400, 4+CLASS_NUM+32) |
|
79 |
+ # Example outputs_bbox.shape -> (batch_size, 4+CLASS_NUM+32, 8400) |
|
93 | 80 |
|
94 | 81 |
# Extract basic bbox coordinates and scores |
95 | 82 |
x, y, w, h = outputs_bbox[:, 0], outputs_bbox[:, 1], outputs_bbox[:, 2], outputs_bbox[:, 3] |
... | ... | @@ -100,7 +87,7 @@ |
100 | 87 |
class_ids = np.argmax(scores, axis=1) |
101 | 88 |
|
102 | 89 |
# Filter out small boxes |
103 |
- min_width, min_height = 40, 40 |
|
90 |
+ min_width, min_height = 20, 20 |
|
104 | 91 |
valid_size = (w >= min_width) & (h >= min_height) |
105 | 92 |
|
106 | 93 |
# Apply confidence threshold |
... | ... | @@ -130,27 +117,16 @@ |
130 | 117 |
|
131 | 118 |
# Prepare final arrays |
132 | 119 |
boxes = np.vstack([left, top, width, height]).T |
133 |
- mask_coefficients = scores_segmentation |
|
134 | 120 |
|
135 |
- # If you need to use integer types for some reason (e.g., indexing later on): |
|
121 |
+ # Change it into int for mask operation |
|
136 | 122 |
boxes = boxes.astype(int) |
137 |
- |
|
138 |
- # You can further process these arrays or convert them to lists if needed: |
|
139 | 123 |
boxes = boxes.tolist() |
140 | 124 |
filtered_confidences = filtered_confidences.tolist() |
141 | 125 |
filtered_class_ids = filtered_class_ids.tolist() |
142 |
- t2 = time.time() |
|
143 |
- print("cursed for loop") |
|
144 |
- print(t2-t1) |
|
145 |
- confidences = (confidences) |
|
146 |
- t1 = time.time() |
|
147 | 126 |
if not len(boxes) <= 0 : |
148 | 127 |
indices = cv2.dnn.NMSBoxes(boxes, filtered_confidences, self.model_score_threshold, self.model_nms_threshold) |
149 | 128 |
else: |
150 | 129 |
indices = [] |
151 |
- t2 = time.time() |
|
152 |
- print("nms : ") |
|
153 |
- print(t2-t1) |
|
154 | 130 |
|
155 | 131 |
detections = [] |
156 | 132 |
for i in indices: |
... | ... | @@ -187,7 +163,7 @@ |
187 | 163 |
|
188 | 164 |
# Compute the linear combination of proto masks |
189 | 165 |
# for now, plural batch operation is not supported, and this is the point where you should start. |
190 |
- # instead of hardcoded proto_masks[0], do some iterative operation. |
|
166 |
+ # instead of hardcoded proto_masks[0], do some iterative/vectorize operation |
|
191 | 167 |
mask = np.tensordot(coeffs, proto_masks[0], axes=[0, 0]) # Dot product along the number of prototypes |
192 | 168 |
|
193 | 169 |
# Resize mask to the bounding box size, using sigmoid to normalize |
... | ... | @@ -199,11 +175,6 @@ |
199 | 175 |
|
200 | 176 |
# Place the mask in the corresponding location on a full-sized mask image_binary |
201 | 177 |
full_mask = np.zeros((image_shape[0], image_shape[1]), dtype=np.uint8) |
202 |
- # print("---------") |
|
203 |
- # print(f"x1 : {x1}, y1 : {y1}, w: {w}, h: {h}") |
|
204 |
- # print(f"x2: {x2}, y2 : {y2}") |
|
205 |
- # print(final_mask.shape) |
|
206 |
- # print(full_mask[y1:y2, x1:x2].shape) |
|
207 | 178 |
full_mask[y1:y1+h, x1:x1+w] = final_mask |
208 | 179 |
|
209 | 180 |
# Combine the mask with the masks of other detections |
... | ... | @@ -334,5 +305,4 @@ |
334 | 305 |
|
335 | 306 |
|
336 | 307 |
if __name__ == "__main__": |
337 |
- test() |
|
338 |
- # test()(파일 끝에 줄바꿈 문자 없음) |
|
308 |
+ test()(파일 끝에 줄바꿈 문자 없음) |
Add a comment
Delete comment
Once you delete this comment, you won't be able to recover it. Are you sure you want to delete this comment?