윤영준 05-29
1. code cleanup of inference_gpu_.py and inference_.py is now inference_cpu_.py
2. streaming_url_updator.py CORS fix
3. working DB INSERT of postprocess_draft.py
@cfcadeeb2c3b9e6939aa5b917b50d0891825cdfa
inference_endpoint.py
--- inference_endpoint.py
+++ inference_endpoint.py
@@ -11,7 +11,7 @@
 from requests_toolbelt import MultipartEncoder
 import base64
 
-from yoloseg.inference_ import Inference, overlay_mask
+from yoloseg.inference_gpu_ import Inference, overlay_mask
 from config_files.endpoints import POSTPROCESS_ENDPOINT
 
 # from config_files import API_ENDPOINT_MAIN
postprocess_draft.py
--- postprocess_draft.py
+++ postprocess_draft.py
@@ -3,19 +3,19 @@
 from flask_restx import Api, Resource, fields
 import os
 import psycopg2
-from datetime import datetime
-from yoloseg.inference_ import Inference, overlay_mask
-import cv2
 import time
 import base64
 import json
+import cv2
 import requests
 import typing
 from requests_toolbelt import MultipartEncoder
 
-# from config_files import API_ENDPOINT_MAIN
 
 debug = True
+
+with open('config_files/MAIN_DB_ENDPOINT.json', 'r') as file:
+    db_config = json.load(file)
 
 app = Flask(__name__)
 api = Api(app, version='1.0', title='CCTV Image Upload API',
@@ -75,7 +75,6 @@
 
 
     def __setitem__(self, key, value):
-        print(self.sources.keys)
         if key not in self.sources:
             self.sources[key] = {
                 "status_counts": [],
@@ -100,7 +99,7 @@
     def __call__(self):
         return self.sources
 
-    def add_status(self, source, status):
+    def add_status(self, source, status, image, seg_image):
         assert status in ["OK", "FAIL"],\
             f"Invalid status was given!, status must be one of 'OK' or 'FAIL', but given '{status}'!"
         
@@ -150,6 +149,8 @@
                 self.sources[source]["last_send_before"] += 1
 
         if flag_send_event:
+            self.sources[source]["most_recent_image"] = image
+            self.sources[source]["most_recent_seg_image"] = seg_image
             self.send_event(source)
 
         # alert alarms only once
@@ -189,6 +190,17 @@
             normal_mode_thres=8,
             normal_mode_check_past_n=12,
         )
+
+
+def get_base64_encoded_image_from_file_binary(image):
+    image = np.frombuffer(image, np.uint8)
+    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
+    _, image = cv2.imencode('.jpg', image)
+    image = image.tobytes()
+    image = base64.b64encode(image)
+    return image
+
+
 @ns.route('/postprocess', )
 class PostProcesser(Resource):
     def __init__(self, *args, **kargs):
@@ -211,6 +223,7 @@
     @ns.response(400, 'Validation Error')
     def post(self):
         try:
+            # Gathering values
             self.image_type = request.headers.get('Content-Type')
             self.cctv_name = base64.b64decode(request.headers.get('x-cctv-name', '')).decode('UTF-8')
             self.time_sent = request.headers.get('x-time-sent', '')
@@ -230,6 +243,8 @@
                 self.area_percent = float(self.area_percent)
             except (TypeError, ValueError) as e:
                 raise ValueError(f"Invalid value for x-area-percentage: {self.area_percent}")
+
+            # gathering files
             self.image = request.files.get('image')
             self.mask = request.files.get('mask')
             self.seg_image = request.files.get('seg_mask')
@@ -240,15 +255,18 @@
                     self.mask.save(f"network_test/mask_p{time.time()}.png")
                     self.seg_image.save(f"network_test/seg_p{time.time()}.png")
 
+            image_b64 = get_base64_encoded_image_from_file_binary(self.image)
+            seg_image_b64 = get_base64_encoded_image_from_file_binary(self.seg_image.read())
+
             self.time_sent = time.time()
 
             self.cctv_info = {
                 'cctv_name': self.cctv_name,
                 'cctv_latitude': self.cctv_latitude,
                 'cctv_longitude': self.cctv_longitude,
-                'source_frame': self.image,
-                'frame_mask': self.mask,
-                'seg_frame': self.seg_image,
+                'source_frame': image_b64,
+                # 'frame_mask': self.mask,
+                'seg_frame': seg_image_b64,
                 'time_sent': self.time_sent
             }
             # if self.cctv_name in memory:
@@ -257,7 +275,9 @@
             except :
                 pass
             pass_fail = self.pass_fail()
-            memory.add_status(self.cctv_name, pass_fail)
+
+            memory.add_status(self.cctv_name, pass_fail, image_b64, seg_image_b64)
+
             if debug:
                 print(memory())
 
postprocessing.py
--- postprocessing.py
+++ postprocessing.py
@@ -3,7 +3,7 @@
 from flask_restx import Api, Resource, fields
 import os
 from datetime import datetime
-from yoloseg.inference_ import Inference, overlay_mask
+from yoloseg.inference_cpu_ import Inference, overlay_mask
 import cv2
 import time
 import base64
streaming_url_updator.py
--- streaming_url_updator.py
+++ streaming_url_updator.py
@@ -3,14 +3,15 @@
 
 from flask import Flask
 from flask_restx import Api
+from flask_cors import CORS
 from apscheduler.schedulers.background import BackgroundScheduler
 from apscheduler.triggers.interval import IntervalTrigger
 
-API_ENDPOINT = ""
+API_ENDPOINT = "http://165.229.169.148:8080/EquipmentUrlChanger.json"
 
 app = Flask(__name__)
 print("ITS API Updater START")
-
+CORS(app)
 api = Api(app,
           version='0.1',
           title="monitoring",
@@ -25,8 +26,9 @@
     df = df.drop("roadsectionid", axis=1)
     df = df.drop("cctvresolution", axis=1)
     df = df.drop("filecreatetime", axis=1)
-    payload = df.T.to_json()
-    requests.post(API_ENDPOINT, json=payload)
+    payload = df.T.to_json(force_ascii=False)
+    respond = requests.post(API_ENDPOINT, json=payload)
+    print(respond)
 
 url_list_sender()
 scheduler = BackgroundScheduler()
yoloseg/inference_cpu_.py (Renamed from yoloseg/inference_.py)
--- yoloseg/inference_.py
+++ yoloseg/inference_cpu_.py
No changes
yoloseg/inference_gpu_.py
--- yoloseg/inference_gpu_.py
+++ yoloseg/inference_gpu_.py
@@ -36,24 +36,11 @@
         # Prepare input data as a dictionary
         inputs = {self.session.get_inputs()[0].name: blob}
         # Run model
-        t1 = time.time()
         outputs = self.session.run(None, inputs)
-        t2 = time.time()
-        print("model infer :")
-        print(t2-t1)
         outputs_bbox = outputs[0]
         outputs_mask = outputs[1]
-        t1 = time.time()
         detections = self.process_detections(outputs_bbox, model_input)
-        t2 = time.time()
-        print("bbox :")
-        print(t2-t1)
-        t1 = time.time()
         mask_maps = self.process_mask_output(detections, outputs_mask, model_input.shape)
-        t2 = time.time()
-        print("mask :")
-        print(t2-t1)
-
         return detections, mask_maps
 
     def load_onnx_network(self):
@@ -89,7 +76,7 @@
 
         t1 = time.time()
         # Assuming outputs_bbox is an array with shape (N, 4+CLASS_NUM+32) where N is the number of detections
-        # Example outputs_bbox.shape -> (8400, 4+CLASS_NUM+32)
+        # Example outputs_bbox.shape -> (batch_size, 4+CLASS_NUM+32, 8400)
 
         # Extract basic bbox coordinates and scores
         x, y, w, h = outputs_bbox[:, 0], outputs_bbox[:, 1], outputs_bbox[:, 2], outputs_bbox[:, 3]
@@ -100,7 +87,7 @@
         class_ids = np.argmax(scores, axis=1)
 
         # Filter out small boxes
-        min_width, min_height = 40, 40
+        min_width, min_height = 20, 20
         valid_size = (w >= min_width) & (h >= min_height)
 
         # Apply confidence threshold
@@ -130,27 +117,16 @@
 
         # Prepare final arrays
         boxes = np.vstack([left, top, width, height]).T
-        mask_coefficients = scores_segmentation
 
-        # If you need to use integer types for some reason (e.g., indexing later on):
+        # Change it into int for mask operation
         boxes = boxes.astype(int)
-
-        # You can further process these arrays or convert them to lists if needed:
         boxes = boxes.tolist()
         filtered_confidences = filtered_confidences.tolist()
         filtered_class_ids = filtered_class_ids.tolist()
-        t2 = time.time()
-        print("cursed for loop")
-        print(t2-t1)
-        confidences = (confidences)
-        t1 = time.time()
         if not len(boxes) <= 0 :
             indices = cv2.dnn.NMSBoxes(boxes, filtered_confidences, self.model_score_threshold, self.model_nms_threshold)
         else:
             indices = []
-        t2 = time.time()
-        print("nms : ")
-        print(t2-t1)
 
         detections = []
         for i in indices:
@@ -187,7 +163,7 @@
 
             # Compute the linear combination of proto masks
             # for now, plural batch operation is not supported, and this is the point where you should start.
-            # instead of hardcoded proto_masks[0], do some iterative operation.
+            # instead of hardcoded proto_masks[0], do some iterative/vectorize operation
             mask = np.tensordot(coeffs, proto_masks[0], axes=[0, 0])  # Dot product along the number of prototypes
 
             # Resize mask to the bounding box size, using sigmoid to normalize
@@ -199,11 +175,6 @@
 
             # Place the mask in the corresponding location on a full-sized mask image_binary
             full_mask = np.zeros((image_shape[0], image_shape[1]), dtype=np.uint8)
-            # print("---------")
-            # print(f"x1 : {x1}, y1 : {y1}, w: {w}, h: {h}")
-            # print(f"x2: {x2}, y2 : {y2}")
-            # print(final_mask.shape)
-            # print(full_mask[y1:y2, x1:x2].shape)
             full_mask[y1:y1+h, x1:x1+w] = final_mask
 
             # Combine the mask with the masks of other detections
@@ -334,5 +305,4 @@
 
 
 if __name__ == "__main__":
-    test()
-    # test()
(파일 끝에 줄바꿈 문자 없음)
+    test()
(파일 끝에 줄바꿈 문자 없음)
Add a comment
List