Fix sending of inference results
Add exception handling for the case where no mask is produced
@dd791b5cd9c9458c00fa6fb59e296e152f32c1fe
--- inference_endpoint.py
+++ inference_endpoint.py
... | ... | @@ -6,6 +6,7 @@ |
6 | 6 |
from yoloseg.inference_ import Inference, overlay_mask |
7 | 7 |
import cv2 |
8 | 8 |
import time |
9 |
+from zoneinfo import ZoneInfo |
|
9 | 10 |
import base64 |
10 | 11 |
import requests |
11 | 12 |
from requests_toolbelt import MultipartEncoder |
... | ... | @@ -59,6 +60,7 @@ |
59 | 60 |
self.image_type = None |
60 | 61 |
self.seg_image = None |
61 | 62 |
self.area_percent = 0 |
63 |
+ self.time_zone = ZoneInfo("Asia/Seoul") |
|
62 | 64 |
|
63 | 65 |
@ns.response(200, 'Success') |
64 | 66 |
@ns.response(400, 'Validation Error') |
... | ... | @@ -83,9 +85,10 @@ |
83 | 85 |
|
84 | 86 |
t2 = time.time() |
85 | 87 |
if len(self.mask) > 0: |
88 |
+ self.mask = cv2.resize(self.mask, (image.shape[1], image.shape[0])) |
|
86 | 89 |
self.mask_blob = cv2.imencode('.png', self.mask) |
87 | 90 |
self.mask_blob = self.mask.tobytes() |
88 |
- self.mask = cv2.resize(self.mask, (image.shape[0], image.shape[1])) |
|
91 |
+ |
|
89 | 92 |
|
90 | 93 |
print(t2 - t1) |
91 | 94 |
|
... | ... | @@ -95,7 +98,7 @@ |
95 | 98 |
else : |
96 | 99 |
self.area_percent = np.sum(self.mask) / image.shape[0] * image.shape[1] |
97 | 100 |
|
98 |
- # self.send_result() |
|
101 |
+ self.send_result() |
|
99 | 102 |
# write another post request for pushing a detection result |
100 | 103 |
return {"message": f"Image {self.mask} uploaded successfully!"} |
101 | 104 |
|
... | ... | @@ -110,27 +113,44 @@ |
110 | 113 |
'x-area-percentage' : str(self.area_percent), |
111 | 114 |
} |
112 | 115 |
session = requests.Session() |
113 |
- |
|
116 |
+ seg_binary = cv2.imencode('.png', self.seg_image) |
|
117 |
+ seg_binary = seg_binary[1].tobytes() |
|
118 |
+ print(type(self.seg_image)) |
|
119 |
+ print(type(self.mask)) |
|
120 |
+ print(type(self.image)) |
|
114 | 121 |
try: |
115 |
- multipart_data = MultipartEncoder( |
|
116 |
- fields={ |
|
117 |
- 'image': ( |
|
118 |
- f'frame_{self.cctv_name}.{self.image_type}', |
|
119 |
- self.image, |
|
120 |
- f'image/{self.image_type}' |
|
121 |
- ), |
|
122 |
- 'mask' : ( |
|
123 |
- f'frame_mask_{self.cctv_name}.{self.image_type}', |
|
124 |
- self.mask_blob, |
|
125 |
- f'image/{self.image_type}' |
|
126 |
- ), |
|
127 |
- 'seg_mask' : ( |
|
128 |
- f'frame_seg_{self.cctv_name}.{self.image_type}', |
|
129 |
- self.seg_image, |
|
130 |
- f'image/{self.image_type}' |
|
131 |
- ) |
|
132 |
- } |
|
133 |
- ) |
|
122 |
+ if len(self.mask) != 0: |
|
123 |
+ multipart_data = MultipartEncoder( |
|
124 |
+ fields={ |
|
125 |
+ 'image': ( |
|
126 |
+ f'frame_{self.cctv_name}.{self.image_type}', |
|
127 |
+ self.image, |
|
128 |
+ f'image/{self.image_type}' |
|
129 |
+ ), |
|
130 |
+ 'mask' : ( |
|
131 |
+ f'frame_mask_{self.cctv_name}.{self.image_type}', |
|
132 |
+ self.mask_blob, |
|
133 |
+ f'image/{self.image_type}' |
|
134 |
+ ), |
|
135 |
+ 'seg_mask' : ( |
|
136 |
+ f'frame_seg_{self.cctv_name}.{self.image_type}', |
|
137 |
+ seg_binary, |
|
138 |
+ f'image/{self.image_type}' |
|
139 |
+ ) |
|
140 |
+ } |
|
141 |
+ ) |
|
142 |
+ header["Content-Type"] = multipart_data.content_type |
|
143 |
+ response = session.post(self.endpoint, headers=header, data=multipart_data) |
|
144 |
+ else: |
|
145 |
+ multipart_data = MultipartEncoder( |
|
146 |
+ fields={ |
|
147 |
+ 'image': ( |
|
148 |
+ f'frame_{self.cctv_name}.{self.image_type}', |
|
149 |
+ self.image, |
|
150 |
+ f'image/{self.image_type}' |
|
151 |
+ ), |
|
152 |
+ } |
|
153 |
+ ) |
|
134 | 154 |
header["Content-Type"] = multipart_data.content_type |
135 | 155 |
response = session.post(self.endpoint, headers=header, data=multipart_data) |
136 | 156 |
|
--- yoloseg/inference_.py
+++ yoloseg/inference_.py
... | ... | @@ -59,8 +59,8 @@ |
59 | 59 |
confidence = scores_classification[class_id] |
60 | 60 |
|
61 | 61 |
thres = self.model_score_threshold |
62 |
- w_thres = 20 |
|
63 |
- h_thres = 20 |
|
62 |
+ w_thres = 40 |
|
63 |
+ h_thres = 40 |
|
64 | 64 |
|
65 | 65 |
x, y, w, h = detection[:4] |
66 | 66 |
# if bboxes are too small, it just skips, and it is not a bad idea since we do not need to detect small areas |