--- inference_endpoint.py
+++ inference_endpoint.py
... | ... | @@ -79,12 +79,14 @@ |
79 | 79 |
|
80 | 80 |
t1 = time.time() |
81 | 81 |
detections, self.mask = inference_engine.run_inference(cv2.resize(image, model_input_shape)) |
82 |
+ |
|
82 | 83 |
t2 = time.time() |
83 | 84 |
if len(self.mask) > 0: |
84 | 85 |
print(self.mask.shape) |
86 |
+ print(type(self.mask)) |
|
85 | 87 |
self.mask_blob = cv2.imencode('.png', self.mask) |
86 | 88 |
self.mask_blob = self.mask.tobytes() |
87 |
- |
|
89 |
+ self.mask = cv2.resize(self.mask, (image.shape[0], image.shape[1])) |
|
88 | 90 |
|
89 | 91 |
# print(t2 - t1) |
90 | 92 |
|
... | ... | @@ -93,8 +95,6 @@ |
93 | 95 |
self.area_percent = 0 |
94 | 96 |
else : |
95 | 97 |
self.area_percent = np.sum(self.mask) / image.shape[0] * image.shape[1] |
96 |
- |
|
97 |
- |
|
98 | 98 |
|
99 | 99 |
# self.send_result() |
100 | 100 |
# write another post request for pushing a detection result |
--- yoloseg/inference_.py
+++ yoloseg/inference_.py
... | ... | @@ -60,11 +60,14 @@ |
60 | 60 |
thres = self.model_score_threshold |
61 | 61 |
w_thres = 20 |
62 | 62 |
h_thres = 20 |
63 |
+ |
|
64 |
+ x, y, w, h = detection[:4] |
|
65 |
+ # if bboxes are too small, it just skips, and it is not a bad idea since we do not need to detect small areas |
|
66 |
+ if w < w_thres or h < h_thres: |
|
67 |
+ continue |
|
68 |
+ |
|
63 | 69 |
if confidence > thres: |
64 |
- x, y, w, h = detection[:4] |
|
65 |
- # if bboxes are too small, it just skips, and it is not a bad idea since we do not need to detect small areas |
|
66 |
- if w < w_thres or h < h_thres: |
|
67 |
- continue |
|
70 |
+ |
|
68 | 71 |
left = int((x - 0.5 * w) * x_factor) |
69 | 72 |
top = int((y - 0.5 * h) * y_factor) |
70 | 73 |
width = int(w * x_factor) |
... | ... | @@ -102,7 +105,7 @@ |
102 | 105 |
for idx, det in enumerate(detections): |
103 | 106 |
box = det['box'] |
104 | 107 |
x1, y1, w, h = box |
105 |
- print(f"x1 : {x1}, y1 : {y1}, w: {w}, h: {h}") |
|
108 |
+ # print(f"x1 : {x1}, y1 : {y1}, w: {w}, h: {h}") |
|
106 | 109 |
|
107 | 110 |
x1, y1, x2, y2 = x1, y1, x1 + w, y1 + h |
108 | 111 |
|
... | ... | @@ -120,7 +123,7 @@ |
120 | 123 |
if x2 > image_shape[0]: |
121 | 124 |
w = image_shape[1] - y1 |
122 | 125 |
|
123 |
- print(f"x2: {x2}, y2 : {y2}") |
|
126 |
+ # print(f"x2: {x2}, y2 : {y2}") |
|
124 | 127 |
|
125 | 128 |
# Get the corresponding mask coefficients for this detection |
126 | 129 |
coeffs = det["mask_coefficients"] |
... | ... | @@ -149,7 +152,9 @@ |
149 | 152 |
# Combine the mask with the masks of other detections |
150 | 153 |
full_masks[idx] = full_mask |
151 | 154 |
all_mask = full_masks.sum(axis=0) |
152 |
- return all_mask |
|
155 |
+ # Append a dimension so that cv2 can understand this as an image. |
|
156 |
+ all_mask = all_mask.reshape((image_shape[0], image_shape[1], 1)) |
|
157 |
+ return all_mask.astype(np.uint8) |
|
153 | 158 |
|
154 | 159 |
def load_classes_from_file(self): |
155 | 160 |
with open(self.classes_path, 'r') as f: |