Fixed a critical bug
: OUTPUT mask의 dimension이 감지된 bbox 수만큼 많아지는 문제
@e379e608a9884498ed3ec514af26c72d735fe4ee
--- inference_endpoint.py
+++ inference_endpoint.py
... | ... | @@ -9,7 +9,7 @@ |
9 | 9 |
import base64 |
10 | 10 |
import requests |
11 | 11 |
from requests_toolbelt import MultipartEncoder |
12 |
-from config_files import API_ENDPOINT_MAIN |
|
12 |
+# from config_files import API_ENDPOINT_MAIN |
|
13 | 13 |
|
14 | 14 |
app = Flask(__name__) |
15 | 15 |
api = Api(app, version='1.0', title='CCTV Image Upload API', |
... | ... | @@ -47,8 +47,8 @@ |
47 | 47 |
@ns.route('/infer', ) |
48 | 48 |
class ImageUpload(Resource): |
49 | 49 |
# @ns.expect(image_upload_model, validate=True) |
50 |
- def __init__(self): |
|
51 |
- super().__init__(api) |
|
50 |
+ def __init__(self, *args, **kargs): |
|
51 |
+ super().__init__(*args, **kargs) |
|
52 | 52 |
self.time_sent = None |
53 | 53 |
self.cctv_latitude = None |
54 | 54 |
self.cctv_longitude = None |
... | ... | @@ -78,12 +78,15 @@ |
78 | 78 |
# filename = f"{timestamp}_{self.cctv_info}.png" |
79 | 79 |
|
80 | 80 |
t1 = time.time() |
81 |
- detections, self.mask = inference_engine.run_inference(image) |
|
82 |
- self.mask_blob = cv2.imencode(self.mask) |
|
83 |
- self.mask_blob = self.mask.tobytes() |
|
81 |
+ detections, self.mask = inference_engine.run_inference(cv2.resize(image, model_input_shape)) |
|
84 | 82 |
t2 = time.time() |
83 |
+ if len(self.mask) > 0: |
|
84 |
+ print(self.mask.shape) |
|
85 |
+ self.mask_blob = cv2.imencode('.png', self.mask) |
|
86 |
+ self.mask_blob = self.mask.tobytes() |
|
85 | 87 |
|
86 |
- print(t2 - t1) |
|
88 |
+ |
|
89 |
+ # print(t2 - t1) |
|
87 | 90 |
|
88 | 91 |
if len(self.mask) != 0: |
89 | 92 |
seg_image = overlay_mask(image, self.mask[0], color=(0, 255, 0), alpha=0.3) |
... | ... | @@ -91,7 +94,9 @@ |
91 | 94 |
else : |
92 | 95 |
self.area_percent = np.sum(self.mask) / image.shape[0] * image.shape[1] |
93 | 96 |
|
94 |
- self.send_result() |
|
97 |
+ |
|
98 |
+ |
|
99 |
+ # self.send_result() |
|
95 | 100 |
# write another post request for pushing a detection result |
96 | 101 |
return {"message": f"Image {self.mask} uploaded successfully!"} |
97 | 102 |
|
+++ postprocess_draft_pingpong.py
... | ... | @@ -0,0 +1,78 @@ |
1 | +from flask import Flask, request, jsonify | |
2 | + | |
3 | +app = Flask(__name__) | |
4 | + | |
# The five CCTV sources tracked by this service. Deriving the three
# bookkeeping dicts from a single list keeps them in sync if the set
# of sources ever changes (the original repeated the names three times).
SOURCES = [f"source{i}" for i in range(1, 6)]

# Rolling list of the most recent statuses ('OK' / 'FAIL') per source.
status_counts = {source: [] for source in SOURCES}

# Count of consecutive 'OK' statuses received per source.
ok_counts = {source: 0 for source in SOURCES}

# Whether a source is currently in force-send mode (entered after five
# consecutive FAILs, exited after ten consecutive OKs).
force_send_mode = {source: False for source in SOURCES}
31 | + | |
32 | + | |
def check_consecutive_failures(source, status_list):
    """Enter force-send mode for *source* when its last five statuses are all FAIL."""
    recent = status_list[-5:]
    # Guard: need a full window of five, and every entry must be a FAIL.
    if len(recent) < 5 or any(status != 'FAIL' for status in recent):
        return
    print(f"Source {source} has 5 consecutive FAILs!")
    force_send_mode[source] = True
    send_message(source, force_send=True)
39 | + | |
40 | + | |
41 | +# Function to send a message | |
42 | +def send_message(source, force_send=False): | |
43 | + if force_send or ok_counts[source] >= 10: | |
44 | + print(f"Sending message for {source} - Status: {'FORCE SEND' if force_send else 'OK SEND'}") | |
45 | + ok_counts[source] = 0 # Reset counter after sending | |
46 | + | |
47 | + | |
# Route to receive data from sources
@app.route('/status', methods=['POST'])
def receive_status():
    """Record a status report ('OK'/'FAIL') for a source and trigger sends.

    Keeps the latest five statuses per source, counts consecutive OKs,
    enters force-send mode on five straight FAILs, and reverts to normal
    mode after ten straight OKs while forced.

    Returns 400 on a missing/invalid JSON body, otherwise 200.
    """
    # request.json raises / yields None for non-JSON bodies, which made the
    # original crash with AttributeError on data.get(); get_json(silent=True)
    # returns None instead so we can reply with a clean 400.
    data = request.get_json(silent=True)
    if data is None:
        return jsonify({"message": "Invalid or missing JSON body"}), 400

    source = data.get('source')
    status = data.get('status')

    if source in status_counts:
        status_counts[source].append(status)
        # Keep only the latest 5 statuses to check for consecutive FAILs.
        if len(status_counts[source]) > 5:
            status_counts[source].pop(0)

        if status == 'OK':
            ok_counts[source] += 1
            if force_send_mode[source] and ok_counts[source] >= 10:
                # Revert to normal mode after 10 consecutive OKs.
                force_send_mode[source] = False
                ok_counts[source] = 0
            elif not force_send_mode[source]:
                send_message(source)
        else:
            ok_counts[source] = 0  # any FAIL breaks the OK streak

        check_consecutive_failures(source, status_counts[source])

    # NOTE(review): unknown sources are acknowledged but ignored, matching
    # the original behavior — confirm whether a 404 would be preferable.
    return jsonify({"message": "Status received"}), 200
75 | + | |
76 | + | |
77 | +if __name__ == '__main__': | |
78 | + app.run(debug=True, port=5000) |
+++ postprocess_draft_test.py
... | ... | @@ -0,0 +1,65 @@ |
1 | +import requests | |
2 | +import time | |
3 | + | |
# Base URL for the server
BASE_URL = 'http://localhost:5000/status'


def send_status(source, status):
    """POST one status record for *source* and print the server's reply."""
    payload = {"source": source, "status": status}
    response = requests.post(BASE_URL, json=payload)
    print(f"Sent {status} for {source}, Response: {response.json()}")
11 | + | |
def test_server():
    """Drive the status server through five scripted scenarios."""

    def burst(source, status, times):
        # Send the same status *times* times in a row.
        for _ in range(times):
            send_status(source, status)

    sources = ['source1', 'source2', 'source3', 'source4', 'source5']

    # Scenario 1: Normal behavior with source1
    print("\nScenario 1: Normal behavior with source1")
    burst('source1', 'OK', 4)
    burst('source1', 'FAIL', 5)
    burst('source1', 'OK', 10)
    burst('source1', 'OK', 10)

    # Scenario 2: Force send and revert with source2
    print("\nScenario 2: Force send and revert with source2")
    burst('source2', 'FAIL', 5)
    burst('source2', 'OK', 10)
    burst('source2', 'OK', 10)

    # Scenario 3: Random mix with source3
    print("\nScenario 3: Random mix with source3")
    mix = ['OK', 'FAIL', 'OK', 'FAIL', 'OK', 'FAIL', 'OK', 'OK', 'OK', 'OK']
    for status in mix:
        send_status('source3', status)
    burst('source3', 'FAIL', 5)
    burst('source3', 'OK', 10)

    # Scenario 4: Source4 fails and recovers
    print("\nScenario 4: Source4 fails and recovers")
    burst('source4', 'FAIL', 5)
    burst('source4', 'OK', 10)
    burst('source4', 'OK', 10)

    # Scenario 5: Source5 normal operation
    print("\nScenario 5: Source5 normal operation")
    burst('source5', 'OK', 10)
    burst('source5', 'OK', 10)
if __name__ == '__main__':
    time.sleep(2)  # give the server a moment to come up before testing
    test_server()
--- postprocessing.py
+++ postprocessing.py
... | ... | @@ -40,5 +40,5 @@ |
40 | 40 |
self.image_type = request.headers.get('Content-Type') |
41 | 41 |
self.area_percent = request.headers.get('x-area-percentage') |
42 | 42 |
|
43 |
- |
|
44 |
- |
|
43 |
+ async def data_ping_pong(self): |
|
44 |
+ pass |
--- run_image_anal_backend.sh
+++ run_image_anal_backend.sh
... | ... | @@ -11,7 +11,7 @@ |
11 | 11 |
python streaming_process.py --cctv_num 1 & |
12 | 12 |
pids+=($!) |
13 | 13 |
|
14 |
- |
|
14 |
+#python test.py |
|
15 | 15 |
python inference_endpoint.py |
16 | 16 |
#gunicorn --workers=6 inference_endpoint:app |
17 | 17 |
pids+=($!) |
--- yoloseg/inference_.py
+++ yoloseg/inference_.py
... | ... | @@ -139,14 +139,17 @@ |
139 | 139 |
|
140 | 140 |
# Place the mask in the corresponding location on a full-sized mask image_binary |
141 | 141 |
full_mask = np.zeros((image_shape[0], image_shape[1]), dtype=np.uint8) |
142 |
- print(final_mask.shape) |
|
143 |
- print(full_mask[y1:y2, x1:x2].shape) |
|
142 |
+ # print("---------") |
|
143 |
+ # print(f"x1 : {x1}, y1 : {y1}, w: {w}, h: {h}") |
|
144 |
+ # print(f"x2: {x2}, y2 : {y2}") |
|
145 |
+ # print(final_mask.shape) |
|
146 |
+ # print(full_mask[y1:y2, x1:x2].shape) |
|
144 | 147 |
full_mask[y1:y2, x1:x2] = final_mask |
145 | 148 |
|
146 | 149 |
# Combine the mask with the masks of other detections |
147 | 150 |
full_masks[idx] = full_mask |
148 |
- |
|
149 |
- return full_masks |
|
151 |
+ all_mask = full_masks.sum(axis=0) |
|
152 |
+ return all_mask |
|
150 | 153 |
|
151 | 154 |
def load_classes_from_file(self): |
152 | 155 |
with open(self.classes_path, 'r') as f: |
Add a comment
Delete comment
Once you delete this comment, you won't be able to recover it. Are you sure you want to delete this comment?