--- DB/db.py
+++ DB/db.py
@@ -1,12 +1,0 @@
-import psycopg2
-import json
-import requests
-import time
-
-
-async def get_cctv_info():
-    pass
-
-def setup_db():
-    pass
-
--- hls_streaming/hls.py
+++ hls_streaming/hls.py
@@ -9,7 +9,7 @@
 
 
 class FrameCapturer:
-    def __init__(self, hls_url, cctv_id, interval=5, buffer_duration=15, buffer_size=600, time_zone="Asia/Seoul", endpoint="localhost:12345"):
+    def __init__(self, hls_url, cctv_id, interval=5, buffer_duration=15, buffer_size=600, time_zone="Asia/Seoul", endpoint="http://localhost:12345/cctv/infer"):
         '''
         :param hls_url: hls address
         :param cctv_id: cctv_id number (whatever it is; this exists to distinguish where a frame came from. Further discussion with the frontend developers is needed.)
@@ -69,6 +69,7 @@
             self.current_frame = cv2.cvtColor(np.array(self.current_frame), cv2.COLOR_RGB2BGR)
             frame_name = f"captured_frame_{self.captured_frame_count}.jpg"
             img_binary = cv2.imencode('.png', self.current_frame)
+            img_binary = img_binary[1].tobytes()
             self.send_image_to_server(img_binary, self.endpoint)
             # cv2.imwrite(f'hls_streaming/captured_frame_/{datetime.now()}_{frame_name}', img)
             self.last_capture_time = current_time
@@ -87,7 +88,12 @@
             'x-cctv-longitude' : '',
         }
         try:
-            requests.post(endpoint, headers=header, files=image)
+            file = {
+                'image': (f'frame_{self.cctvid}.{image_type}',
+                          image,
+                          f'image/{image_type}')
+            }
+            requests.post(endpoint, headers=header, files=file)
         except:
             print("Can not connect to the analyzer server. Check the endpoint address or connection.\n"
                   f"Can not connect to : {self.endpoint}")
@@ -121,3 +127,4 @@
     t2 = time.time()
     with open("result.txt", "w") as file:
         file.write(f'{t2-t1} seconds before terminating')
+    exit()
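Note (not part of the patch): a minimal standalone sketch of what the reworked upload in send_image_to_server now amounts to, assuming `image` holds the PNG bytes produced by cv2.imencode(...)[1].tobytes() and `image_type` is a string such as "png". The 'image' field name and the x-cctv-* headers come from the diff above; the helper name and everything else is illustrative. The old call handed the raw cv2.imencode() result straight to files=, which requests cannot encode as a multipart field; the explicit dict with a (filename, payload, content type) triple is the usual way to build one.

    import requests

    # Hypothetical standalone equivalent of FrameCapturer.send_image_to_server:
    # the image bytes travel as a multipart/form-data part named 'image',
    # while the CCTV metadata travels in custom request headers.
    def send_frame(endpoint, image_bytes, cctv_id, image_type="png"):
        headers = {
            'x-cctv-info': str(cctv_id),
            'x-time-sent': '',            # left empty here, as in the patch
            'x-cctv-latitude': '',
            'x-cctv-longitude': '',
        }
        files = {
            # (filename, payload, content type) -- the same triple the patch builds
            'image': (f'frame_{cctv_id}.{image_type}', image_bytes, f'image/{image_type}')
        }
        try:
            return requests.post(endpoint, headers=headers, files=files)
        except requests.RequestException:
            print(f"Cannot connect to: {endpoint}")
            return None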
+++ test.py
@@ -0,0 +1,54 @@
+from flask import Flask, request
+from flask_restx import Api, Resource, fields
+import os
+from datetime import datetime
+
+app = Flask(__name__)
+api = Api(app, version='1.0', title='CCTV Image Upload API',
+          description='A simple API for receiving CCTV images')
+
+# Namespace definition
+ns = api.namespace('cctv', description='CCTV operations')
+
+# Define the expected model for incoming data
+image_upload_model = api.model('ImageUpload', {
+    'image': fields.String(required=True, description='Image file', dt='File'),
+    'x-cctv-info': fields.String(required=False, description='CCTV identifier'),
+    'x-time-sent': fields.String(required=False, description='Time image was sent'),
+    'x-cctv-latitude': fields.String(required=False, description='Latitude of CCTV'),
+    'x-cctv-longitude': fields.String(required=False, description='Longitude of CCTV')
+})
+
+# Define the directory where images will be saved
+IMAGE_DIR = "received_images"
+if not os.path.exists(IMAGE_DIR):
+    os.makedirs(IMAGE_DIR)
+
[email protected]('/infer')
+class ImageUpload(Resource):
+    @ns.expect(image_upload_model, validate=True)
+    @ns.response(200, 'Success')
+    @ns.response(400, 'Validation Error')
+    def post(self):
+        if 'image' not in request.files:
+            ns.abort(400, 'No image part in the request')
+
+        image = request.files['image']
+        cctv_info = request.headers.get('x-cctv-info', '')
+        time_sent = request.headers.get('x-time-sent', '')
+        cctv_latitude = request.headers.get('x-cctv-latitude', 'Not provided')
+        cctv_longitude = request.headers.get('x-cctv-longitude', 'Not provided')
+
+        if image.filename == '':
+            ns.abort(400, 'No selected image')
+
+        # Use current timestamp to avoid filename conflicts
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename = f"{timestamp}_{cctv_info}.png"
+        image_path = os.path.join(IMAGE_DIR, filename)
+        image.save(image_path)
+
+        return {"message": f"Image {filename} uploaded successfully!"}
+
+if __name__ == '__main__':
+    app.run(debug=True, port=12345)
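Note (not part of the patch): a minimal client-side smoke test for the receiver above, assuming the server is running locally on port 12345 and a local file named sample.jpg exists; the URL is the 'cctv' namespace plus the '/infer' route, matching the new endpoint default in hls.py. One caveat: @ns.expect(image_upload_model, validate=True) asks flask_restx to validate a JSON body, so a multipart upload like this one may be rejected until that decorator is relaxed or swapped for a file-upload parser.

    import requests

    # Hypothetical smoke test for the Flask receiver above (file name and header value are illustrative).
    with open("sample.jpg", "rb") as f:
        resp = requests.post(
            "http://localhost:12345/cctv/infer",
            headers={"x-cctv-info": "test-cctv-01"},
            files={"image": ("frame_test.png", f, "image/png")},
        )
    print(resp.status_code, resp.text)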
--- yoloseg/inference_.py
+++ yoloseg/inference_.py
@@ -105,7 +105,7 @@
         y1 = 0
     x1, y1, x2, y2 = x1, y1, x1 + w, y1 + h
 
-    # To handle edge cases where you get bboxes that pass beyond the original image
+    # To handle edge cases where you get bboxes that pass beyond the original image_binary
     if y2 > image_shape[1]:
         h = h + image_shape[1] - h - y1
     if x2 > image_shape[0]:
@@ -126,7 +126,7 @@
         # Threshold to create a binary mask
         final_mask = (resized_mask > 0.5).astype(np.uint8)
 
-        # Place the mask in the corresponding location on a full-sized mask image
+        # Place the mask in the corresponding location on a full-sized mask image_binary
         full_mask = np.zeros((image_shape[0], image_shape[1]), dtype=np.uint8)
         print(final_mask.shape)
         print(full_mask[y1:y2, x1:x2].shape)
@@ -161,25 +161,25 @@
 
 def overlay_mask(image, mask, color=(0, 255, 0), alpha=0.5):
     """
-    Overlays a mask onto an image using a specified color and transparency level.
+    Overlays a mask onto an image_binary using a specified color and transparency level.
 
     Parameters:
-    image (np.ndarray): The original image.
-    mask (np.ndarray): The mask to overlay. Must be the same size as the image.
+    image (np.ndarray): The original image_binary.
+    mask (np.ndarray): The mask to overlay. Must be the same size as the image_binary.
     color (tuple): The color for the mask overlay in BGR format (default is green).
     alpha (float): Transparency factor for the mask; 0 is fully transparent, 1 is opaque.
 
     Returns:
-    np.ndarray: The image with the overlay.
+    np.ndarray: The image_binary with the overlay.
     """
     # Ensure the mask is a binary mask
     mask = (mask > 0).astype(np.uint8)  # Convert mask to binary if not already
 
-    # Create an overlay with the same size as the image but only using the mask area
+    # Create an overlay with the same size as the image_binary but only using the mask area
     overlay = np.zeros_like(image, dtype=np.uint8)
     overlay[mask == 1] = color
 
-    # Blend the overlay with the image using the alpha factor
+    # Blend the overlay with the image_binary using the alpha factor
     return cv2.addWeighted(src1=overlay, alpha=alpha, src2=image, beta=1 - alpha, gamma=0)
 
 
@@ -199,10 +199,10 @@
         run_with_cuda=True
     )
 
-    # Load an image
+    # Load an image_binary
     img = cv2.imread(image_path)
     if img is None:
-        print("Error loading image")
+        print("Error loading image_binary")
         return
     img = cv2.resize(img, model_input_shape)
     # Run inference
@@ -221,7 +221,7 @@
         label = f"{class_name}: {confidence:.2f}"
         cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, detection['color'], 2)
 
-    # Show the image
+    # Show the image_binary
     # cv2.imshow('Detections', img)
     # cv2.waitKey(0)
     # cv2.destroyAllWindows()
@@ -256,7 +256,7 @@
     for iteration, image_path in enumerate(image_dir):
         img = cv2.imread(image_path)
         if img is None:
-            print("Error loading image")
+            print("Error loading image_binary")
             return
         img = cv2.resize(img, model_input_shape)
         # Run inference
@@ -267,17 +267,17 @@
         print(t2-t1)
 
         # Display results
-        for detection in detections:
-            x, y, w, h = detection['box']
-            class_name = detection['class_name']
-            confidence = detection['confidence']
-            cv2.rectangle(img, (x, y), (x+w, y+h), detection['color'], 2)
-            label = f"{class_name}: {confidence:.2f}"
-            cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, detection['color'], 2)
-
-        if len(mask_maps) > 0 :
-            seg_image = overlay_mask(img, mask_maps[0], color=(0, 255, 0), alpha=0.3)
-            cv2.imwrite(f"result/{iteration}.png", seg_image)
+        # for detection in detections:
+        #     x, y, w, h = detection['box']
+        #     class_name = detection['class_name']
+        #     confidence = detection['confidence']
+        #     cv2.rectangle(img, (x, y), (x+w, y+h), detection['color'], 2)
+        #     label = f"{class_name}: {confidence:.2f}"
+        #     cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, detection['color'], 2)
+        #
+        # if len(mask_maps) > 0 :
+        #     seg_image = overlay_mask(img, mask_maps[0], color=(0, 255, 0), alpha=0.3)
+        #     cv2.imwrite(f"result/{iteration}.png", seg_image)
 
 
 if __name__ == "__main__":
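Note (not part of the patch): overlay_mask above blends a solid-color layer into the frame with cv2.addWeighted. A minimal usage sketch, assuming a BGR frame, a same-sized binary mask, and that the module is importable as yoloseg.inference_; the synthetic frame and mask are illustrative.

    import cv2
    import numpy as np
    from yoloseg.inference_ import overlay_mask  # assumed import path

    frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in BGR frame
    mask = np.zeros((480, 640), dtype=np.uint8)
    mask[100:200, 150:300] = 1                        # pretend segmentation region

    # result = 0.3 * overlay + 0.7 * frame, per overlay_mask's addWeighted call
    blended = overlay_mask(frame, mask, color=(0, 255, 0), alpha=0.3)
    cv2.imwrite("overlay_demo.png", blended)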