+++ DB/db.py
... | ... | @@ -0,0 +1,12 @@ |
1 | +import psycopg2 | |
2 | +import json | |
3 | +import requests | |
4 | +import time | |
5 | + | |
6 | + | |
async def get_cctv_info():
    """Stub: retrieve CCTV metadata (not implemented yet — currently a no-op)."""
    return None
9 | + | |
def setup_db():
    """Stub: initialise the database (not implemented yet — currently a no-op)."""
    return None
12 | + |
--- ITS/api.py
+++ ITS/api.py
... | ... | @@ -66,6 +66,7 @@ |
66 | 66 |
# df.to_csv(f"result/pohang/listofcctv_포항_{x}_{y}.csv", index=False) |
67 | 67 |
time.sleep(1) |
68 | 68 |
print(f"{i}, {j}") |
69 |
+ all_data_df.to_csv(f"result/{xmin}_{xmax}_y{ymin}_{ymax}.csv") |
|
69 | 70 |
return all_data_df |
70 | 71 |
|
71 | 72 |
def get_jpeg(url): |
... | ... | @@ -77,6 +78,6 @@ |
77 | 78 |
|
78 | 79 |
|
79 | 80 |
if __name__ == "__main__": |
80 |
- df = gather_cctv_list(129.28, 129.35, 35.999, 36.07, 1, 1) |
|
81 |
+ df = gather_cctv_list(129.2, 129.3, 35.9, 36.07, 1, 1) |
|
81 | 82 |
pass |
82 | 83 |
# get_jpeg("http://cctvsec.ktict.co.kr:8090/74236/IM2NQs4/uHZcgnvJo3V/mjo3tswwgUj87kpcYZfR/BPxaQ4lk9agnl8ARIB9lhlgOD87VBx6RDHFl423kLkqHQ==")(파일 끝에 줄바꿈 문자 없음) |
--- hls_streaming/hls.py
+++ hls_streaming/hls.py
... | ... | @@ -23,6 +23,7 @@ |
23 | 23 |
self.buffer_duration = buffer_duration |
24 | 24 |
self.buffer_size = buffer_size |
25 | 25 |
self.frame_buffer = [] |
26 |
+ self.current_frame = [] |
|
26 | 27 |
self.frame_buffer_lock = Lock() # for no memory sharing between receive_stream_packet and process_frames |
27 | 28 |
self.captured_frame_count = 0 |
28 | 29 |
self.last_capture_time = 0 |
... | ... | @@ -37,6 +38,9 @@ |
37 | 38 |
self.cctvid = cctv_id |
38 | 39 |
self.time_zone = ZoneInfo(time_zone) |
39 | 40 |
self.endpoint = endpoint |
41 |
+ |
|
42 |
    def __call__(self, *args, **kwargs):
        """Return the most recently processed frame.

        NOTE(review): ``self.current_frame`` starts as ``[]`` in ``__init__``
        and is later replaced by a BGR image array in the frame-processing
        loop — callers must handle the initial empty value. ``*args`` and
        ``**kwargs`` are accepted but ignored.
        """
        return self.current_frame
|
40 | 44 |
|
41 | 45 |
|
42 | 46 |
# ```receive_stream_packet``` and ```process_frames``` work asynchronously (called with Thread) |
... | ... | @@ -61,10 +65,10 @@ |
61 | 65 |
self.frame_buffer = self.frame_buffer[-self.buffer_size:] |
62 | 66 |
buffered_frame = self.frame_buffer[-1] |
63 | 67 |
# print(len(self.frame_buffer)) |
64 |
- img = buffered_frame.to_image() |
|
65 |
- img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) |
|
68 |
+ self.current_frame = buffered_frame.to_image() |
|
69 |
+ self.current_frame = cv2.cvtColor(np.array(self.current_frame), cv2.COLOR_RGB2BGR) |
|
66 | 70 |
frame_name = f"captured_frame_{self.captured_frame_count}.jpg" |
67 |
- img_binary = cv2.imencode('.png', img) |
|
71 |
+ img_binary = cv2.imencode('.png', self.current_frame) |
|
68 | 72 |
self.send_image_to_server(img_binary, self.endpoint) |
69 | 73 |
# cv2.imwrite(f'hls_streaming/captured_frame_/{datetime.now()}_{frame_name}', img) |
70 | 74 |
self.last_capture_time = current_time |
... | ... | @@ -79,6 +83,8 @@ |
79 | 83 |
'Content-Type': f'image/{image_type}', |
80 | 84 |
'x-time-sent': time_sent, |
81 | 85 |
'x-cctv-info': str(self.cctvid), |
86 |
+ 'x-cctv-latitude' : '', |
|
87 |
+ 'x-cctv-longitude' : '', |
|
82 | 88 |
} |
83 | 89 |
try: |
84 | 90 |
requests.post(endpoint, headers=header, files=image) |
... | ... | @@ -102,8 +108,7 @@ |
102 | 108 |
# Example usage |
103 | 109 |
if __name__ == "__main__": |
104 | 110 |
capturer = FrameCapturer( |
105 |
- 'http://cctvsec.ktict.co.kr/73496/' |
|
106 |
- '7xhDlyfDPK1AtaOUkAUDUJgZvfqvRXYYZUmRLxgPgKXk+eEtIJIfGkiC/gcQmysaz7zhDW2Jd8qhPCxgpo7cn5VqArnowyKjUePjdAmuQQ8=', |
|
111 |
+ 'http://cctvsec.ktict.co.kr/71187/bWDrL7fpStZDeDZgCybpJH8gagWJOynbaA/l91ExpmUPKzc3bCsHJtIblDkzG3Tff2tHy5NNkb6NtYTbie/jNQ0F+PnejViTbKHkpMWNGpc=', |
|
107 | 112 |
101, 10 |
108 | 113 |
) |
109 | 114 |
t1 = time.time() |
--- yoloseg/inference_.py
+++ yoloseg/inference_.py
... | ... | @@ -48,8 +48,10 @@ |
48 | 48 |
boxes = [] |
49 | 49 |
|
50 | 50 |
for detection in outputs_bbox[0].T: |
51 |
- # when your weight is trained from pretrained weight, the resulting wieght |
|
52 |
- # may have leftover classes (that does nothing), hence this. |
|
51 |
+ # This segmentation model uses yolact architecture to predict mask |
|
52 |
+ # the output tensor dimension for yolo-v8-seg is B x [X, Y, W, H, C1, C2, ..., P1, ...,P32] * 8400 |
|
53 |
+ # where C{n} are confidence score for each class |
|
54 |
+ # and P{n} are coefficient for each proto masks. (32 by default) |
|
53 | 55 |
scores_classification = detection[4:4+CLASS_NUM] |
54 | 56 |
scores_segmentation = detection[4+CLASS_NUM:] |
55 | 57 |
class_id = np.argmax(scores_classification, axis=0) |
... | ... | @@ -95,6 +97,12 @@ |
95 | 97 |
for idx, det in enumerate(detections): |
96 | 98 |
box = det['box'] |
97 | 99 |
x1, y1, w, h = box |
100 |
+ |
|
101 |
+ #... why the model outputs ... negative values?... |
|
102 |
+ if x1 <= 0 : |
|
103 |
+ x1 = 0 |
|
104 |
+ if y1 <= 0 : |
|
105 |
+ y1 = 0 |
|
98 | 106 |
x1, y1, x2, y2 = x1, y1, x1 + w, y1 + h |
99 | 107 |
|
100 | 108 |
# To handle edge cases where you get bboxes that pass beyond the original image |
... | ... | @@ -175,10 +183,8 @@ |
175 | 183 |
return cv2.addWeighted(src1=overlay, alpha=alpha, src2=image, beta=1 - alpha, gamma=0) |
176 | 184 |
|
177 | 185 |
|
178 |
- |
|
179 |
-def main(): |
|
186 |
+def test(): |
|
180 | 187 |
import time |
181 |
- |
|
182 | 188 |
|
183 | 189 |
# Path to your ONNX model and classes text file |
184 | 190 |
model_path = 'yoloseg/weight/best.onnx' |
... | ... | @@ -229,5 +235,50 @@ |
229 | 235 |
cv2.waitKey(0) |
230 | 236 |
cv2.destroyAllWindows() |
231 | 237 |
|
238 |
def test2():
    """Batch smoke test: run segmentation inference over a directory of PNGs.

    For every image in the hard-coded sample directory, runs the ONNX
    YOLO-seg model, times the inference, draws detection boxes/labels,
    overlays the first mask (if any), and writes the annotated result to
    ``result/<iteration>.png``.
    """
    import time
    import glob

    # Path to your ONNX model and classes text file
    model_path = 'yoloseg/weight/best.onnx'
    classes_txt_file = 'yoloseg/config/classes.txt'

    model_input_shape = (640, 640)
    inference_engine = Inference(
        onnx_model_path=model_path,
        model_input_shape=model_input_shape,
        classes_txt_file=classes_txt_file,
        run_with_cuda=True
    )

    image_dir = glob.glob("/home/juni/사진/sample_data/ex1/*.png")

    for iteration, image_path in enumerate(image_dir):
        img = cv2.imread(image_path)
        if img is None:
            # BUG FIX: this previously `return`ed, so one unreadable file
            # aborted the whole batch — skip just this image instead.
            print("Error loading image")
            continue
        img = cv2.resize(img, model_input_shape)
        # Run inference (timed)
        t1 = time.time()
        detections, mask_maps = inference_engine.run_inference(img)
        t2 = time.time()

        print(t2-t1)

        # Draw detection results onto the (resized) input image
        for detection in detections:
            x, y, w, h = detection['box']
            class_name = detection['class_name']
            confidence = detection['confidence']
            cv2.rectangle(img, (x, y), (x+w, y+h), detection['color'], 2)
            label = f"{class_name}: {confidence:.2f}"
            cv2.putText(img, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, detection['color'], 2)

        # NOTE(review): only the first mask is overlaid; confirm whether all
        # entries of ``mask_maps`` should be drawn.
        if len(mask_maps) > 0:
            seg_image = overlay_mask(img, mask_maps[0], color=(0, 255, 0), alpha=0.3)
            cv2.imwrite(f"result/{iteration}.png", seg_image)
|
232 | 283 |
if __name__ == "__main__":
    # Script entry point: run the batch-directory smoke test (test2),
    # which replaced the earlier single-image main()/test() run.
    test2()
Add a comment
Delete comment
Once you delete this comment, you won't be able to recover it. Are you sure you want to delete this comment?