--- hls_streaming/hls.py
+++ hls_streaming/hls.py
... | ... | @@ -9,7 +9,8 @@ |
9 | 9 |
from datetime import datetime |
10 | 10 |
from threading import Lock, Thread, Event |
11 | 11 |
|
12 |
# Module-wide verbose-logging switch.
# TODO(review): committed as True — confirm this should not be reverted to
# False before release.
DEBUG = True
# When True, failures to POST a frame to the analyzer endpoint are swallowed
# silently instead of printed (see the except handler in FrameCapturer's
# upload code, which checks `if not SUPPRESS_MESSAGE`).
SUPPRESS_MESSAGE = True
13 | 14 |
|
14 | 15 |
|
15 | 16 |
class FrameCapturer: |
... | ... | @@ -125,9 +126,10 @@ |
125 | 126 |
response = session.post(self.endpoint, headers=header, data=multipart_data) |
126 | 127 |
|
127 | 128 |
except Exception as e: |
128 |
- print(e) |
|
129 |
- print("Can not connect to the analyzer server. Check the endpoint address or connection.\n" |
|
130 |
- f"Can not connect to : {self.endpoint}") |
|
129 |
+ if not SUPPRESS_MESSAGE: |
|
130 |
+ print(e) |
|
131 |
+ print("Can not connect to the analyzer server. Check the endpoint address or connection.\n" |
|
132 |
+ f"Can not connect to : {self.endpoint}") |
|
131 | 133 |
|
132 | 134 |
def start(self): |
133 | 135 |
print(self.hls_url) |
... | ... | @@ -146,7 +148,7 @@ |
146 | 148 |
# Example usage |
147 | 149 |
if __name__ == "__main__": |
148 | 150 |
capturer = FrameCapturer( |
149 |
- 'http://cctvsec.ktict.co.kr/5545/LFkDslDT81tcSYh3G4306+mcGlLb3yShF9rx2vcPfltwUL4+I950kcBlD15uWm6K0cKCtAMlxsIptMkCDo5lGQiLlARP+SyUloz8vIMNB18=', |
|
151 |
+ 'http://cctvsec.ktict.co.kr/72957/LkVa3bIbXIBudmzokRTyNtAbqEoZCk+DHRZ8i6cs+rL6QIEM7Fv1N+wfJPjSFQOE2mi3PFCExH9gQhypA++a5DTmeAp2DUZ6xqXs22FgPvI=', |
|
150 | 152 |
"[국도] 테해란로", 10, 5 |
151 | 153 |
) |
152 | 154 |
t1 = time.time() |
--- inference_endpoint.py
+++ inference_endpoint.py
... | ... | @@ -8,7 +8,7 @@ |
8 | 8 |
from zoneinfo import ZoneInfo |
9 | 9 |
|
10 | 10 |
from flask import Flask, request |
11 |
-from flask_restx import Api, Resource, fields |
|
11 |
+from flask_restx import Api, Resource, fields, reqparse |
|
12 | 12 |
import requests |
13 | 13 |
from requests_toolbelt import MultipartEncoder |
14 | 14 |
import base64 |
... | ... | @@ -38,13 +38,13 @@ |
38 | 38 |
) |
39 | 39 |
|
40 | 40 |
# Define the expected model for incoming data |
41 |
-image_upload_model = api.model('ImageUpload', { |
|
42 |
- 'image': fields.String(required=True, description='Image file', dt='File'), |
|
43 |
- 'x-cctv-info': fields.String(required=False, description='CCTV identifier'), |
|
44 |
- 'x-time-sent': fields.String(required=False, description='Time image was sent'), |
|
45 |
- 'x-cctv-latitude': fields.String(required=False, description='Latitude of CCTV'), |
|
46 |
- 'x-cctv-longitude': fields.String(required=False, description='Longitude of CCTV') |
|
47 |
-}) |
|
41 |
# Request parser describing the multipart upload accepted by /infer.
# BUG FIX: `type` must be a callable (a class), not the string 'FileStorage'.
# reqparse calls `type(value)` to coerce the argument, so passing a string
# raises "TypeError: 'str' object is not callable" on every upload.
from werkzeug.datastructures import FileStorage  # Flask's bundled WSGI toolkit

upload_parser = reqparse.RequestParser()
upload_parser.add_argument('file', location='files', type=FileStorage,
                           required=True, help='Image file to upload')
upload_parser.add_argument('x-cctv-name', location='headers', required=False,
                           help='CCTV identifier')
upload_parser.add_argument('x-time-sent', location='headers', required=False,
                           help='Time image was sent')
upload_parser.add_argument('x-cctv-latitude', location='headers', required=False,
                           help='Latitude of CCTV')
upload_parser.add_argument('x-cctv-longitude', location='headers', required=False,
                           help='Longitude of CCTV')
48 | 48 |
|
49 | 49 |
# Define the directory where images will be saved |
50 | 50 |
IMAGE_DIR = "network_test" |
... | ... | @@ -53,7 +53,7 @@ |
53 | 53 |
|
54 | 54 |
@ns.route('/infer', ) |
55 | 55 |
class ImageUpload(Resource): |
56 |
- # @ns.expect(image_upload_model, validate=True) |
|
56 |
+ @ns.expect(upload_parser) |
|
57 | 57 |
def __init__(self, *args, **kargs): |
58 | 58 |
super().__init__(*args, **kargs) |
59 | 59 |
self.time_sent = None |
... | ... | @@ -185,4 +185,4 @@ |
185 | 185 |
|
186 | 186 |
|
187 | 187 |
if __name__ == '__main__':
    # SECURITY FIX: never ship with debug=True — the interactive Werkzeug
    # debugger allows arbitrary code execution by anyone who can reach the
    # port. Enable it only ad hoc, never in a committed default.
    app.run(debug=False, port=12345)
+++ testing.html
... | ... | @@ -0,0 +1,178 @@ |
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>이미지 처리</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            display: flex;
            flex-direction: column;
            height: 100vh;
            margin: 0;
        }
        .header {
            background-color: #1E4E90;
            color: white;
            text-align: center;
            padding: 10px 0;
            position: fixed;
            width: 100%;
            top: 0;
            z-index: 1;
        }
        .container {
            text-align: center;
            margin-top: 100px; /* offset for the fixed .header */
        }
        #preview img {
            max-width: 100%;
            height: auto;
            margin-top: 20px;
        }
        #processButton {
            background-color: #1E4E90;
            color: white;
            border: none;
            padding: 10px 20px;
            text-align: center;
            text-decoration: none;
            display: inline-block;
            font-size: 16px;
            margin-top: 20px;
            cursor: pointer;
        }
        #processButton:hover {
            background-color: #1E4E90;
        }
        #copyright {
            background-color: #f1f1f1;
            text-align: center;
            padding: 10px;
            position: fixed;
            width: 100%;
            bottom: 0;
        }
        #messageContainer {
            display: flex;
            justify-content: center;
            height: 100vh;
        }
        #messageContainer div {
            margin-top: 30px;
        }
    </style>
</head>
<body>
    <div class="header">
        <h1>침수감지모델 테스트도구</h1>
    </div>
    <div class="container">
        <div id="preview"></div>
        <input type="file" id="imageInput">
        <button id="processButton">이미지 처리</button>
        <button id="clearButton">Clear</button>
    </div>
    <div id="messageContainer"></div>

    <div id="copyright">
        <p>COPYRIGHT © 2024 SOLFAC. All Rights Reserved.</p>
    </div>

    <script>
        const fileInput = document.getElementById('imageInput');
        const processButton = document.getElementById('processButton');
        // BUG FIX: clearButton was referenced without being declared, relying
        // on the implicit window.<id> global (breaks in strict mode / modules).
        const clearButton = document.getElementById('clearButton');
        const preview = document.getElementById('preview');
        const messageContainer = document.getElementById('messageContainer');

        // Reset the preview image and any result message.
        clearButton.addEventListener('click', function() {
            preview.innerHTML = '';
            messageContainer.innerHTML = '';
        });

        // Show a preview of the selected image. (Dead FormData-building code
        // that duplicated the process handler was removed here.)
        fileInput.addEventListener('change', function(event) {
            const file = event.target.files[0];
            const reader = new FileReader();

            reader.onloadend = function() {
                const img = new Image();
                img.src = reader.result;
                img.style.width = '1080px'; // preview display width
                preview.innerHTML = '';     // drop previous preview
                preview.appendChild(img);   // attach new preview
            };

            if (file) {
                reader.readAsDataURL(file);
            }
        });

        // POST the previewed image to the inference endpoint and render the
        // server's rain/no-rain verdict.
        processButton.addEventListener('click', async function() {
            const file = fileInput.files[0];

            if (file) {
                const LAT = "36.123"; // Replace with the actual latitude value
                const LON = "123.123"; // Replace with the actual longitude value
                const FILENAME = file.name;
                const FILE_TYPE = file.type;

                const formData = new FormData();
                formData.append("data", JSON.stringify({ gps_x: LAT, gps_y: LON, filename: FILENAME, file_type: FILE_TYPE }));
                // Re-read the previewed data URL as a Blob so exactly what the
                // user sees is what gets uploaded.
                const previewBlob = await fetch(preview.querySelector('img').src).then(res => res.blob());
                formData.append('file', previewBlob, FILENAME);

                try {
                    const URL = "http://127.0.0.1:12345/cctv/infer"; // Replace with the actual server URL
                    const response = await fetch(URL, {
                        method: "POST",
                        body: formData
                    });

                    if (response.ok) {
                        const data = await response.json();
                        console.log(data);

                        const message = document.createElement('div');
                        message.textContent = (data.rain === true)
                            ? '비 사진 감지되었습니다.'
                            : '일반 사진 감지되었습니다.';
                        messageContainer.innerHTML = '';       // drop previous message
                        messageContainer.appendChild(message); // show the verdict
                    } else {
                        console.error("Error occurred while sending the request:", response.statusText);
                    }
                } catch (error) {
                    console.error("Error occurred:", error);
                }
            } else {
                console.error("No file selected.");
            }
        });
    </script>
</body>
</html>
--- yoloseg/inference_gpu_.py
+++ yoloseg/inference_gpu_.py
... | ... | @@ -312,7 +312,103 @@ |
312 | 312 |
# cv2.destroyAllWindows() |
313 | 313 |
|
314 | 314 |
|
315 |
def process_video(video_path, output_dir, model_input_shape=(480, 480)):
    """Run YOLO-seg inference on every frame of a video and log timings.

    Decodes *video_path* with PyAV, resizes each frame to
    *model_input_shape*, runs the ONNX segmentation model on it, draws the
    detections/masks, and writes per-frame inference times plus their
    average to ``<output_dir>/inference.csv``.

    Args:
        video_path: Path to a video file readable by PyAV.
        output_dir: Directory for the timing CSV; created if absent.
        model_input_shape: (width, height) frames are resized to before
            inference — presumably must match the ONNX model's input;
            TODO confirm against the exported model.
    """
    # Local imports keep these heavyweight deps out of module import time.
    import os
    import av
    import csv

    model_path = 'yoloseg/weight/best.onnx'
    classes_txt_file = 'config_files/yolo_config.txt'

    # BUG FIX: the parameter used to be clobbered here by a hard-coded
    # (480, 480); the caller-supplied shape is now honoured.
    inference_engine = Inference(
        onnx_model_path=model_path,
        model_input_shape=model_input_shape,
        classes_txt_file=classes_txt_file,
        run_with_cuda=True
    )

    # Open video using PyAV.
    container = av.open(video_path)

    frame_times = []  # per-frame inference latency in seconds
    frame_count = 0

    # Create output directory if it doesn't exist (race-free form).
    os.makedirs(output_dir, exist_ok=True)

    # Decode video frame by frame.
    for frame in container.decode(video=0):
        frame_count += 1

        # Convert PyAV frame to a numpy array in OpenCV's BGR layout.
        img = frame.to_ndarray(format='bgr24')

        t1 = time.time()

        # Resize frame to match model input shape.
        resized_frame = cv2.resize(img, model_input_shape)

        # Run inference.
        detections, mask_maps = inference_engine.run_inference(resized_frame)

        # Draw boxes and labels (kept so saved/displayed frames, if
        # re-enabled below, carry the annotations).
        for detection in detections:
            x, y, w, h = detection['box']
            class_name = detection['class_name']
            confidence = detection['confidence']
            cv2.rectangle(resized_frame, (x, y), (x + w, y + h), detection['color'], 2)
            label = f"{class_name}: {confidence:.2f}"
            cv2.putText(resized_frame, label, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, detection['color'], 2)

        # Overlay each segmentation mask channel, when any were produced.
        if len(mask_maps) != 0:
            for i in range(mask_maps.shape[2]):
                resized_frame = overlay_mask(resized_frame, mask_maps[:, :, i],
                                             color=(0, 255, 0), alpha=0.3)

        # Display / frame saving intentionally disabled for benchmarking:
        # cv2.imshow('Processed Frame', resized_frame)
        # output_frame_path = os.path.join(output_dir, f"frame_{frame_count}.jpg")
        # cv2.imwrite(output_frame_path, resized_frame)

        t2 = time.time()
        frame_time = t2 - t1
        frame_times.append(frame_time)
        print(f"Frame {frame_count} inference time: {frame_time:.4f} seconds")

        # PERF FIX: the old cv2.waitKey(1000 / fps) pacing was removed — with
        # imshow disabled there is no window to receive keys, so it only
        # added a pointless per-frame delay to the benchmark.

    cv2.destroyAllWindows()

    # Calculate and save inference times to CSV (guard against zero frames).
    avg_inference_time = sum(frame_times) / len(frame_times) if frame_times else 0
    output_csv_path = os.path.join(output_dir, "inference.csv")

    with open(output_csv_path, mode='w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(["Frame", "Inference Time (seconds)"])
        for i, time_val in enumerate(frame_times):
            writer.writerow([i + 1, time_val])
        writer.writerow(["Average", avg_inference_time])

    print(f"Average inference time: {avg_inference_time:.4f} seconds")
    print(f"Inference times saved to {output_csv_path}")
|
315 | 409 |
|
316 | 410 |
|
317 | 411 |
if __name__ == "__main__":
    # test()

    # Ad-hoc local run writing inference.csv to the current directory.
    # TODO(review): the hard-coded home-directory video path only works on
    # the author's machine — take it from sys.argv before merging.
    process_video("/home/juni/사진/flood/test_video.mp4", "./")
Add a comment
Delete comment
Once you delete this comment, you won't be able to recover it. Are you sure you want to delete this comment?