윤영준 윤영준 10-08
testing suites
@d2813695829c67dfd6d951f9e996152980ab0278
hls_streaming/hls.py
--- hls_streaming/hls.py
+++ hls_streaming/hls.py
@@ -9,7 +9,8 @@
 from datetime import datetime
 from threading import Lock, Thread, Event
 
-DEBUG = False
+DEBUG = True
+SUPPRESS_MESSAGE = True
 
 
 class FrameCapturer:
@@ -125,9 +126,10 @@
             response = session.post(self.endpoint, headers=header, data=multipart_data)
 
         except Exception as e:
-            print(e)
-            print("Can not connect to the analyzer server. Check the endpoint address or connection.\n"
-                  f"Can not connect to : {self.endpoint}")
+            if not SUPPRESS_MESSAGE:
+                print(e)
+                print("Can not connect to the analyzer server. Check the endpoint address or connection.\n"
+                      f"Can not connect to : {self.endpoint}")
 
     def start(self):
         print(self.hls_url)
@@ -146,7 +148,7 @@
 # Example usage
 if __name__ == "__main__":
     capturer = FrameCapturer(
-        'http://cctvsec.ktict.co.kr/5545/LFkDslDT81tcSYh3G4306+mcGlLb3yShF9rx2vcPfltwUL4+I950kcBlD15uWm6K0cKCtAMlxsIptMkCDo5lGQiLlARP+SyUloz8vIMNB18=',
+        'http://cctvsec.ktict.co.kr/72957/LkVa3bIbXIBudmzokRTyNtAbqEoZCk+DHRZ8i6cs+rL6QIEM7Fv1N+wfJPjSFQOE2mi3PFCExH9gQhypA++a5DTmeAp2DUZ6xqXs22FgPvI=',
         "[국도] 테해란로", 10, 5
     )
     t1 = time.time()
inference_endpoint.py
--- inference_endpoint.py
+++ inference_endpoint.py
@@ -8,7 +8,7 @@
 from zoneinfo import ZoneInfo
 
 from flask import Flask, request
-from flask_restx import Api, Resource, fields
+from flask_restx import Api, Resource, fields, reqparse
 import requests
 from requests_toolbelt import MultipartEncoder
 import base64
@@ -38,13 +38,13 @@
 )
 
 # Define the expected model for incoming data
-image_upload_model = api.model('ImageUpload', {
-    'image': fields.String(required=True, description='Image file', dt='File'),
-    'x-cctv-info': fields.String(required=False, description='CCTV identifier'),
-    'x-time-sent': fields.String(required=False, description='Time image was sent'),
-    'x-cctv-latitude': fields.String(required=False, description='Latitude of CCTV'),
-    'x-cctv-longitude': fields.String(required=False, description='Longitude of CCTV')
-})
# Request parser for the /infer endpoint: one multipart image file plus
# optional CCTV metadata sent as headers.
# BUG FIX: reqparse's `type` must be a callable/class used to convert the
# value. The original passed the *string* 'FileStorage', which flask-restx
# would try to call as a converter and fail on the first upload. Use the
# werkzeug FileStorage class (werkzeug ships with flask, already imported).
from werkzeug.datastructures import FileStorage

upload_parser = reqparse.RequestParser()
upload_parser.add_argument('file', location='files', type=FileStorage, required=True, help='Image file to upload')
upload_parser.add_argument('x-cctv-name', location='headers', required=False, help='CCTV identifier')
upload_parser.add_argument('x-time-sent', location='headers', required=False, help='Time image was sent')
upload_parser.add_argument('x-cctv-latitude', location='headers', required=False, help='Latitude of CCTV')
upload_parser.add_argument('x-cctv-longitude', location='headers', required=False, help='Longitude of CCTV')
+
 
 # Define the directory where images will be saved
 IMAGE_DIR = "network_test"
@@ -53,7 +53,7 @@
 
 @ns.route('/infer', )
 class ImageUpload(Resource):
-    # @ns.expect(image_upload_model, validate=True)
+    @ns.expect(upload_parser)
     def __init__(self, *args, **kargs):
         super().__init__(*args, **kargs)
         self.time_sent = None
@@ -185,4 +185,4 @@
 
 
 if __name__ == '__main__':
-    app.run(debug=False, port=12345)
+    app.run(debug=True, port=12345)
 
testing.html (added)
+++ testing.html
@@ -0,0 +1,178 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>이미지 처리</title>
+    <style>
+        body {
+            font-family: Arial, sans-serif;
+            display: flex;
+            flex-direction: column;
+            height: 100vh;
+            margin: 0;
+        }
+        .header {
+            background-color: #1E4E90;
+            color: white;
+            text-align: center;
+            padding: 10px 0;
+            position: fixed;
+            width: 100%;
+            top: 0;
+            z-index: 1;
+        }
+        .container {
+            text-align: center;
+            margin-top: 100px; /* .header의 높이 만큼 여백 추가 */
+        }
+        #preview img {
+            max-width: 100%;
+            height: auto;
+            margin-top: 20px;
+        }
+        #processButton {
+            background-color: #1E4E90;
+            color: white;
+            border: none;
+            padding: 10px 20px;
+            text-align: center;
+            text-decoration: none;
+            display: inline-block;
+            font-size: 16px;
+            margin-top: 20px;
+            cursor: pointer;
+        }
+        #processButton:hover {
+            background-color: #1E4E90;
+        }
+        #copyright {
+            background-color: #f1f1f1;
+            text-align: center;
+            padding: 10px;
+            position: fixed;
+            width: 100%;
+            bottom: 0;
+        }
+        #messageContainer {
+            display: flex;
+            justify-content: center;
+            height: 100vh;
+        }
+        #messageContainer div{
+            margin-top:30px;
+        }
+    </style>
+</head>
+<body>
+    <div class="header">
+        <h1>침수감지모델 테스트도구</h1>
+    </div>
+    <div class="container">
+        <div id="preview"></div>
+        <input type="file" id="imageInput">
+        <button id="processButton">이미지 처리</button>
+        <button id="clearButton">Clear</button> <!-- Clear 버튼 추가 -->
+
+    </div>
+    <div id="messageContainer"></div>
+
+    <div id="copyright">
+        <p>COPYRIGHT © 2024 SOLFAC. All Rights Reserved.</p>
+    </div>
+
+    <script>
+        const fileInput = document.getElementById('imageInput');
+        const processButton = document.getElementById('processButton');
+        const preview = document.getElementById('preview');
+        const messageContainer = document.getElementById('messageContainer'); // 메시지를 담을 컨테이너 추가
+
+        clearButton.addEventListener('click', function() {
+            // 메시지 컨테이너의 내용을 비움
+            preview.innerHTML = '';
+            messageContainer.innerHTML = '';
+        });
+        fileInput.addEventListener('change', function(event) {
+            const file = event.target.files[0];
+            const reader = new FileReader();
+
+            reader.onloadend = function() {
+                // 이미지 미리보기를 생성하여 화면에 표시
+                const img = new Image();
+                img.src = reader.result;
+                img.style.width = '1080px'; // 미리보기 이미지의 너비 조절
+                preview.innerHTML = ''; // 이전 미리보기 삭제
+                preview.appendChild(img); // 새로운 미리보기 추가
+
+                // 파일 정보를 기반으로 FormData를 생성하여 서버에 전송할 준비
+                const formData = new FormData();
+                formData.append('file', file); // 이미지 파일을 FormData에 추가
+
+                const LAT = "36.123"; // Replace with the actual latitude value
+                const LON = "123.123"; // Replace with the actual longitude value
+                const FILENAME = file.name; // Use the selected file's name as the filename
+                const FILE_TYPE = file.type; // Use the selected file's type as the file type
+
+                // 여기서 formData와 FILENAME, FILE_TYPE을 이용하여 서버에 요청을 보내면 됩니다.
+                // ...
+            };
+
+            if (file) {
+                reader.readAsDataURL(file);
+            }
+        });
+
+        processButton.addEventListener('click', async function() {
+            const file = fileInput.files[0];
+
+            if (file) {
+
+                const LAT = "36.123"; // Replace with the actual latitude value
+                const LON = "123.123"; // Replace with the actual longitude value
+                const FILENAME = file.name; // Use the selected file's name as the filename
+                const FILE_TYPE = file.type; // Use the selected file's type as the file type
+                const formData = new FormData();
+                formData.append("data", JSON.stringify({ gps_x: LAT, gps_y: LON, filename: FILENAME, file_type: FILE_TYPE }));
+                const previewBlob = await fetch(preview.querySelector('img').src).then(res => res.blob());
+                formData.append('file', previewBlob, FILENAME); // Blob 객체와 원래 파일 이름을 전송
+            
+            
+
+                try {
+                    const URL = "http://127.0.0.1:12345/cctv/infer"; // Replace with the actual server URL
+                    const response = await fetch(URL, {
+                        method: "POST",
+                        body: formData
+                    });
+            
+                    if (response.ok) {
+                        const data = await response.json();
+                        console.log(data); // 서버에서 받은 응답 처리
+            
+                        // data 객체에서 rain이 true인 경우 메시지 표시
+                        if (data.rain === true) {
+                            const message = document.createElement('div');
+                            message.textContent = '비 사진 감지되었습니다.';
+                            messageContainer.innerHTML = ''; // 기존 메시지 삭제
+                            messageContainer.appendChild(message); // 새로운 메시지 추가
+                        } else {
+                            const message = document.createElement('div');
+                            message.textContent = '일반 사진 감지되었습니다.';
+                            messageContainer.innerHTML = ''; // 기존 메시지 삭제
+                            messageContainer.appendChild(message); // 새로운 메시지 추가
+                        }
+                    } else {
+                        console.error("Error occurred while sending the request:", response.statusText);
+                    }
+                } catch (error) {
+                    console.error("Error occurred:", error);
+                }
+            } else {
+                console.error("No file selected.");
+            }
+        });
+
+
+    </script>
+</body>
+</html>(파일 끝에 줄바꿈 문자 없음)
yoloseg/inference_gpu_.py
--- yoloseg/inference_gpu_.py
+++ yoloseg/inference_gpu_.py
@@ -312,7 +312,103 @@
     # cv2.destroyAllWindows()
 
 
def process_video(video_path, output_dir, model_input_shape=(480, 480)):
    """Run segmentation inference on every frame of a video and log timings.

    Each decoded frame is resized to ``model_input_shape``, run through the
    ONNX segmentation model, annotated with boxes/labels/masks, and the
    per-frame inference time is written to ``<output_dir>/inference.csv``
    (one row per frame plus a final "Average" row).

    Parameters
    ----------
    video_path : str
        Path of the video file to decode (anything PyAV can open).
    output_dir : str
        Directory for the timing CSV; created if it does not exist.
    model_input_shape : tuple[int, int], optional
        Size the frames are resized to before inference. Defaults to
        (480, 480).

    Returns
    -------
    None
    """
    import os
    import av
    import csv

    model_path = 'yoloseg/weight/best.onnx'
    classes_txt_file = 'config_files/yolo_config.txt'

    # BUG FIX: the original re-assigned model_input_shape = (480, 480) here,
    # silently discarding the caller-supplied value; the parameter is now
    # honored as documented.
    inference_engine = Inference(
        onnx_model_path=model_path,
        model_input_shape=model_input_shape,
        classes_txt_file=classes_txt_file,
        run_with_cuda=True
    )

    frame_times = []  # per-frame inference duration in seconds
    frame_count = 0

    # Create the output directory if it doesn't exist.
    os.makedirs(output_dir, exist_ok=True)

    # Decode with PyAV; the context manager guarantees the container is
    # closed even if inference raises (the original leaked it).
    with av.open(video_path) as container:
        # Frame rate only matters for the (currently disabled) display pacing.
        video_fps = container.streams.video[0].average_rate
        if video_fps is None:
            video_fps = 25  # default to 25 FPS if the stream has no rate

        for frame in container.decode(video=0):
            frame_count += 1

            # PyAV frame -> numpy array in OpenCV's BGR layout.
            img = frame.to_ndarray(format='bgr24')

            t1 = time.time()

            # NOTE(review): cv2.resize expects (width, height); this is only
            # interchangeable with a model (h, w) while the shape is square.
            resized_frame = cv2.resize(img, model_input_shape)

            detections, mask_maps = inference_engine.run_inference(resized_frame)

            # Draw bounding boxes and confidence labels.
            for detection in detections:
                x, y, w, h = detection['box']
                label = f"{detection['class_name']}: {detection['confidence']:.2f}"
                cv2.rectangle(resized_frame, (x, y), (x + w, y + h), detection['color'], 2)
                cv2.putText(resized_frame, label, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, detection['color'], 2)

            # Overlay each instance mask, if any were produced.
            if len(mask_maps) != 0:
                for i in range(mask_maps.shape[2]):  # iterate over each mask
                    resized_frame = overlay_mask(resized_frame, mask_maps[:, :, i],
                                                 color=(0, 255, 0), alpha=0.3)

            # Display / frame saving intentionally disabled:
            # cv2.imshow('Processed Frame', resized_frame)
            # cv2.imwrite(os.path.join(output_dir, f"frame_{frame_count}.jpg"), resized_frame)

            t2 = time.time()
            frame_time = t2 - t1
            frame_times.append(frame_time)
            print(f"Frame {frame_count} inference time: {frame_time:.4f} seconds")

            # NOTE(review): with no imshow window open, waitKey returns -1
            # immediately, so this neither paces playback nor reacts to 'q'.
            # Kept only so behavior matches when the display lines above are
            # re-enabled.
            if cv2.waitKey(int(1000 / video_fps)) & 0xFF == ord('q'):
                break

    # Close any OpenCV windows (no-op while display is disabled).
    cv2.destroyAllWindows()

    # Persist timings: one row per frame, then the average (0 if no frames).
    avg_inference_time = sum(frame_times) / len(frame_times) if frame_times else 0
    output_csv_path = os.path.join(output_dir, "inference.csv")

    with open(output_csv_path, mode='w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(["Frame", "Inference Time (seconds)"])
        for i, time_val in enumerate(frame_times):
            writer.writerow([i + 1, time_val])
        writer.writerow(["Average", avg_inference_time])

    print(f"Average inference time: {avg_inference_time:.4f} seconds")
    print(f"Inference times saved to {output_csv_path}")
 
 
 if __name__ == "__main__":
-    test()
(파일 끝에 줄바꿈 문자 없음)
+    # test()
+
+    process_video("/home/juni/사진/flood/test_video.mp4", "./")
(파일 끝에 줄바꿈 문자 없음)
Add a comment
List