윤영준 윤영준 2023-10-24
Made the AI analysis API and tested it: 25 TPS using gunicorn app:app -w 12 -b 127.0.0.1:8080
@fc1d4dbbd0dc65b2d522b17d81fd01455ad2cbf8
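
The new /action/image_anal endpoint added below takes a multipart request: a "data" form field carrying JSON (gps_x, gps_y, filename, file_type) plus the image itself in a "file" part, and it returns the node coordinates and a rain flag. A minimal client sketch for exercising it, assuming the app is served at 127.0.0.1:8080 as in the gunicorn command above; the image path, coordinate values, and the requests dependency are assumptions for illustration, not part of this commit:

# Hypothetical client for /action/image_anal (sketch only, not part of this commit).
# Assumes the Flask app is reachable at 127.0.0.1:8080 and that `requests` is installed.
import json
import requests

payload = {
    "gps_x": 35.123,       # placeholder latitude, read by the handler as data['gps_x']
    "gps_y": 128.456,      # placeholder longitude, read as data['gps_y']
    "filename": "sample",  # placeholder values; the handler also expects these keys
    "file_type": ".png",
}

with open("sample.png", "rb") as f:  # placeholder image path
    resp = requests.post(
        "http://127.0.0.1:8080/action/image_anal",
        data={"data": json.dumps(payload)},               # handler reads request.form.get('data')
        files={"file": ("sample.png", f, "image/png")},   # handler reads request.files.get('file')
        timeout=10,
    )

print(resp.status_code, resp.json())  # expected: {"node": [lat, lon], "rain": true/false}
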
action.py
--- action.py
+++ action.py
@@ -1,7 +1,7 @@
 from flask_restx import Resource, Namespace
-from flask import request
-from werkzeug.utils import secure_filename
+from flask import request, jsonify
 import os
+import json
 from database.database import DB
 import torch
 from torchvision.transforms import ToTensor
@@ -9,10 +9,27 @@
 from model.AttentiveRNN import AttentiveRNN
 from model.Classifier import Resnet as Classifier
 from subfuction.image_crop import crop_image
+import numpy as np
+import cv2
 
+db = DB()
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-paths = os.getcwd()
+# pre-loading models
+arnn = AttentiveRNN(6, 3, 2)
+arnn.eval()
+arnn.load_state_dict(torch.load("weights/ARNN_trained_weight_6_3_2.pt"))
+arnn.to(device=device)
+
+classifier = Classifier()
+classifier.eval()
+classifier.load_state_dict(torch.load("weights/Classifier_512.pt"))
+classifier.to(device=device)
+
+tf_toTensor = ToTensor()
+crop_size = (512, 512)
+start_point = (750, 450)
+root_dir = os.getcwd()
 
 Action = Namespace(
     name="Action",
@@ -20,17 +37,17 @@
 )
 
 
[email protected]('/image_summit')
-class fileUpload(Resource):
-    @Action.doc(responses={200: 'Success'})
-    @Action.doc(responses={500: 'Register Failed'})
-    def post(self):
-        if request.method == 'POST':
-            f = request.files['file']
-            f.save(secure_filename(f.filename))
-            return {
-                'save': 'done'  # return as a string
-            }, 200
+# @Action.route('/image_summit')
+# class fileUpload(Resource):
+#     @Action.doc(responses={200: 'Success'})
+#     @Action.doc(responses={500: 'Register Failed'})
+#     def post(self):
+#         if request.method == 'POST':
+#             f = request.files['file']
+#             f.save(secure_filename(f.filename))
+#             return {
+#                 'save': 'done'  # return as a string
+#             }, 200
 
 
 @Action.route('/image_anal')
@@ -38,48 +55,49 @@
     @Action.doc(responses={200: 'Success'})
     @Action.doc(responses={500: 'Register Failed'})
     def post(self):
-        if request.method == 'POST':
-            db = DB()
-            arnn = AttentiveRNN(6, 3, 2)
-            arnn.load_state_dict(torch.load("weights/ARNN_trained_weight_6_3_2.pt"))
-            arnn.to(device=device)
-            crop_size = (512, 512)
-            start_point = (750, 450)
-            tf_toTensor = ToTensor()
-            classifier = Classifier()
-            classifier.load_state_dict(torch.load("weights/Classifier_512.pt"))
-            classifier.to(device=device)
-            dir = os.getcwd()
-            lat = float(request.json['gps_x'])
-            lon = float(request.json['gps_y'])
-            filename = request.json['filename']
-            file_type = request.json['file_type']
-            total_path = dir + os.path.sep + filename + file_type
-            image = crop_image(total_path, crop_size, start_point)
-            if not image:
-                return {
-                    'node': (lat, lon),
-                    'rain': None,
-                }, 500
-            image_tensor = tf_toTensor(image)
-            image_tensor = image_tensor.unsqueeze(0)
-            image_tensor = image_tensor.to(device)
+        # Extracting JSON data
+        json_data = request.form.get('data')
+        if not json_data:
+            return jsonify({"message": "Missing JSON data"}), 400
+        data = json.loads(json_data)
+
+        lat = float(data['gps_x'])
+        lon = float(data['gps_y'])
+        filename = data['filename']
+        file_type = data['file_type']
+
+        uploaded_file = request.files.get('file')
+        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
+        image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
+
+        image = crop_image(image, crop_size, start_point)
+
+        image_tensor = tf_toTensor(image)
+        image_tensor = image_tensor.unsqueeze(0)
+        image_tensor = image_tensor.to(device)
+        with torch.no_grad():
             image_arnn = arnn(image_tensor)
+            image_tensor.cpu()
+            del image_tensor
             result = classifier(image_arnn['x'])
-            result = result.to("cpu")
-            _, predicted = torch.max(result.data, 1)
-            if predicted == 0:
-                rain = False
-            else:  # elif result == 1
-                rain = True
-            user_id = 'test'
-            action_success = True
-            action_id = 'test'
-            db.db_add_action(action_id, lat, lon, user_id, action_success)
-            return {
-                'node': (lat, lon),
-                'rain': rain,
-            }, 200
+            image_arnn['x'].cpu()
+            del image_arnn
+
+        result = result.to("cpu")
+        _, predicted = torch.max(result.data, 1)
+        del result
+        if predicted == 0:
+            rain = False
+        else:  # elif result == 1
+            rain = True
+        user_id = 'test'
+        action_success = True
+        action_id = 'test'
+        db.db_add_action(action_id, lat, lon, user_id, action_success)
+        return {
+            'node': (lat, lon),
+            'rain': rain,
+        }, 200
 
 
 @Action.route('/action_display')
app.py
--- app.py
+++ app.py
@@ -1,12 +1,10 @@
-from flask import Flask
+import flask
 from flask_restx import Api
 from auth import Auth
 from action import Action
 
-app = Flask(__name__)
+app = flask.Flask(__name__)
 
-
-print("Api Start")
 api = Api(    app,
     version='0.1',
     title="RDS",
@@ -15,10 +13,7 @@
     contact="[email protected]",
     license="MIT")
 
-
-
 api.add_namespace(Auth, '/auth')
-print("Api Add Auth")
 
 api.add_namespace(Action, '/action')
 
 
asset/rain-svgrepo-com.png (Binary) (added)
+++ asset/rain-svgrepo-com.png
Binary file is not shown
 
asset/sun-svgrepo-com.png (Binary) (added)
+++ asset/sun-svgrepo-com.png
Binary file is not shown
auth.py
--- auth.py
+++ auth.py
@@ -1,5 +1,5 @@
 import hashlib
-from flask import request, jsonify, render_template,redirect,url_for
+from flask import request, jsonify
 from flask_restx import Resource, Api, Namespace, fields
 from database.database import DB
 import datetime
 
demonstration.py (added)
+++ demonstration.py
@@ -0,0 +1,107 @@
+import random
+import time
+import visdom
+import glob
+import torch
+import cv2
+from torchvision.transforms import ToTensor, Compose, Normalize
+from flask import request
+from model.AttentiveRNN import AttentiveRNN
+from model.Classifier import Resnet as Classifier
+from subfuction.image_crop import crop_image
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+# execute visdom instance first
+# to do that, install visdom via pip and execute in terminal
+
+def process_image():
+    vis = visdom.Visdom()
+    arnn = AttentiveRNN(6, 3, 2)
+    arnn.load_state_dict(torch.load("weights/ARNN_trained_weight_6_3_2.pt"))
+    arnn.to(device=device)
+    arnn.eval()
+    crop_size = (512, 512)
+    start_point = (750, 450)
+    tf_toTensor = ToTensor()
+    classifier = Classifier(in_ch=1)
+    classifier.load_state_dict(torch.load("weights/classifier_e19_weight_1080p_512512_fixed_wrong_resolution_and_ch.pt"))
+    classifier.to(device=device)
+    classifier.eval()
+    rainy_data_path = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/SUNNY/**/**/*.png")
+    # rainy_data_path = glob.glob("/home/takensoft/Pictures/폭우 빗방울 (475개)/*.png")
+    img_path = rainy_data_path
+    # clean_data_path = glob.glob("/home/takensoft/Documents/AttentiveRNNClassifier/output/original/*.png")
+
+    # img_path = rainy_data_path + clean_data_path
+    # normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+    random.shuffle(img_path)
+
+    for i in iter(range(len(img_path))):
+        image = crop_image(img_path[i], crop_size, start_point)
+        if not image.any():
+            continue
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        image_tensor = tf_toTensor(image)
+        image_tensor = image_tensor.unsqueeze(0)
+        image_tensor = image_tensor.to(device)
+        image_arnn = arnn(image_tensor)
+
+        input_win = 'input_window'
+        attention_map_wins = [f'attention_map_{i}' for i in range(6)]
+        prediction_win = 'prediction_window'
+
+        # Visualize attention maps using visdom
+        vis.images(
+            image_tensor,
+            opts=dict(title=f"input"),
+            win=input_win
+        )
+        for idx, attention_map in enumerate(image_arnn['attention_map_list']):
+            if idx == 0 or idx == 5:
+                vis.images(
+                    attention_map.cpu(),  # Expected shape: (batch_size, C, H, W)
+                    opts=dict(title=f'Attention Map {idx + 1}'),
+                    win=attention_map_wins[idx]
+                )
+        # arnn_result = normalize(image_arnn['x'])
+        result = classifier(image_arnn['attention_map_list'][-1])
+        result = result.to("cpu")
+        _, predicted = torch.max(result.data, 1)
+        print(result.data)
+        print(_)
+        print(predicted)
+        # Load and display the corresponding icon
+        if predicted == 0:
+            icon_path = 'asset/sun-svgrepo-com.png'
+        else:  # elif result == 1
+            icon_path = 'asset/rain-svgrepo-com.png'
+
+        # Load icon and convert to tensor
+        icon_image = cv2.imread(icon_path, cv2.IMREAD_UNCHANGED)
+        transform = Compose([
+            ToTensor()
+        ])
+        icon_tensor = transform(icon_image).unsqueeze(0)  # Add batch dimension
+
+        # Visualize icon using visdom
+        vis.images(
+            icon_tensor,
+            opts=dict(title='Weather Prediction'),
+            win=prediction_win
+        )
+        time.sleep(1)
+
+    # result = classifier(image_arnn['x'])
+    # result = result.to("cpu")
+    # _, predicted = torch.max(result.data, 1)
+    # if predicted == 0:
+    #     rain = False
+    # else:  # elif result == 1
+    #     rain = True
+    # return {
+    #     'rain': rain,
+    # }, 200
+
+if __name__ == "__main__":
+    process_image()
(No newline at end of file)
model/AttentiveRNN.py
--- model/AttentiveRNN.py
+++ model/AttentiveRNN.py
@@ -217,8 +217,8 @@
             lstm_feats.append(lstm_feats_i)
         ret = {
             'x' : x,
-            'attention_map_list' : attention_map,
-            'lstm_feats' : lstm_feats
+            # 'attention_map_list' : attention_map,
+            # 'lstm_feats' : lstm_feats
         }
         return ret
 
 
requirements.txt (deleted)
--- requirements.txt
@@ -1,11 +0,0 @@
-torch~=2.0.1+cu118
-flask~=2.3.3
-jwt~=1.3.1
-werkzeug~=2.3.7
-torchvision~=0.15.2+cu118
-networkx~=3.0
-geojson~=3.0.1
-haversine~=2.8.0
-opencv-python~=4.8.0.76
-joblib~=1.3.2
-pandas~=2.0.3
(No newline at end of file)
subfuction/image_crop.py
--- subfuction/image_crop.py
+++ subfuction/image_crop.py
@@ -1,14 +1,12 @@
 import cv2
 import os
 
-def crop_image(image_path, crop_size, start_point):
-    if image_path.endswith(".jpg") or image_path.endswith(".png"):
-        image = cv2.imread(image_path)
-        height, width = image.shape[:2]
+def crop_image(image, crop_size, start_point):
+    height, width = image.shape[:2]
 
-        if width > start_point[0] + crop_size[0] and height > start_point[1] + crop_size[1]:
-            cropped_image = image[start_point[1]:start_point[1]+crop_size[1], start_point[0]:start_point[0]+crop_size[0]]
-            return cropped_image
-        else:
-            print(f"Image {os.path.basename(image_path)} is too small to be cropped with the current settings.")
-            return False
(No newline at end of file)
+    if width > start_point[0] + crop_size[0] and height > start_point[1] + crop_size[1]:
+        cropped_image = image[start_point[1]:start_point[1]+crop_size[1], start_point[0]:start_point[0]+crop_size[0]]
+        return cropped_image
+    else:
+        print(f"Image is too small to be cropped with the current settings.")
+        return False
(No newline at end of file)