윤영준 2023-08-21
Hello Yona
@07a06e356fdaef0e8edcb99170d5242c20877425
 
README.md (added)
+++ README.md
@@ -0,0 +1,1 @@
+# Altron_Backend
 
action.py (added)
+++ action.py
@@ -0,0 +1,96 @@
+from flask_restx import Resource, Namespace
+from flask import request
+from werkzeug.utils import secure_filename
+import os
+from database.database import DB
+import torch
+from torchvision.transforms import ToTensor
+from datetime import datetime
+from model.AttentiveRNN import AttentiveRNN
+from model.Classifier import Resnet as Classifier
+from subfuction.image_crop import crop_image
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+paths = os.getcwd()
+
+Action = Namespace(
+    name="Action",
+    description="API used for node analysis.",
+)
+
+
[email protected]('/image_summit')
+class ImageSubmit(Resource):
+    @Action.doc(responses={200: 'Success'})
+    @Action.doc(responses={500: 'Register Failed'})
+    def post(self):
+        if request.method == 'POST':
+            f = request.files['file']
+            f.save(secure_filename(f.filename))
+            return {
+                'save': 'done'  # returned as a plain string
+            }, 200
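+
+# A minimal client-side sketch, assuming the service runs on
+# http://localhost:5000 with this namespace mounted at /Action (both are
+# assumptions; the app entry point is not part of this commit):
+#
+#     import requests
+#
+#     with open("frame_0001.png", "rb") as fp:   # hypothetical file
+#         resp = requests.post("http://localhost:5000/Action/image_summit",
+#                              files={"file": fp})
+#     print(resp.status_code, resp.json())  # expected: 200 {'save': 'done'}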
+
+
+crop_size = (512, 512)
+start_point = (750, 450)
+tf_toTensor = ToTensor()
+
[email protected]('/image_anal')
+class ImageAnalysis(Resource):
+    @Action.doc(responses={200: 'Success'})
+    @Action.doc(responses={500: 'Register Failed'})
+    def post(self):
+        if request.method == 'POST':
+            db = DB()
+            arnn = AttentiveRNN(6, 3, 2)
+            arnn.load_state_dict(torch.load("weights/ARNN_trained_weight_6_3_2.pt", map_location=device))
+            arnn.to(device=device)
+            arnn.eval()  # inference mode
+            classifier = Classifier()
+            classifier.load_state_dict(torch.load("weights/Classifier_512.pt", map_location=device))
+            classifier.to(device=device)
+            classifier.eval()
+            work_dir = os.getcwd()  # avoid shadowing the built-in dir()
+            lat = float(request.json['gps_x'])
+            lon = float(request.json['gps_y'])
+            filename = request.json['filename']
+            file_type = request.json['file_type']
+            total_path = os.path.join(work_dir, filename + file_type)
+            image = crop_image(total_path, crop_size, start_point)
+            if image is None or image is False:  # truth-testing a numpy array raises ValueError
+                return {
+                    'node': (lat, lon),
+                    'rain': 'rain',
+                }, 500
+            image_tensor = tf_toTensor(image)
+            image_tensor = image_tensor.unsqueeze(0).to(device)  # .to() is not in-place; add a batch dimension
+            with torch.no_grad():
+                arnn_out = arnn(image_tensor)       # run the loaded model, not the class itself
+                result = classifier(arnn_out['x'])  # classify the attention-refined image
+            label = result.argmax(dim=1).to("cpu").item()
+            if label == 0:
+                rain = False
+            else:  # label == 1
+                rain = True
+            user_id = 'test'
+            action_success = True
+            action_id = 'test'
+            db.db_add_action(action_id, lat, lon, user_id, action_success)
+            return {
+                'node': (lat, lon),
+                'rain': rain,
+            }, 200
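+
+# Client-side sketch for this endpoint (same mount-path assumption as above;
+# the image is expected to already exist on the server, e.g. via /image_summit):
+#
+#     resp = requests.post(
+#         "http://localhost:5000/Action/image_anal",
+#         json={"gps_x": 35.1234, "gps_y": 128.5678,   # hypothetical GPS values
+#               "filename": "frame_0001", "file_type": ".png"},
+#     )
+#     print(resp.json())  # e.g. {'node': [35.1234, 128.5678], 'rain': True}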
+
+
[email protected]('/action_display')
+class ActionDisplay(Resource):
+    @Action.doc(responses={200: 'Success'})
+    @Action.doc(responses={500: 'Register Failed'})
+    def post(self):
+        if request.method == 'POST':  # this handler only ever receives POST
+            db = DB()
+            now = datetime.now()
+            # db_display_action expects a datetime (it applies timedelta), not a string
+            value = db.db_display_action(now)
+            return {
+                'report': list(value)
+            }, 200
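+
+# Client-side sketch (same assumptions as above): fetch actions within two
+# hours of the current server time:
+#
+#     resp = requests.post("http://localhost:5000/Action/action_display")
+#     print(resp.json())  # {'report': [...]}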
 
auth.py (added)
+++ auth.py
@@ -0,0 +1,114 @@
+import hashlib
+from flask import request, jsonify
+from flask_restx import Resource, Namespace, fields
+from database.database import DB
+import datetime
+import jwt
+
+
+
+users = {}
+
+Auth = Namespace(
+    name="Auth",
+    description="API for user authentication.",
+)
+
+
+user_fields = Auth.model('User', {  # create the request/response model
+    'id': fields.String(description='a User Name', required=True, example="id")
+})
+
+
+user_fields_auth = Auth.inherit('User Auth', user_fields, {
+    'password': fields.String(description='Password', required=True)
+})
+
+user_fields_register = Auth.inherit('User register', user_fields, {
+    'password': fields.String(description='Password', required=True),
+    'email': fields.String(description='email', required=True),
+    'user_sex': fields.String(description='sex', required=True),
+    'phone': fields.String(description='phone', required=True)
+})
+
+
+
[email protected]('/id')
+class AuthCheck(Resource):
+    @Auth.doc(responses={200: 'Success'})
+    @Auth.doc(responses={500: 'Register Failed'})
+    def post(self):
+        db = DB()
+        id = request.json['id']
+        value = db.db_check_id(id)
+        if value is not None:
+            return {
+                "message": "This ID is already taken"
+            }, 500
+        else:
+            return {
+                'message': 'This ID is available'  # returned as a plain string
+            }, 200
+
+
[email protected]('/register')
+class AuthRegister(Resource):
+    @Auth.expect(user_fields_register)
+    @Auth.doc(responses={200: 'Success'})
+    @Auth.doc(responses={500: 'Register Failed'})
+    def post(self):
+        db = DB()
+        id = request.json['id']
+        password = request.json['password']
+        user_email = request.json['email']
+        sex = request.json['user_sex']
+        phone = request.json['phone']
+        pw_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
+        value = db.db_check_id(id)  # duplicate-ID check, not a login attempt
+        if value is not None:
+            return {
+                "message": "Register Failed"
+            }, 500
+        else:
+            db.db_add_id(id, pw_hash, user_email, sex, phone)
+            return {
+                'Authorization': id  # returned as a plain string
+            }, 200
+
[email protected]('/login')
+class AuthLogin(Resource):
+    @Auth.expect(user_fields_auth)
+    @Auth.doc(responses={200: 'Success'})
+    @Auth.doc(responses={404: 'User Not Found'})
+    @Auth.doc(responses={500: 'Auth Failed'})
+    def post(self):
+        db = DB()
+        id = request.json['id']
+        password = request.json['password']
+        pw_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
+        result = db.db_login(id, pw_hash)
+        if result is not None:
+            payload = {
+                'id' : id,
+                'exp' : datetime.datetime.utcnow() + datetime.timedelta(seconds=70)
+            }
+            token = jwt.encode(payload, "secret", algorithm='HS256')
+            return jsonify({'result': 'success', 'token': token})
+        else:
+            return jsonify({'result': 'fail', 'msg': 'ID or password does not match.'})
+
+
[email protected]('/secession')
+class AuthSecession(Resource):
+    def post(self):
+        db = DB()
+        token = request.json['token']
+        payload = jwt.decode(token, "secret", algorithms=['HS256'])
+        db.db_delete_id(payload['id'])
+        return {'secession': 'success'}
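+
+# A minimal end-to-end sketch of the auth flow, assuming the namespace is
+# mounted at /Auth on http://localhost:5000 (an assumption; the app entry
+# point is not part of this commit) and using hypothetical values:
+#
+#     import requests
+#
+#     base = "http://localhost:5000/Auth"
+#     requests.post(base + "/register", json={
+#         "id": "alice", "password": "pw", "email": "[email protected]",
+#         "user_sex": "F", "phone": "010-0000-0000"})
+#     token = requests.post(base + "/login",
+#                           json={"id": "alice", "password": "pw"}).json()["token"]
+#     requests.post(base + "/secession", json={"token": token})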
+
+
+
 
database/database.py (added)
+++ database/database.py
@@ -0,0 +1,93 @@
+import psycopg2  # PostgreSQL driver
+import time
+from datetime import datetime, timedelta
+
+
+class DB():
+    def __init__(self):
+        self.conn=psycopg2.connect(
+            host='localhost',
+            dbname='postgres',
+            user='postgres',
+            password='ts4430!@',
+            port='5432'
+            )  # connect to the DB
+        self.conn.autocommit = True
+
+    def db_check_id(self, id):
+        cur = self.conn.cursor()  # create a cursor
+        # parameterized query to avoid SQL injection
+        cur.execute('''
+        SELECT user_id
+        FROM rds.user_id
+        WHERE user_id = %s;
+        ''', (id,))
+        result = cur.fetchone()
+        cur.close()
+
+        return result
+
+    def db_login(self, id, pw):
+        cur = self.conn.cursor()  # create a cursor
+        cur.execute('''
+        SELECT user_id, user_pw, user_email, user_sex, user_phone, user_time_stamp
+        FROM rds.user_id
+        WHERE user_id = %s AND user_pw = %s;
+        ''', (id, pw))
+        result = cur.fetchone()
+        cur.close()
+
+        return result
+
+    def db_add_id(self, user_id, user_pw, user_email, user_sex, user_phone):
+        cur = self.conn.cursor()  # create a cursor
+        now = time.localtime()
+        d = time.strftime('%Y-%m-%d %X', now)
+        cur.execute('''
+        INSERT INTO rds.user_id (user_id, user_pw, user_email, user_sex, user_phone, user_time_stamp)
+        VALUES (%s, %s, %s, %s, %s, %s)
+        ''', (user_id, user_pw, user_email, user_sex, user_phone, d))
+        cur.close()
+        
+    def db_delete_id(self, user_id):
+        cur = self.conn.cursor()  # create a cursor
+        cur.execute('''
+        DELETE
+        FROM rds.user_id
+        WHERE user_id = %s
+        ''', (user_id,))
+        cur.close()
+    
+
+    
+    def db_add_action(self, action_id, lat, lon, user_id, action_success):
+        cur = self.conn.cursor()  # create a cursor
+        now = datetime.now()
+        d = now.strftime('%Y-%m-%d %X')
+        cur.execute('''
+        INSERT INTO rds.action (action_id, lat, lon, action_time_stamp, user_id, action_success)
+        VALUES (%s, %s, %s, %s, %s, %s)
+        ''', (action_id, lat, lon, d, user_id, action_success))
+        cur.close()
+        
+        
+    def db_display_action(self, timestamp):
+        cur = self.conn.cursor()  # create a cursor
+        # fetch rows within +/- 2 hours of the given datetime
+        d_plus = (timestamp + timedelta(hours=2)).strftime('%Y-%m-%d %X')
+        d_minus = (timestamp - timedelta(hours=2)).strftime('%Y-%m-%d %X')
+        cur.execute('''
+        SELECT * FROM rds.pothole
+        WHERE timestamp BETWEEN %s AND %s;
+        ''', (d_minus, d_plus))
+        result = cur.fetchall()
+        cur.close()
+        return result
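+
+# A quick usage sketch (assumes the local PostgreSQL instance and the rds
+# schema above are reachable; 'alice' and the field values are hypothetical):
+#
+#     from datetime import datetime
+#     db = DB()
+#     if db.db_check_id('alice') is None:
+#         db.db_add_id('alice', '<sha256 hash>', '[email protected]', 'F', '010-0000-0000')
+#     rows = db.db_display_action(datetime.now())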
+    
+
+        
+
+    (No newline at end of file)
 
model/AttentiveRNN.py (added)
+++ model/AttentiveRNN.py
@@ -0,0 +1,246 @@
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+# nn.Sequential does not handle multiple input by design, and this is a workaround
+# https://github.com/pytorch/pytorch/issues/19808#
+class mySequential(nn.Sequential):
+    def forward(self, *input):
+        for module in self._modules.values():
+            input = module(*input)
+        return input
+
+
+class ResNetBlock(nn.Module):
+    def __init__(self, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1, groups=1,
+                 dilation=1):
+        """
+        :type kernel_size: iterator or int
+        """
+        super(ResNetBlock, self).__init__()
+        if kernel_size is None:
+            kernel_size = [3, 3]
+        self.conv1 = nn.Conv2d(
+            input_ch, out_ch,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            dilation=dilation
+        )
+        self.conv2 = nn.Sequential(
+            nn.Conv2d(
+                out_ch, out_ch,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                groups=groups,
+                dilation=dilation
+            ),
+            nn.LeakyReLU()
+        )
+        self.conv_hidden = nn.ModuleList()
+        # note: the same self.conv2 instance is appended every time, so all
+        # hidden layers share a single set of weights (one module, not copies)
+        for block in range(blocks):
+            for layer in range(layers):
+                self.conv_hidden.append(
+                    self.conv2
+                )
+        self.blocks = blocks
+        self.layers = layers
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = F.leaky_relu(x)
+        shortcut = x
+        for i, hidden_layer in enumerate(self.conv_hidden):
+            x = hidden_layer(x)
+            if (i % self.layers == 0) and (i != 0):
+                x = x + shortcut
+        return x
+
+
+class ConvLSTM(nn.Module):
+    def __init__(self, ch, kernel_size=3):
+        super(ConvLSTM, self).__init__()
+        if isinstance(kernel_size, (list, tuple)):  # callers pass [3, 3]; len() would fail on an int
+            kernel_size = kernel_size[0]
+        self.padding = (kernel_size - 1) // 2  # "same" padding for odd kernel sizes
+        self.conv_i = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_f = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_c = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_o = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_attention_map = nn.Conv2d(in_channels=ch, out_channels=1, kernel_size=kernel_size, stride=1,
+                                            padding=self.padding, bias=False)
+        self.ch = ch
+
+    def init_hidden(self, batch_size, image_size, init=0.0):
+        height, width = image_size
+        return torch.ones(batch_size, self.ch, height, width).to(dtype=self.conv_i.weight.dtype , device=self.conv_i.weight.device) * init
+
+    def forward(self, input_tensor, input_cell_state=None):
+        if input_cell_state is None:
+            batch_size, _, height, width = input_tensor.size()
+            input_cell_state = self.init_hidden(batch_size, (height, width))
+
+        conv_i = self.conv_i(input_tensor)
+        sigmoid_i = torch.sigmoid(conv_i)
+
+        conv_f = self.conv_f(input_tensor)
+        sigmoid_f = torch.sigmoid(conv_f)
+
+        cell_state = sigmoid_f * input_cell_state + sigmoid_i * torch.tanh(self.conv_c(input_tensor))
+
+        conv_o = self.conv_o(input_tensor)
+        sigmoid_o = torch.sigmoid(conv_o)
+
+        lstm_feats = sigmoid_o * torch.tanh(cell_state)
+
+        attention_map = self.conv_attention_map(lstm_feats)
+        attention_map = torch.sigmoid(attention_map)
+
+        ret = {
+            "attention_map" : attention_map,
+            "cell_state" : cell_state,
+            "lstm_feats" : lstm_feats
+        }
+        return ret
+
+
+class AttentiveRNNBLCK(nn.Module):
+    def __init__(self, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1, groups=1,
+                 dilation=1):
+        """
+        :type kernel_size: iterator or int
+        """
+        super(AttentiveRNNBLCK, self).__init__()
+        if kernel_size is None:
+            kernel_size = [3, 3]
+        self.blocks = blocks
+        self.layers = layers
+        self.input_ch = input_ch
+        self.out_ch = out_ch
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+        self.groups = groups
+        self.dilation = dilation
+        self.sigmoid = nn.Sigmoid()
+        self.resnet = nn.Sequential(
+            ResNetBlock(
+                blocks=self.blocks,
+                layers=self.layers,
+                input_ch=self.input_ch,
+                out_ch=self.out_ch,
+                kernel_size=self.kernel_size,
+                stride=self.stride,
+                padding=self.padding,
+                groups=self.groups,
+                dilation=self.dilation
+            )
+        )
+        self.LSTM = mySequential(
+            ConvLSTM(
+                ch=out_ch, kernel_size=kernel_size,
+            )
+        )
+
+    def forward(self, original_image, prev_cell_state=None):
+        x = self.resnet(original_image)
+        lstm_ret = self.LSTM(x, prev_cell_state)
+        attention_map = lstm_ret["attention_map"]
+        cell_state = lstm_ret['cell_state']
+        lstm_feats = lstm_ret["lstm_feats"]
+        x = attention_map * original_image
+        ret = {
+            'x' : x,
+            'attention_map' : attention_map,
+            'cell_state' : cell_state,
+            'lstm_feats' : lstm_feats
+        }
+        return ret
+
+
+class AttentiveRNN(nn.Module):
+    def __init__(self, repetition, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1,
+                 groups=1, dilation=1):
+        """
+        :type kernel_size: iterator or int
+        """
+        super(AttentiveRNN, self).__init__()
+        if kernel_size is None:
+            kernel_size = [3, 3]
+        self.blocks = blocks
+        self.layers = layers
+        self.input_ch = input_ch
+        self.out_ch = out_ch
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+        self.groups = groups
+        self.dilation = dilation
+        self.repetition = repetition
+        self.arnn_block = mySequential(
+            AttentiveRNNBLCK(
+             blocks=blocks,
+             layers=layers,
+             input_ch=input_ch,
+             out_ch=out_ch,
+             kernel_size=kernel_size,
+             stride=stride,
+             padding=padding,
+             groups=groups,
+             dilation=dilation
+            )
+        )
+        self.arnn_blocks = nn.ModuleList()
+        # the same block instance is appended each time, so its weights are
+        # shared across repetitions, i.e. the block is applied recurrently
+        for _ in range(repetition):
+            self.arnn_blocks.append(
+                self.arnn_block
+            )
+        self.name = "AttentiveRNN"
+
+    def forward(self, input_img):
+        cell_state = None
+        attention_map = []
+        lstm_feats = []
+        x = input_img
+        for arnn_block in self.arnn_blocks:
+            arnn_block_return = arnn_block(x, cell_state)
+            attention_map_i = arnn_block_return['attention_map']
+            lstm_feats_i = arnn_block_return['lstm_feats']
+            cell_state = arnn_block_return['cell_state']
+            x = arnn_block_return['x']
+
+            attention_map.append(attention_map_i)
+            lstm_feats.append(lstm_feats_i)
+        ret = {
+            'x' : x,
+            'attention_map_list' : attention_map,
+            'lstm_feats' : lstm_feats
+        }
+        return ret
+
+    def loss(self, input_image_tensor, difference_maskmap, theta=0.8):
+        # weighted sum of per-step attention-map MSEs; with theta < 1 the
+        # later attention maps receive the larger weights
+        inference_ret = self.forward(input_image_tensor)
+        loss = 0.0
+        n = len(inference_ret['attention_map_list'])
+        for index, attention_map in enumerate(inference_ret['attention_map_list']):
+            mse_loss = (theta ** (n - index + 1)) * nn.MSELoss()(attention_map, difference_maskmap)
+            loss += mse_loss
+        return loss
+
+# Need work
+
+if __name__ == "__main__":
+    from torchinfo import summary
+
+    torch.set_default_tensor_type(torch.FloatTensor)
+
+    generator = AttentiveRNN(3, blocks=2)
+    batch_size = 5
+    summary(generator, input_size=(batch_size, 3, 960, 540))
 
model/Classifier.py (added)
+++ model/Classifier.py
@@ -0,0 +1,106 @@
+from torch import nn
+
+class Conv3by3(nn.Module):
+    def __init__(self, in_ch, out_ch):
+        super(Conv3by3, self).__init__()
+        self.conv3by3 = nn.Sequential(
+            nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1),
+            nn.ReLU()
+        )
+
+    def forward(self, x):
+        return self.conv3by3(x)
+
+class Resnet(nn.Module):
+    def __init__(self, classes=2, in_ch=3):
+        super(Resnet, self).__init__()
+        self.firstconv = nn.Sequential(
+            nn.Conv2d(in_channels=in_ch, out_channels=64, kernel_size=7),
+            nn.ReLU(),
+            nn.AvgPool2d(kernel_size=2, stride=2)
+        )
+        self.block1_1 = nn.Sequential(
+            Conv3by3(64, 64),
+            Conv3by3(64, 64),
+        )
+        self.block1_2 = nn.Sequential(
+            Conv3by3(64, 64),
+            Conv3by3(64, 64),
+        )
+        self.block1_3 = nn.Sequential(
+            Conv3by3(64, 64),
+            Conv3by3(64, 64),
+        )
+
+        self.blockshort_1to2 = nn.Sequential(
+            nn.AvgPool2d(kernel_size=2,stride=2)
+        )
+
+        self.block2_1 = nn.Sequential(
+            Conv3by3(64, 128),
+            Conv3by3(128, 128),
+        )
+        self.block2_2 = nn.Sequential(
+            Conv3by3(128, 128),
+            Conv3by3(128, 128),
+        )
+        self.block2_3 = nn.Sequential(
+            Conv3by3(128, 128),
+            Conv3by3(128, 128),
+        )
+
+        self.blockshort_2to3 = nn.Sequential(
+            nn.AvgPool2d(kernel_size=2, stride=2),
+            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, stride=1)
+        )
+
+        self.block3_1 = nn.Sequential(
+            Conv3by3(128, 256),
+            Conv3by3(256, 256),
+        )
+        self.block3_2 = nn.Sequential(
+            Conv3by3(256, 256),
+            Conv3by3(256, 256),
+        )
+        self.block3_3 = nn.Sequential(
+            Conv3by3(256, 256),
+            Conv3by3(256, 256),
+        )
+
+        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(256, classes)  # `classes` output logits (2 by default)
+
+    def forward(self, x):
+        x = self.firstconv(x)
+
+        identity = x
+        out = self.block1_1(x)
+        out = out + identity
+        out = self.block1_2(out)
+        out = out + identity
+        out = self.block1_3(out)
+        out = out + identity
+
+
+        out = self.block2_1(out)
+        out = self.blockshort_1to2(out)
+        identity = out
+        out = self.block2_2(out)
+        out = out + identity
+        out = self.block2_3(out)
+        out = out + identity
+
+
+        out = self.block3_1(out)
+        out = self.blockshort_2to3(out)
+        identity = out
+        out = self.block3_2(out)
+        out = out + identity
+        out = self.block3_3(out)
+        out = out + identity
+
+        out = self.global_pool(out)
+        out = out.view(out.size(0), -1)
+        out = self.fc(out)
+
+        return out
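+
+
+# A minimal smoke-test sketch (assumes only torch is installed; the 512x512
+# input matches the crop size used in action.py):
+if __name__ == "__main__":
+    import torch
+
+    model = Resnet(classes=2, in_ch=3)
+    dummy = torch.randn(1, 3, 512, 512)  # one fake RGB crop
+    logits = model(dummy)
+    print(logits.shape)  # expected: torch.Size([1, 2])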
 
subfuction/generate.py (added)
+++ subfuction/generate.py
@@ -0,0 +1,55 @@
+from haversine import haversine
+import networkx as nx
+import geojson
+
+
+with open("D:/takensoft/project2/data/기타 가공/데이터/osm.geojson",encoding='utf-8') as f:
+    gj = geojson.load(f)
+
+def swith_xy(tuples):
+    x,y=tuples
+    return (y,x)
+
+G = nx.Graph()
+
+features_list = gj['features']
+for j in range(len(features_list)):
+    features = features_list[j]
+    lines = features['geometry']['coordinates'][0]
+    print(j)  # progress indicator
+
+    for i in range(len(lines)-1):
+        G.add_edge(swith_xy(lines[i]), swith_xy(lines[i+1]),
+                   fclass=features['properties']['fclass'],
+                   oneway=features['properties']['oneway'])
+
+# split into connected components and keep the first one
+sg = (G.subgraph(c) for c in nx.connected_components(G))
+sg = list(sg)[0]
+
+for n0, n1 in G.edges():
+    dist = haversine(n0, n1, unit='m')
+    G.edges[n0, n1]["dist"] = dist
+
+df = nx.to_pandas_edgelist(G)
+
+li_source = list(df['source'])
+li_source_x = []
+li_source_y = []
+
+for i in li_source:
+    li_source_x.append(str(i[0]))
+    li_source_y.append(str(i[1]))
+df['source_x'] = li_source_x
+df['source_y'] = li_source_y
+
+li_target = list(df['target'])
+li_target_x = []
+li_target_y = []
+
+for i in li_target:
+    li_target_x.append(str(i[0]))
+    li_target_y.append(str(i[1]))
+df['target_x'] = li_target_x
+df['target_y'] = li_target_y
+df = df.drop(['source', 'target'], axis=1)
+df = df.reset_index()
+df.to_csv('node.csv', encoding='euc-kr')
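+
+# Follow-up sketch: with the graph in memory, the per-edge "dist" attribute
+# can drive a shortest-path query (the coordinates are hypothetical node keys):
+#
+#     route = nx.shortest_path(G, source=(35.1234, 128.5678),
+#                              target=(35.1300, 128.5700), weight="dist")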
+
 
subfuction/image_crop.py (added)
+++ subfuction/image_crop.py
@@ -0,0 +1,37 @@
+import cv2
+import os
+import glob
+from joblib import Parallel, delayed
+
+def crop_image(image_path, crop_size, start_point):
+    if image_path.endswith(".jpg") or image_path.endswith(".png"):
+        image = cv2.imread(image_path)
+        height, width = image.shape[:2]
+
+        if width > start_point[0] + crop_size[0] and height > start_point[1] + crop_size[1]:
+            cropped_image = image[start_point[1]:start_point[1]+crop_size[1],
+                                  start_point[0]:start_point[0]+crop_size[0]]
+            return cropped_image
+        else:
+            print(f"Image {os.path.basename(image_path)} is too small to be cropped with the current settings.")
+            return False
+    return None  # not a .jpg/.png path
+
+def crop_images_parallel(image_paths, output_directory, crop_size, start_point):
+    if not os.path.exists(output_directory):
+        os.makedirs(output_directory)
+
+    def crop_and_save(image_path):
+        # crop_image takes three arguments; save the successful crops here
+        cropped = crop_image(image_path, crop_size, start_point)
+        if cropped is not None and cropped is not False:
+            cv2.imwrite(os.path.join(output_directory, os.path.basename(image_path)), cropped)
+
+    # run the cropping function in parallel
+    Parallel(n_jobs=-1)(delayed(crop_and_save)(image_path) for image_path in image_paths)
+
+
+# guard the script section so that importing crop_image (as action.py does)
+# does not trigger a cropping run
+if __name__ == "__main__":
+    output_directory = "/home/takensoft/Pictures/test512_512/rainy/"
+
+    # get all image paths in the directory
+    # image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/하드디스크 화창한날(17개)/**/*.png")
+    # image_paths += glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/7월19일 화창한날(8개)/**/*.png")
+    image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/비오는날 프레임 추출/7월11일 폭우(3개)/**/*.png")
+    image_paths += glob.glob("/home/takensoft/Pictures/폭우 빗방울 (475개)/*.png")
+
+    crop_size = (512, 512)  # width and height for the cropped images
+    start_point = (750, 450)  # upper-left point where the crop starts
+
+    crop_images_parallel(image_paths, output_directory, crop_size, start_point)
 
subfuction/save_pickle.py (added)
+++ subfuction/save_pickle.py
@@ -0,0 +1,26 @@
+from database.database import DB
+import pandas as pd
+
+import networkx as nx
+from itertools import tee
+
+def pairwise(iterable):
+    """Returns an iterable access binary tuple
+    s -> (s0,s1), (s1,s2), (s2, s3), ..."""
+    a, b = tee(iterable)
+    next(b, None)
+    return zip(a, b)
+
+def swith_xy(tuples):
+    x,y=tuples
+    return (y,x)
+    
+
+db = DB()
+df = pd.DataFrame(db.db_get_node())
+df.columns = ['index', 'source_x', 'source_y', 'target_x', 'target_y', 'distance']
+G = nx.Graph()
+for j in range(len(df)):
+    G.add_edge((df['source_x'][j], df['source_y'][j]),
+               (df['target_x'][j], df['target_y'][j]),
+               length=df['distance'][j])
+nx.write_gpickle(G, 'OSM_gpickle.gpickle')
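+
+# Reading the graph back (nx.read_gpickle is the networkx 2.x counterpart of
+# the write_gpickle call above; networkx 3.x removed both in favour of the
+# standard pickle module):
+#
+#     G2 = nx.read_gpickle('OSM_gpickle.gpickle')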
+
 
weights/ARNN_trained_weight_6_3_2.pt (Binary) (added)
+++ weights/ARNN_trained_weight_6_3_2.pt
Binary file is not shown
 
weights/Classifier_512.pt (Binary) (added)
+++ weights/Classifier_512.pt
Binary file is not shown