
+++ README.md
@@ -0,0 +1,1 @@
+# Altron_Backend
+++ action.py
@@ -0,0 +1,96 @@
+from flask_restx import Resource, Namespace
+from flask import request
+from werkzeug.utils import secure_filename
+import os
+from database.database import DB
+import torch
+from torchvision.transforms import ToTensor
+from datetime import datetime
+from model.AttentiveRNN import AttentiveRNN
+from model.Classifier import Resnet as Classifier
+from subfuction.image_crop import crop_image
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+Action = Namespace(
+    name="Action",
+    description="API used for node analysis.",
+)
+
+
[email protected]('/image_summit')
+class ImageSubmit(Resource):
+    @Action.doc(responses={200: 'Success', 500: 'Register Failed'})
+    def post(self):
+        f = request.files['file']
+        f.save(secure_filename(f.filename))
+        return {
+            'save': 'done'  # returned as a plain string
+        }, 200
+
+
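+# A minimal sketch of how a client might call /image_summit (host, port, and
+# the /Action prefix are assumptions; the actual mount point depends on how
+# these namespaces are registered on the Api object, which is not in this diff):
+#
+#   curl -X POST -F "file=@frame.png" http://localhost:5000/Action/image_summit
+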
+crop_size = (512, 512)
+start_point = (750, 450)
+tf_toTensor = ToTensor()
+
[email protected]('/image_anal')
+class ImageAnalysis(Resource):
+    @Action.doc(responses={200: 'Success', 500: 'Register Failed'})
+    def post(self):
+        db = DB()
+        # NOTE: both models are re-created and their weights re-loaded on every
+        # request; loading them once at module level would be much cheaper.
+        arnn = AttentiveRNN(6, 3, 2)
+        arnn.load_state_dict(torch.load("weights/ARNN_trained_weight_6_3_2.pt"))
+        arnn.to(device=device)
+        arnn.eval()
+        classifier = Classifier()
+        classifier.load_state_dict(torch.load("weights/Classifier_512.pt"))
+        classifier.to(device=device)
+        classifier.eval()
+        base_dir = os.getcwd()
+        lat = float(request.json['gps_x'])
+        lon = float(request.json['gps_y'])
+        filename = request.json['filename']
+        file_type = request.json['file_type']
+        total_path = os.path.join(base_dir, filename + file_type)
+        image = crop_image(total_path, crop_size, start_point)
+        if image is None:
+            return {
+                'node': (lat, lon),
+                'error': 'image could not be cropped',
+            }, 500
+        image_tensor = tf_toTensor(image)
+        image_tensor = image_tensor.unsqueeze(0).to(device)  # add a batch dimension
+        with torch.no_grad():
+            arnn_out = arnn(image_tensor)
+            # AttentiveRNN.forward returns a dict; 'x' is the attention-weighted image
+            logits = classifier(arnn_out['x'])
+        result = logits.argmax(dim=1).cpu().item()
+        rain = result != 0  # class 0: no rain, anything else: rain
+        user_id = 'test'
+        action_success = True
+        action_id = 'test'
+        db.db_add_action(action_id, lat, lon, user_id, action_success)
+        return {
+            'node': (lat, lon),
+            'rain': rain,
+        }, 200
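+
+# Example payload for /image_anal (field names taken from the handler above;
+# host and prefix are assumptions, as for /image_summit):
+#
+#   curl -X POST -H "Content-Type: application/json" \
+#        -d '{"gps_x": 35.87, "gps_y": 128.60, "filename": "frame", "file_type": ".png"}' \
+#        http://localhost:5000/Action/image_anal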
+
+
[email protected]('/action_display')
+class ActionDisplay(Resource):
+    @Action.doc(responses={200: 'Success', 500: 'Register Failed'})
+    def post(self):
+        db = DB()
+        # db_display_action expects a datetime (it adds and subtracts a
+        # timedelta), so pass datetime.now() directly, not a formatted string
+        value = db.db_display_action(datetime.now())
+        return {
+            'report': list(value)
+        }, 200
+++ auth.py
@@ -0,0 +1,114 @@
+import hashlib
+from flask import request, jsonify
+from flask_restx import Resource, Namespace, fields
+from database.database import DB
+import datetime
+import jwt
+
+
+users = {}
+
+Auth = Namespace(
+    name="Auth",
+    description="API for user authentication",
+)
+
+
+user_fields = Auth.model('User', {  # create the request model
+    'id': fields.String(description='a User Name', required=True, example="id")
+})
+
+
+user_fields_auth = Auth.inherit('User Auth', user_fields, {
+    'password': fields.String(description='Password', required=True)
+})
+
+user_fields_register = Auth.inherit('User Register', user_fields, {
+    'password': fields.String(description='Password', required=True),
+    'email': fields.String(description='email', required=True),
+    'user_sex': fields.String(description='sex', required=True),
+    'phone': fields.String(description='phone', required=True)
+})
+
+
[email protected]('/id')
+class AuthCheck(Resource):
+    @Auth.doc(responses={200: 'Success', 500: 'Register Failed'})
+    def post(self):
+        db = DB()
+        user_id = request.json['id']
+        value = db.db_check_id(user_id)
+        if value is not None:
+            return {
+                "message": "This ID is already taken"
+            }, 500
+        else:
+            return {
+                'message': 'This ID is available'
+            }, 200
+
+
[email protected]('/register')
+class AuthRegister(Resource):
+    @Auth.expect(user_fields_register)
+    @Auth.doc(responses={200: 'Success', 500: 'Register Failed'})
+    def post(self):
+        db = DB()
+        user_id = request.json['id']
+        password = request.json['password']
+        user_email = request.json['email']
+        sex = request.json['user_sex']
+        phone = request.json['phone']
+        pw_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
+        # reject ids that are already registered
+        value = db.db_check_id(user_id)
+        if value is not None:
+            return {
+                "message": "Register Failed"
+            }, 500
+        else:
+            db.db_add_id(user_id, pw_hash, user_email, sex, phone)
+            return {
+                'Authorization': user_id
+            }, 200
+
[email protected]('/login')
+class AuthLogin(Resource):
+    @Auth.expect(user_fields_auth)
+    @Auth.doc(responses={200: 'Success', 404: 'User Not Found', 500: 'Auth Failed'})
+    def post(self):
+        db = DB()
+        user_id = request.json['id']
+        password = request.json['password']
+        pw_hash = hashlib.sha256(password.encode('utf-8')).hexdigest()
+        result = db.db_login(user_id, pw_hash)
+        if result is not None:
+            payload = {
+                'id': user_id,
+                'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=70)
+            }
+            # "secret" is a hardcoded signing key; it should come from configuration
+            token = jwt.encode(payload, "secret", algorithm='HS256')
+            return jsonify({'result': 'success', 'token': token})
+        else:
+            return jsonify({'result': 'fail', 'msg': 'ID/password do not match.'})
+
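+# Sketch of how a client can inspect the token returned by /login (the signing
+# key "secret" is the hardcoded value used above):
+#
+#   import jwt
+#   payload = jwt.decode(token, "secret", algorithms=['HS256'])
+#   payload['id']  # -> the logged-in user's id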
+
[email protected]('/secession')
+class AuthSecession(Resource):
+    def post(self):
+        db = DB()
+        token = request.json['token']
+        payload = jwt.decode(token, "secret", algorithms=['HS256'])
+        db.db_delete_id(payload['id'])
+        return {'secession': 'success'}
+
+++ database/database.py
@@ -0,0 +1,93 @@
+import psycopg2  # PostgreSQL driver
+import time
+from datetime import datetime, timedelta
+
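+# The queries below assume a schema roughly like the following, reconstructed
+# from the column names used in this file (the real DDL is not in this diff):
+#
+#   CREATE TABLE rds.user_id (
+#       user_id         TEXT PRIMARY KEY,
+#       user_pw         TEXT,
+#       user_email      TEXT,
+#       user_sex        TEXT,
+#       user_phone      TEXT,
+#       user_time_stamp TIMESTAMP
+#   );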
+
+class DB():
+    def __init__(self):
+        # connect to the database; the credentials are hardcoded here and
+        # should be moved to configuration or environment variables
+        self.conn = psycopg2.connect(
+            host='localhost',
+            dbname='postgres',
+            user='postgres',
+            password='ts4430!@',
+            port='5432'
+        )
+        self.conn.autocommit = True
+
+    def db_check_id(self, id):
+        cur = self.conn.cursor()  # create a cursor
+        # parameterized query; interpolating id with an f-string would be an
+        # SQL injection vector
+        cur.execute('''
+            SELECT user_id
+            FROM rds.user_id
+            WHERE user_id = %s;
+        ''', (id,))
+        result = cur.fetchone()
+        cur.close()
+
+        return result
+
+    def db_login(self, id, pw):
+        cur = self.conn.cursor()  # create a cursor
+        cur.execute('''
+            SELECT user_id, user_pw, user_email, user_sex, user_phone, user_time_stamp
+            FROM rds.user_id
+            WHERE user_id = %s AND user_pw = %s;
+        ''', (id, pw))
+        result = cur.fetchone()
+        cur.close()
+
+        return result
+
+    def db_add_id(self, user_id, user_pw, user_email, user_sex, user_phone):
+        cur = self.conn.cursor()  # create a cursor
+        now = time.localtime()
+        d = time.strftime('%Y-%m-%d %X', now)
+        cur.execute('''
+            INSERT INTO rds.user_id (user_id, user_pw, user_email, user_sex, user_phone, user_time_stamp)
+            VALUES (%s, %s, %s, %s, %s, %s)
+        ''', (user_id, user_pw, user_email, user_sex, user_phone, d))
+        cur.close()
+
+    def db_delete_id(self, user_id):
+        cur = self.conn.cursor()  # create a cursor
+        cur.execute('''
+            DELETE
+            FROM rds.user_id
+            WHERE user_id = %s
+        ''', (user_id,))
+        cur.close()
+
+    def db_add_action(self, action_id, lat, lon, user_id, action_success):
+        cur = self.conn.cursor()  # create a cursor
+        now = datetime.now()
+        d = now.strftime('%Y-%m-%d %X')
+        cur.execute('''
+            INSERT INTO rds.action (action_id, lat, lon, action_time_stamp, user_id, action_success)
+            VALUES (%s, %s, %s, %s, %s, %s)
+        ''', (action_id, lat, lon, d, user_id, action_success))
+        cur.close()
+
+    def db_display_action(self, timestamp):
+        # timestamp must be a datetime; rows within +/- 2 hours of it are returned
+        cur = self.conn.cursor()  # create a cursor
+        d_plus = (timestamp + timedelta(hours=2)).strftime('%Y-%m-%d %X')
+        d_minus = (timestamp - timedelta(hours=2)).strftime('%Y-%m-%d %X')
+        cur.execute('''
+            SELECT * FROM rds.pothole
+            WHERE timestamp BETWEEN %s AND %s;
+        ''', (d_minus, d_plus))
+        result = cur.fetchall()
+        cur.close()
+        return result
+++ model/AttentiveRNN.py
@@ -0,0 +1,246 @@
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+# nn.Sequential does not handle multiple inputs by design; this is a workaround
+# https://github.com/pytorch/pytorch/issues/19808#
+class mySequential(nn.Sequential):
+    def forward(self, *input):
+        for module in self._modules.values():
+            input = module(*input)
+        return input
+
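+# Example (illustration only, not used elsewhere in this file): a module whose
+# forward takes two arguments can live inside mySequential, which plain
+# nn.Sequential would not allow:
+#
+#   seq = mySequential(ConvLSTM(ch=32))
+#   out = seq(feature_map, prev_cell_state)  # both arguments are forwarded
+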
+
+class ResNetBlock(nn.Module):
+    def __init__(self, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1, groups=1,
+                 dilation=1):
+        """
+        :type kernel_size: iterable or int
+        """
+        super(ResNetBlock, self).__init__()
+        if kernel_size is None:
+            kernel_size = [3, 3]
+        self.conv1 = nn.Conv2d(
+            input_ch, out_ch,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            dilation=dilation
+        )
+        self.conv2 = nn.Sequential(
+            nn.Conv2d(
+                out_ch, out_ch,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                groups=groups,
+                dilation=dilation
+            ),
+            nn.LeakyReLU()
+        )
+        # NOTE: the same self.conv2 module object is appended blocks*layers
+        # times, so every hidden layer shares one set of weights. This matches
+        # the shipped checkpoints, so it is kept as-is.
+        self.conv_hidden = nn.ModuleList()
+        for block in range(blocks):
+            for layer in range(layers):
+                self.conv_hidden.append(
+                    self.conv2
+                )
+        self.blocks = blocks
+        self.layers = layers
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = F.leaky_relu(x)
+        shortcut = x
+        for i, hidden_layer in enumerate(self.conv_hidden):
+            x = hidden_layer(x)
+            if i % self.layers == 0 and i != 0:
+                x = x + shortcut
+        return x
+
+
+class ConvLSTM(nn.Module):
+    def __init__(self, ch, kernel_size=3):
+        super(ConvLSTM, self).__init__()
+        # derive the padding that preserves spatial size for odd kernels
+        k = kernel_size[0] if isinstance(kernel_size, (list, tuple)) else kernel_size
+        self.padding = (k - 1) // 2
+        self.conv_i = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_f = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_c = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_o = nn.Conv2d(in_channels=ch, out_channels=ch, kernel_size=kernel_size, stride=1,
+                                padding=self.padding, bias=False)
+        self.conv_attention_map = nn.Conv2d(in_channels=ch, out_channels=1, kernel_size=kernel_size, stride=1,
+                                            padding=self.padding, bias=False)
+        self.ch = ch
+
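+    # Gate equations implemented in this module's forward. Note that, unlike a
+    # textbook ConvLSTM, every gate is computed from the input tensor alone
+    # (the previous hidden state does not feed the gates):
+    #   i   = sigmoid(W_i * x)
+    #   f   = sigmoid(W_f * x)
+    #   c_t = f . c_{t-1} + i . tanh(W_c * x)
+    #   o   = sigmoid(W_o * x)
+    #   h   = o . tanh(c_t)
+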
+    def init_hidden(self, batch_size, image_size, init=0.0):
+        height, width = image_size
+        return torch.ones(batch_size, self.ch, height, width).to(
+            dtype=self.conv_i.weight.dtype, device=self.conv_i.weight.device) * init
+
+    def forward(self, input_tensor, input_cell_state=None):
+        if input_cell_state is None:
+            batch_size, _, height, width = input_tensor.size()
+            input_cell_state = self.init_hidden(batch_size, (height, width))
+
+        conv_i = self.conv_i(input_tensor)
+        sigmoid_i = torch.sigmoid(conv_i)
+
+        conv_f = self.conv_f(input_tensor)
+        sigmoid_f = torch.sigmoid(conv_f)
+
+        cell_state = sigmoid_f * input_cell_state + sigmoid_i * torch.tanh(self.conv_c(input_tensor))
+
+        conv_o = self.conv_o(input_tensor)
+        sigmoid_o = torch.sigmoid(conv_o)
+
+        lstm_feats = sigmoid_o * torch.tanh(cell_state)
+
+        attention_map = self.conv_attention_map(lstm_feats)
+        attention_map = torch.sigmoid(attention_map)
+
+        ret = {
+            "attention_map": attention_map,
+            "cell_state": cell_state,
+            "lstm_feats": lstm_feats
+        }
+        return ret
+
+
+class AttentiveRNNBlock(nn.Module):
+    def __init__(self, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1, groups=1,
+                 dilation=1):
+        """
+        :type kernel_size: iterable or int
+        """
+        super(AttentiveRNNBlock, self).__init__()
+        if kernel_size is None:
+            kernel_size = [3, 3]
+        self.blocks = blocks
+        self.layers = layers
+        self.input_ch = input_ch
+        self.out_ch = out_ch
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+        self.groups = groups
+        self.dilation = dilation
+        self.sigmoid = nn.Sigmoid()
+        self.resnet = nn.Sequential(
+            ResNetBlock(
+                blocks=self.blocks,
+                layers=self.layers,
+                input_ch=self.input_ch,
+                out_ch=self.out_ch,
+                kernel_size=self.kernel_size,
+                stride=self.stride,
+                padding=self.padding,
+                groups=self.groups,
+                dilation=self.dilation
+            )
+        )
+        self.LSTM = mySequential(
+            ConvLSTM(
+                ch=out_ch, kernel_size=kernel_size,
+            )
+        )
+
+    def forward(self, original_image, prev_cell_state=None):
+        x = self.resnet(original_image)
+        lstm_ret = self.LSTM(x, prev_cell_state)
+        attention_map = lstm_ret["attention_map"]
+        cell_state = lstm_ret['cell_state']
+        lstm_feats = lstm_ret["lstm_feats"]
+        x = attention_map * original_image
+        ret = {
+            'x': x,
+            'attention_map': attention_map,
+            'cell_state': cell_state,
+            'lstm_feats': lstm_feats
+        }
+        return ret
+
+
+class AttentiveRNN(nn.Module):
+    def __init__(self, repetition, blocks=3, layers=1, input_ch=3, out_ch=32, kernel_size=None, stride=1, padding=1,
+                 groups=1, dilation=1):
+        """
+        :type kernel_size: iterable or int
+        """
+        super(AttentiveRNN, self).__init__()
+        if kernel_size is None:
+            kernel_size = [3, 3]
+        self.blocks = blocks
+        self.layers = layers
+        self.input_ch = input_ch
+        self.out_ch = out_ch
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+        self.groups = groups
+        self.dilation = dilation
+        self.repetition = repetition
+        self.arnn_block = mySequential(
+            AttentiveRNNBlock(
+                blocks=blocks,
+                layers=layers,
+                input_ch=input_ch,
+                out_ch=out_ch,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                groups=groups,
+                dilation=dilation
+            )
+        )
+        # NOTE: as in ResNetBlock, the same block object is appended repetition
+        # times, so all repetitions share weights; kept as-is for checkpoint
+        # compatibility.
+        self.arnn_blocks = nn.ModuleList()
+        for _ in range(repetition):
+            self.arnn_blocks.append(
+                self.arnn_block
+            )
+        self.name = "AttentiveRNN"
+
+    def forward(self, input_img):
+        cell_state = None
+        attention_map = []
+        lstm_feats = []
+        x = input_img
+        for arnn_block in self.arnn_blocks:
+            arnn_block_return = arnn_block(x, cell_state)
+            attention_map_i = arnn_block_return['attention_map']
+            lstm_feats_i = arnn_block_return['lstm_feats']
+            cell_state = arnn_block_return['cell_state']
+            x = arnn_block_return['x']
+
+            attention_map.append(attention_map_i)
+            lstm_feats.append(lstm_feats_i)
+        ret = {
+            'x': x,
+            'attention_map_list': attention_map,
+            'lstm_feats': lstm_feats
+        }
+        return ret
+
+    def loss(self, input_image_tensor, difference_maskmap, theta=0.8):
+        # attention loss: with theta < 1 the exponent shrinks as index grows,
+        # so later attention maps are weighted more heavily
+        self.theta = theta
+        inference_ret = self.forward(input_image_tensor)
+        loss = 0.0
+        n = len(inference_ret['attention_map_list'])
+        for index, attention_map in enumerate(inference_ret['attention_map_list']):
+            mse_loss = (self.theta ** (n - index + 1)) * nn.MSELoss()(attention_map, difference_maskmap)
+            loss += mse_loss
+        return loss
+
+# Need work
+
+if __name__ == "__main__":
+    from torchinfo import summary
+
+    torch.set_default_tensor_type(torch.FloatTensor)
+
+    generator = AttentiveRNN(3, blocks=2)
+    batch_size = 5
+    summary(generator, input_size=(batch_size, 3, 960, 540))
+++ model/Classifier.py
@@ -0,0 +1,106 @@
+from torch import nn
+
+class Conv3by3(nn.Module):
+    def __init__(self, in_ch, out_ch):
+        super(Conv3by3, self).__init__()
+        self.conv3by3 = nn.Sequential(
+            nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1),
+            nn.ReLU()
+        )
+
+    def forward(self, x):
+        return self.conv3by3(x)
+
+class Resnet(nn.Module):
+    def __init__(self, classes=2, in_ch=3):
+        super(Resnet, self).__init__()
+        self.firstconv = nn.Sequential(
+            nn.Conv2d(in_channels=in_ch, out_channels=64, kernel_size=7),
+            nn.ReLU(),
+            nn.AvgPool2d(kernel_size=2, stride=2)
+        )
+        self.block1_1 = nn.Sequential(
+            Conv3by3(64, 64),
+            Conv3by3(64, 64),
+        )
+        self.block1_2 = nn.Sequential(
+            Conv3by3(64, 64),
+            Conv3by3(64, 64),
+        )
+        self.block1_3 = nn.Sequential(
+            Conv3by3(64, 64),
+            Conv3by3(64, 64),
+        )
+
+        self.blockshort_1to2 = nn.Sequential(
+            nn.AvgPool2d(kernel_size=2, stride=2)
+        )
+
+        self.block2_1 = nn.Sequential(
+            Conv3by3(64, 128),
+            Conv3by3(128, 128),
+        )
+        self.block2_2 = nn.Sequential(
+            Conv3by3(128, 128),
+            Conv3by3(128, 128),
+        )
+        self.block2_3 = nn.Sequential(
+            Conv3by3(128, 128),
+            Conv3by3(128, 128),
+        )
+
+        self.blockshort_2to3 = nn.Sequential(
+            nn.AvgPool2d(kernel_size=2, stride=2),
+            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=1, stride=1)
+        )
+
+        self.block3_1 = nn.Sequential(
+            Conv3by3(128, 256),
+            Conv3by3(256, 256),
+        )
+        self.block3_2 = nn.Sequential(
+            Conv3by3(256, 256),
+            Conv3by3(256, 256),
+        )
+        self.block3_3 = nn.Sequential(
+            Conv3by3(256, 256),
+            Conv3by3(256, 256),
+        )
+
+        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(256, classes)  # classes defaults to 2 (rain / no rain)
+
+    def forward(self, x):
+        x = self.firstconv(x)
+
+        identity = x
+        out = self.block1_1(x)
+        out = out + identity
+        out = self.block1_2(out)
+        out = out + identity
+        out = self.block1_3(out)
+        out = out + identity
+
+        out = self.block2_1(out)
+        out = self.blockshort_1to2(out)
+        identity = out
+        out = self.block2_2(out)
+        out = out + identity
+        out = self.block2_3(out)
+        out = out + identity
+
+        out = self.block3_1(out)
+        out = self.blockshort_2to3(out)
+        identity = out
+        out = self.block3_2(out)
+        out = out + identity
+        out = self.block3_3(out)
+        out = out + identity
+
+        out = self.global_pool(out)
+        out = out.view(out.size(0), -1)
+        out = self.fc(out)
+
+        return out
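+
+
+# Minimal smoke test (sketch): 512x512 matches the crop size used in action.py.
+if __name__ == "__main__":
+    import torch
+
+    model = Resnet(classes=2)
+    logits = model(torch.randn(1, 3, 512, 512))
+    print(logits.shape)  # expected: torch.Size([1, 2])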
+++ subfuction/generate.py
@@ -0,0 +1,55 @@
+from haversine import haversine
+import networkx as nx
+import geojson
+
+
+with open("D:/takensoft/project2/data/기타 가공/데이터/osm.geojson", encoding='utf-8') as f:
+    gj = geojson.load(f)
+
+def switch_xy(tuples):
+    x, y = tuples
+    return (y, x)
+
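+# GeoJSON stores coordinates as (lon, lat) while the haversine package expects
+# (lat, lon); switch_xy flips between the two conventions before nodes are
+# added to the graph.
+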
+G = nx.Graph()
+
+features_list = gj['features']
+for j in range(len(features_list)):
+    features = features_list[j]
+    lines = features['geometry']['coordinates'][0]
+    print(j)  # progress indicator
+
+    for i in range(len(lines) - 1):
+        G.add_edge(switch_xy(lines[i]), switch_xy(lines[i + 1]),
+                   fclass=features['properties']['fclass'],
+                   oneway=features['properties']['oneway'])
+
+# connected_component_subgraphs was removed from networkx, so build the
+# subgraphs by hand; note that list(sg)[0] takes the first component found,
+# not necessarily the largest one
+sg = (G.subgraph(c) for c in nx.connected_components(G))
+sg = list(sg)[0]
+
+for n0, n1 in G.edges():
+    dist = haversine(n0, n1, unit='m')
+    G.edges[n0, n1]["dist"] = dist
+
+df = nx.to_pandas_edgelist(G)
+
+li_source = list(df['source'])
+li_source_x = []
+li_source_y = []
+
+for i in li_source:
+    li_source_x.append(str(i[0]))
+    li_source_y.append(str(i[1]))
+df['source_x'] = li_source_x
+df['source_y'] = li_source_y
+
+li_target = list(df['target'])
+li_target_x = []
+li_target_y = []
+
+for i in li_target:
+    li_target_x.append(str(i[0]))
+    li_target_y.append(str(i[1]))
+df['target_x'] = li_target_x
+df['target_y'] = li_target_y
+df = df.drop(['source', 'target'], axis=1)
+df = df.reset_index()
+df.to_csv('node.csv', encoding='euc-kr')
+
+++ subfuction/image_crop.py
@@ -0,0 +1,37 @@
+import cv2
+import os
+import glob
+from joblib import Parallel, delayed
+
+def crop_image(image_path, crop_size, start_point):
+    # returns the cropped image, or None if the file is not an image, cannot
+    # be read, or is too small for the requested crop
+    if image_path.endswith(".jpg") or image_path.endswith(".png"):
+        image = cv2.imread(image_path)
+        if image is None:
+            print(f"Image {os.path.basename(image_path)} could not be read.")
+            return None
+        height, width = image.shape[:2]
+
+        if width > start_point[0] + crop_size[0] and height > start_point[1] + crop_size[1]:
+            cropped_image = image[start_point[1]:start_point[1] + crop_size[1],
+                                  start_point[0]:start_point[0] + crop_size[0]]
+            return cropped_image
+        else:
+            print(f"Image {os.path.basename(image_path)} is too small to be cropped with the current settings.")
+            return None
+    return None
+
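+# crop_image follows OpenCV's row-major indexing, image[y0:y0+h, x0:x0+w]:
+# start_point is (x, y) and crop_size is (width, height).
+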
+def crop_and_save(image_path, output_directory, crop_size, start_point):
+    cropped = crop_image(image_path, crop_size, start_point)
+    if cropped is not None:
+        cv2.imwrite(os.path.join(output_directory, os.path.basename(image_path)), cropped)
+
+def crop_images_parallel(image_paths, output_directory, crop_size, start_point):
+    if not os.path.exists(output_directory):
+        os.makedirs(output_directory)
+
+    # run the cropping function in parallel
+    Parallel(n_jobs=-1)(delayed(crop_and_save)(image_path, output_directory, crop_size, start_point)
+                        for image_path in image_paths)
+
+
+# guard the batch job so importing crop_image (as action.py does) does not run it
+if __name__ == "__main__":
+    output_directory = "/home/takensoft/Pictures/test512_512/rainy/"
+
+    # get all image paths in the directory
+    # image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/하드디스크 화창한날(17개)/**/*.png")
+    # image_paths += glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/7월19일 화창한날(8개)/**/*.png")
+    image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/비오는날 프레임 추출/7월11일 폭우(3개)/**/*.png")
+    image_paths += glob.glob("/home/takensoft/Pictures/폭우 빗방울 (475개)/*.png")
+
+    crop_size = (512, 512)  # width and height for the cropped images
+    start_point = (750, 450)  # upper-left point where the crop starts
+
+    crop_images_parallel(image_paths, output_directory, crop_size, start_point)
+++ subfuction/save_pickle.py
@@ -0,0 +1,26 @@
+from database.database import DB
+import pandas as pd
+
+import networkx as nx
+from itertools import tee
+
+def pairwise(iterable):
+    """Return successive overlapping pairs:
+    s -> (s0, s1), (s1, s2), (s2, s3), ..."""
+    a, b = tee(iterable)
+    next(b, None)
+    return zip(a, b)
+
+def switch_xy(tuples):
+    x, y = tuples
+    return (y, x)
+
+
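+# NOTE: db_get_node is assumed to exist on DB (it is not defined in database.py
+# in this diff); it is expected to return rows of
+# (index, source_x, source_y, target_x, target_y, distance).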
+db = DB()
+df = pd.DataFrame(db.db_get_node())
+df.columns = ['index', 'source_x', 'source_y', 'target_x', 'target_y', 'distance']
+G = nx.Graph()
+for j in range(len(df)):
+    G.add_edge((df['source_x'][j], df['source_y'][j]),
+               (df['target_x'][j], df['target_y'][j]),
+               length=df['distance'][j])
+# nx.write_gpickle was removed in networkx 3.x; this call assumes networkx < 3
+nx.write_gpickle(G, 'OSM_gpickle.gpickle')
+++ weights/ARNN_trained_weight_6_3_2.pt
Binary file is not shown |
+++ weights/Classifier_512.pt
Binary file is not shown |