--- action.py
+++ action.py
... | ... | @@ -33,8 +33,6 @@ |
33 | 33 |
}, 200 |
34 | 34 |
|
35 | 35 |
|
36 |
- |
|
37 |
- |
|
38 | 36 |
@Action.route('/image_anal') |
39 | 37 |
class fileUpload(Resource): |
40 | 38 |
@Action.doc(responses={200: 'Success'}) |
... | ... | @@ -48,9 +46,9 @@ |
48 | 46 |
crop_size = (512, 512) |
49 | 47 |
start_point = (750, 450) |
50 | 48 |
tf_toTensor = ToTensor() |
51 |
- clasifier = Classifier() |
|
52 |
- clasifier.load_state_dict(torch.load("weights/Classifier_512.pt")) |
|
53 |
- clasifier.to(device=device) |
|
49 |
+ classifier = Classifier() |
|
50 |
+ classifier.load_state_dict(torch.load("weights/Classifier_512.pt")) |
|
51 |
+ classifier.to(device=device) |
|
54 | 52 |
dir = os.getcwd() |
55 | 53 |
lat = float(request.json['gps_x']) |
56 | 54 |
lon = float(request.json['gps_y']) |
... | ... | @@ -61,16 +59,18 @@ |
61 | 59 |
if not image: |
62 | 60 |
return { |
63 | 61 |
'node': (lat, lon), |
64 |
- 'rain': 'rain', |
|
62 |
+ 'rain': None, |
|
65 | 63 |
}, 500 |
66 | 64 |
image_tensor = tf_toTensor(image) |
67 |
- image_tensor.to(device) |
|
68 |
- image_arnn = AttentiveRNN(image_tensor) |
|
69 |
- result = Classifier(image_arnn) |
|
65 |
+ image_tensor = image_tensor.unsqueeze(0) |
|
66 |
+ image_tensor = image_tensor.to(device) |
|
67 |
+ image_arnn = arnn(image_tensor) |
|
68 |
+ result = classifier(image_arnn['x']) |
|
70 | 69 |
result = result.to("cpu") |
71 |
- if result == 0: |
|
70 |
+ _, predicted = torch.max(result.data, 1) |
|
71 |
+ if predicted == 0: |
|
72 | 72 |
rain = False |
73 |
- else: # elif result == 1 |
|
73 |
+ else: # elif result == 1 |
|
74 | 74 |
rain = True |
75 | 75 |
user_id = 'test' |
76 | 76 |
action_success = True |
--- auth.py
+++ auth.py
... | ... | @@ -6,9 +6,6 @@ |
6 | 6 |
import jwt |
7 | 7 |
|
8 | 8 |
|
9 |
- |
|
10 |
- |
|
11 |
- |
|
12 | 9 |
users = {} |
13 | 10 |
|
14 | 11 |
Auth = Namespace( |
... | ... | @@ -31,7 +28,6 @@ |
31 | 28 |
'password': fields.String(description='Password', required=True),'email': fields.String(description='email', required=True),'user_sex': fields.String(description='sex', required=True),'phone': fields.String(description='phone', required=True) |
32 | 29 |
|
33 | 30 |
}) |
34 |
- |
|
35 | 31 |
|
36 | 32 |
|
37 | 33 |
@Auth.route('/id') |
--- model/Classifier.py
+++ model/Classifier.py
... | ... | @@ -68,7 +68,7 @@ |
68 | 68 |
) |
69 | 69 |
|
70 | 70 |
self.global_pool = nn.AdaptiveAvgPool2d((1, 1)) |
71 |
- self.fc = nn.Linear(256, classes) # assuming 10 classes for the classification |
|
71 |
+ self.fc = nn.Linear(256, classes) |
|
72 | 72 |
|
73 | 73 |
def forward(self, x): |
74 | 74 |
x = self.firstconv(x) |
+++ requirements.txt
... | ... | @@ -0,0 +1,11 @@ |
1 | +torch~=2.0.1+cu118 | |
2 | +flask~=2.3.3 | |
3 | +jwt~=1.3.1 | |
4 | +werkzeug~=2.3.7 | |
5 | +torchvision~=0.15.2+cu118 | |
6 | +networkx~=3.0 | |
7 | +geojson~=3.0.1 | |
8 | +haversine~=2.8.0 | |
9 | +opencv-python~=4.8.0.76 | |
10 | +joblib~=1.3.2 | |
11 | +pandas~=2.0.3 (no newline at end of file) |
--- subfuction/image_crop.py
+++ subfuction/image_crop.py
... | ... | @@ -22,16 +22,16 @@ |
22 | 22 |
# run the cropping function in parallel |
23 | 23 |
Parallel(n_jobs=-1)(delayed(crop_image)(image_path, output_directory, crop_size, start_point) for image_path in image_paths) |
24 | 24 |
|
25 |
+if __name__ == "__main__": |
|
26 |
+ output_directory = "/home/takensoft/Pictures/test512_512/rainy/" |
|
25 | 27 |
|
26 |
-output_directory = "/home/takensoft/Pictures/test512_512/rainy/" |
|
28 |
+ # get all image paths in the directory |
|
29 |
+ # image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/하드디스크 화창한날(17개)/**/*.png") |
|
30 |
+ # image_paths += glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/7월19일 화창한날(8개)/**/*.png") |
|
31 |
+ image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/비오는날 프레임 추출/7월11일 폭우(3개)/**/*.png") |
|
32 |
+ image_paths += glob.glob("/home/takensoft/Pictures/폭우 빗방울 (475개)/*.png") |
|
27 | 33 |
|
28 |
-# get all image paths in the directory |
|
29 |
-# image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/하드디스크 화창한날(17개)/**/*.png") |
|
30 |
-# image_paths += glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/화창한날 프레임 추출/7월19일 화창한날(8개)/**/*.png") |
|
31 |
-image_paths = glob.glob("/home/takensoft/Pictures/화창한날, 비오는날 프레임2000장/비오는날 프레임 추출/7월11일 폭우(3개)/**/*.png") |
|
32 |
-image_paths += glob.glob("/home/takensoft/Pictures/폭우 빗방울 (475개)/*.png") |
|
34 |
+ crop_size = (512, 512) # width and height you want for your cropped images |
|
35 |
+ start_point = (750, 450) # upper left point where the crop should start |
|
33 | 36 |
|
34 |
-crop_size = (512, 512) # width and height you want for your cropped images |
|
35 |
-start_point = (750, 450) # upper left point where the crop should start |
|
36 |
- |
|
37 |
-crop_images_parallel(image_paths, output_directory, crop_size, start_point) |
|
37 |
+ crop_images_parallel(image_paths, output_directory, crop_size, start_point) |