
--- model/Autoencoder.py
+++ model/Autoencoder.py
@@ -39,8 +39,12 @@
         self.skip_output3 = nn.Conv2d(in_channels=32, out_channels=3, kernel_size=3, stride=1, padding=1, bias=False)
 
         # Loss specific definitions
-        # the paper uses vgg16 for features extraction,
-        # however, since vgg16 is not a light model, we may consider it to be replaced
+        # TODO
+        # The paper uses vgg16 for feature extraction in the loss function for the
+        # Autoencoder. However, since vgg16 is not a light model, we may consider
+        # replacing it with another pretrained network, or even dropping the feature
+        # extractor entirely. I honestly do not think a neural network like VGG is
+        # strictly necessary; other image preprocessing such as MSCN, implemented before, may suffice.
         self.vgg = vgg16(pretrained=True).features
         self.vgg.eval()
         for param in self.vgg.parameters():
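
For context, the TODO above concerns a perceptual (feature-space) loss: the frozen vgg16 features of the reconstruction and the target are compared instead of raw pixels. Below is a minimal sketch of how that wiring typically looks; the PerceptualLoss class name, the feature_layers cutoff, and the choice of L1 distance are illustrative assumptions, not code from this repository.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import vgg16


class PerceptualLoss(nn.Module):
    def __init__(self, feature_layers=16):
        super().__init__()
        # Keep only the first few convolutional blocks; deeper layers add cost
        # without much benefit for a reconstruction loss.
        self.features = vgg16(pretrained=True).features[:feature_layers]
        self.features.eval()
        # Freeze VGG so it acts purely as a fixed feature extractor and
        # gradients flow only into the autoencoder being trained.
        for param in self.features.parameters():
            param.requires_grad = False

    def forward(self, output, target):
        # L1 distance in VGG feature space; MSE is an equally common choice.
        return F.l1_loss(self.features(output), self.features(target))

If MSCN normalization were used instead, as the comment suggests, the loss would compare locally contrast-normalized images directly, removing the pretrained network from the loop entirely.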