윤영준 윤영준 2023-06-27
Added comment
@a3a1a70292a295add9a1d0de6f2ccc39111dc3ad
model/Autoencoder.py
--- model/Autoencoder.py
+++ model/Autoencoder.py
@@ -39,8 +39,12 @@
         self.skip_output3 = nn.Conv2d(in_channels=32, out_channels=3, kernel_size=3, stride=1, padding=1, bias=False)
 
         # Loss specific definitions
-        # the paper uses vgg16 for features extraction,
-        # however, since vgg16 is not a light model, we may consider it to be replaced
+        # TODO
+        # The paper uses vgg16 for feature extraction; however, since vgg16 is not a light
+        # model, we may consider replacing it. For the Autoencoder's loss function, we may also have to use a
+        # different pretrained network as the feature extractor, or even drop the feature extractor entirely.
+        # I honestly do not think using a neural network like VGG is strictly necessary; it may have to be replaced
+        # with other image preprocessing such as MSCN, which was implemented before.
         self.vgg = vgg16(pretrained=True).features
         self.vgg.eval()
         for param in self.vgg.parameters():
Add a comment
List