
Commit cc0df99

reran and refined old tutorials
1 parent 088bdb6 commit cc0df99

File tree

9 files changed (+52, -3124 lines)


.gitignore

Lines changed: 3 additions & 1 deletion
@@ -1,4 +1,6 @@
 .idea/
 ML/Pytorch/more_advanced/image_captioning/flickr8k/
 ML/algorithms/svm/__pycache__/utils.cpython-38.pyc
-__pycache__/
+__pycache__/
+*.pth.tar
+*.DS_STORE

ML/Pytorch/Basics/custom_dataset/custom_FCNN.py

Lines changed: 0 additions & 131 deletions
This file was deleted.

ML/Pytorch/Basics/custom_dataset/custom_dataset.py

Lines changed: 13 additions & 6 deletions
@@ -6,7 +6,7 @@
 
 Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
 * 2020-04-03 Initial coding
-
+* 2022-12-19 Updated with better comments, improved code using PIL, and checked code still functions as intended.
 """
 
 # Imports
@@ -17,7 +17,7 @@
 import torchvision
 import os
 import pandas as pd
-from skimage import io
+from PIL import Image
 from torch.utils.data import (
     Dataset,
     DataLoader,
@@ -35,7 +35,7 @@ def __len__(self):
 
     def __getitem__(self, index):
         img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
-        image = io.imread(img_path)
+        image = Image.open(img_path)
         y_label = torch.tensor(int(self.annotations.iloc[index, 1]))
 
         if self.transform:
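For readers following along, here is a self-contained sketch (not part of the commit) of how the PIL-based loading above fits into the full Dataset class. The CatsAndDogsDataset name, the CSV layout (filename in column 0, label in column 1), and the paths in the usage comment are assumptions taken from the surrounding diff, not guaranteed to match the file verbatim.

# Hedged sketch of the Dataset after the skimage -> PIL switch.
# Class name, CSV layout, and file paths are assumptions for illustration.
import os

import pandas as pd
import torch
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader, Dataset


class CatsAndDogsDataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        self.annotations = pd.read_csv(csv_file)  # assumed columns: [filename, label]
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        image = Image.open(img_path)  # PIL.Image instead of a numpy array from skimage
        y_label = torch.tensor(int(self.annotations.iloc[index, 1]))

        if self.transform:
            image = self.transform(image)  # transforms.ToTensor() accepts PIL images directly

        return image, y_label


# Usage sketch (paths are placeholders):
# dataset = CatsAndDogsDataset("cats_dogs.csv", "cats_dogs_resized", transform=transforms.ToTensor())
# loader = DataLoader(dataset, batch_size=32, shuffle=True)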
@@ -50,7 +50,7 @@ def __getitem__(self, index):
 # Hyperparameters
 in_channel = 3
 num_classes = 2
-learning_rate = 1e-3
+learning_rate = 3e-4
 batch_size = 32
 num_epochs = 10
 
@@ -69,12 +69,19 @@ def __getitem__(self, index):
 test_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=True)
 
 # Model
-model = torchvision.models.googlenet(pretrained=True)
+model = torchvision.models.googlenet(weights="DEFAULT")
+
+# freeze all layers, change final linear layer with num_classes
+for param in model.parameters():
+    param.requires_grad = False
+
+# final layer is not frozen
+model.fc = nn.Linear(in_features=1024, out_features=num_classes)
 model.to(device)
 
 # Loss and optimizer
 criterion = nn.CrossEntropyLoss()
-optimizer = optim.Adam(model.parameters(), lr=learning_rate)
+optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)
 
 # Train Network
 for epoch in range(num_epochs):
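To make the effect of the freezing change concrete, here is a standalone sketch (not part of the commit) that reproduces the frozen-backbone setup and checks which parameters remain trainable. It assumes a torchvision version new enough for the weights= API (roughly 0.13+), and the final printout is only illustrative.

# Hedged sketch of the frozen-backbone GoogLeNet head introduced in this commit;
# standalone for illustration, assumes torchvision >= 0.13 for weights="DEFAULT".
import torch.nn as nn
import torch.optim as optim
import torchvision

num_classes = 2  # matches the tutorial's two-class setup

# Load ImageNet-pretrained GoogLeNet via the newer weights API.
model = torchvision.models.googlenet(weights="DEFAULT")

# Freeze the pretrained backbone so its weights are not updated during training.
for param in model.parameters():
    param.requires_grad = False

# Replacing the classifier creates fresh parameters with requires_grad=True,
# so only this final linear layer will actually learn.
model.fc = nn.Linear(in_features=1024, out_features=num_classes)

# Adam is given all parameters, but frozen ones never receive gradients;
# weight_decay adds L2 regularization on the trainable head.
optimizer = optim.Adam(model.parameters(), lr=3e-4, weight_decay=1e-5)

trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # expected: ['fc.weight', 'fc.bias']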
