
Commit 3f53d68

update pretrain progress bar tutorial
1 parent: 058742e

File tree

2 files changed: 9 additions, 26 deletions

ML/Pytorch/Basics/pytorch_pretrain_finetune.py

Lines changed: 5 additions & 16 deletions
@@ -3,11 +3,9 @@
 and modifies this to train on the CIFAR10 dataset. The same method generalizes
 well to other datasets, but the modifications to the network may need to be changed.
 
-Video explanation: https://youtu.be/U4bHxEhMGNk
-Got any questions leave a comment on youtube :)
-
 Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
 * 2020-04-08 Initial coding
+* 2022-12-19 Updated comments, minor code changes, made sure it works with latest PyTorch
 
 """
 
@@ -22,8 +20,8 @@
 )  # Gives easier dataset managment and creates mini batches
 import torchvision.datasets as datasets  # Has standard datasets we can import in a nice way
 import torchvision.transforms as transforms  # Transformations we can perform on our dataset
+from tqdm import tqdm
 
-# Set device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Hyperparameters
@@ -32,25 +30,16 @@
 batch_size = 1024
 num_epochs = 5
 
-# Simple Identity class that let's input pass without changes
-class Identity(nn.Module):
-    def __init__(self):
-        super(Identity, self).__init__()
-
-    def forward(self, x):
-        return x
-
-
 # Load pretrain model & modify it
-model = torchvision.models.vgg16(pretrained=True)
+model = torchvision.models.vgg16(weights="DEFAULT")
 
 # If you want to do finetuning then set requires_grad = False
 # Remove these two lines if you want to train entire model,
 # and only want to load the pretrain weights.
 for param in model.parameters():
     param.requires_grad = False
 
-model.avgpool = Identity()
+model.avgpool = nn.Identity()
 model.classifier = nn.Sequential(
     nn.Linear(512, 100), nn.ReLU(), nn.Linear(100, num_classes)
 )
@@ -71,7 +60,7 @@ def forward(self, x):
 for epoch in range(num_epochs):
     losses = []
 
-    for batch_idx, (data, targets) in enumerate(train_loader):
+    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
         # Get data to cuda if possible
         data = data.to(device=device)
         targets = targets.to(device=device)
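For context on this change: torchvision 0.13+ replaced the pretrained=True flag with a weights argument, where "DEFAULT" (or the VGG16_Weights enum) loads the ImageNet weights, and the hand-rolled Identity module is no longer needed because nn.Identity ships with PyTorch. Below is a minimal sketch of the updated model-setup pattern, assuming torchvision >= 0.13 and the 10-class CIFAR10 head used in the tutorial; it is an illustration, not the full training script.

# Minimal sketch of the updated pretrain-and-modify pattern (assumes torchvision >= 0.13).
import torch
import torch.nn as nn
import torchvision
from torchvision.models import VGG16_Weights

num_classes = 10  # CIFAR10

# "DEFAULT" resolves to the recommended ImageNet weights for VGG16;
# the explicit enum form is VGG16_Weights.IMAGENET1K_V1.
model = torchvision.models.vgg16(weights=VGG16_Weights.DEFAULT)

# Freeze the backbone so only the new classifier head is trained (finetuning).
for param in model.parameters():
    param.requires_grad = False

# nn.Identity passes its input through unchanged, replacing the custom Identity class.
model.avgpool = nn.Identity()
model.classifier = nn.Sequential(
    nn.Linear(512, 100), nn.ReLU(), nn.Linear(100, num_classes)
)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)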

ML/Pytorch/Basics/pytorch_progress_bar.py

Lines changed: 4 additions & 10 deletions
@@ -3,22 +3,20 @@
 from tqdm import tqdm
 from torch.utils.data import TensorDataset, DataLoader
 
-# Create a simple toy dataset example, normally this
-# would be doing custom class with __getitem__ etc,
-# which we have done in custom dataset tutorials
+# Create a simple toy dataset
 x = torch.randn((1000, 3, 224, 224))
 y = torch.randint(low=0, high=10, size=(1000, 1))
 ds = TensorDataset(x, y)
 loader = DataLoader(ds, batch_size=8)
 
 
 model = nn.Sequential(
-    nn.Conv2d(3, 10, kernel_size=3, padding=1, stride=1),
+    nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3, padding=1, stride=1),
     nn.Flatten(),
-    nn.Linear(10*224*224, 10),
+    nn.Linear(10 * 224 * 224, 10),
 )
 
-NUM_EPOCHS = 100
+NUM_EPOCHS = 10
 for epoch in range(NUM_EPOCHS):
     loop = tqdm(loader)
     for idx, (x, y) in enumerate(loop):
@@ -35,7 +33,3 @@
         loop.set_postfix(loss=torch.rand(1).item(), acc=torch.rand(1).item())
 
 # There you go. Hope it was useful :)
-
-
-
-
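As a usage note, the pattern this file demonstrates is wrapping the DataLoader in tqdm and updating the bar each iteration via set_description and set_postfix. A minimal self-contained sketch follows; the toy model, loss, optimizer, and accuracy computation are hypothetical stand-ins for the random placeholder values shown in the diff above.

# Minimal sketch of the tqdm progress-bar pattern (hypothetical toy model and data).
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm

x = torch.randn((100, 3, 32, 32))
y = torch.randint(low=0, high=10, size=(100,))
loader = DataLoader(TensorDataset(x, y), batch_size=8)

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)

NUM_EPOCHS = 2
for epoch in range(NUM_EPOCHS):
    loop = tqdm(loader)  # wrap the DataLoader to get a per-batch progress bar
    for data, targets in loop:
        scores = model(data)
        loss = criterion(scores, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        acc = (scores.argmax(dim=1) == targets).float().mean()
        loop.set_description(f"Epoch [{epoch + 1}/{NUM_EPOCHS}]")  # text to the left of the bar
        loop.set_postfix(loss=loss.item(), acc=acc.item())  # live stats to the right of the bar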
