44
44
######################################################################
45
45
# Steps
46
46
# -----
47
- #
47
+ #
48
48
# Steps 1 through 4 set up our data and neural network for training. The
49
49
# process of zeroing out the gradients happens in step 5. If you already
50
50
# have your data and neural network built, skip to 5.
51
- #
51
+ #
52
52
# 1. Import all necessary libraries for loading our data
53
53
# 2. Load and normalize the dataset
54
54
# 3. Build the neural network
55
55
# 4. Define the loss function
56
56
# 5. Zero the gradients while training the network
57
- #
57
+ #
58
58
# 1. Import necessary libraries for loading our data
59
59
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
60
- #
60
+ #
61
61
# For this recipe, we will just be using ``torch`` and ``torchvision`` to
62
62
# access the dataset.
63
- #
63
+ #
64
64
65
65
import torch
66
66
76
76
######################################################################
77
77
# 2. Load and normalize the dataset
78
78
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
79
- #
79
+ #
80
80
# PyTorch features various built-in datasets (see the Loading Data recipe
81
81
# for more information).
82
- #
82
+ #
83
83
84
84
transform = transforms .Compose (
85
85
[transforms .ToTensor (),
102
102
######################################################################
103
103
# 3. Build the neural network
104
104
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
105
- #
105
+ #
106
106
# We will use a convolutional neural network. To learn more see the
107
107
# Defining a Neural Network recipe.
108
- #
108
+ #
109
109
110
110
class Net (nn .Module ):
111
111
def __init__ (self ):
@@ -130,9 +130,9 @@ def forward(self, x):
130
130
######################################################################
131
131
# 4. Define a Loss function and optimizer
132
132
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
133
- #
133
+ #
134
134
# Let’s use a Classification Cross-Entropy loss and SGD with momentum.
135
- #
135
+ #
136
136
137
137
net = Net ()
138
138
criterion = nn .CrossEntropyLoss ()
@@ -142,14 +142,14 @@ def forward(self, x):
142
142
######################################################################
143
143
# 5. Zero the gradients while training the network
144
144
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
145
- #
145
+ #
146
146
# This is when things start to get interesting. We simply have to loop
147
147
# over our data iterator, and feed the inputs to the network and optimize.
148
- #
148
+ #
149
149
# Notice that for each entity of data, we zero out the gradients. This is
150
150
# to ensure that we aren’t tracking any unnecessary information when we
151
151
# train our neural network.
152
- #
152
+ #
153
153
154
154
for epoch in range (2 ): # loop over the dataset multiple times
155
155
@@ -181,13 +181,13 @@ def forward(self, x):
181
181
# You can also use ``model.zero_grad()``. This is the same as using
182
182
# ``optimizer.zero_grad()`` as long as all your model parameters are in
183
183
# that optimizer. Use your best judgment to decide which one to use.
184
- #
184
+ #
185
185
# Congratulations! You have successfully zeroed out gradients in PyTorch.
186
- #
186
+ #
187
187
# Learn More
188
188
# ----------
189
- #
189
+ #
190
190
# Take a look at these other recipes to continue your learning:
191
- #
192
- # - `Loading data in PyTorch <https://pytorch.org/tutorials/recipes/recipes/loading_data_recipe .html>`__
191
+ #
192
+ # - `Loading data in PyTorch <https://pytorch.org/tutorials/beginner/basics/data_tutorial.html>`__
193
193
# - `Saving and loading models across devices in PyTorch <https://pytorch.org/tutorials/recipes/recipes/save_load_across_devices.html>`__
0 commit comments