From 12d2470931902eca6d5acdd04efec174c27bef19 Mon Sep 17 00:00:00 2001
From: tgilbert09 <24855209+tgilbert09@users.noreply.github.com>
Date: Sun, 25 Aug 2024 13:05:25 +0100
Subject: [PATCH] Change order of optimizer.step() in documentation

Commit be6e86342233bdc932deace52d7388073fbb66b4 changed the order of
optimization steps in the "# Backpropagation" section of the code (see
lines 161 to 163). This commit updates the order of optimization steps
in the description/comments to match the code.
---
 beginner_source/basics/optimization_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beginner_source/basics/optimization_tutorial.py b/beginner_source/basics/optimization_tutorial.py
index c6c327f8511..b5918dc09a1 100644
--- a/beginner_source/basics/optimization_tutorial.py
+++ b/beginner_source/basics/optimization_tutorial.py
@@ -134,9 +134,9 @@ def forward(self, x):
 
 #####################################
 # Inside the training loop, optimization happens in three steps:
-# * Call ``optimizer.zero_grad()`` to reset the gradients of model parameters. Gradients by default add up; to prevent double-counting, we explicitly zero them at each iteration.
 # * Backpropagate the prediction loss with a call to ``loss.backward()``. PyTorch deposits the gradients of the loss w.r.t. each parameter.
 # * Once we have our gradients, we call ``optimizer.step()`` to adjust the parameters by the gradients collected in the backward pass.
+# * Call ``optimizer.zero_grad()`` to reset the gradients of model parameters. Gradients by default add up; to prevent double-counting, we explicitly zero them at each iteration.
 
 
 ##########################################
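
Note for reviewers: below is a minimal, self-contained sketch of the reordered "Backpropagation" block that the updated bullets describe. The model, batch, and hyperparameters are placeholders invented for illustration and are not taken from the tutorial; only the relative order of ``loss.backward()``, ``optimizer.step()``, and ``optimizer.zero_grad()`` mirrors the patched text.

    import torch
    from torch import nn

    # Placeholder model, loss function, and optimizer (illustrative only).
    model = nn.Linear(28 * 28, 10)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

    # Dummy batch standing in for one iteration over a dataloader.
    X = torch.rand(64, 28 * 28)
    y = torch.randint(0, 10, (64,))

    pred = model(X)
    loss = loss_fn(pred, y)

    # Backpropagation, in the order the patched description now lists:
    loss.backward()        # deposit gradients of the loss w.r.t. each parameter
    optimizer.step()       # adjust parameters using the gradients just collected
    optimizer.zero_grad()  # reset gradients so they do not accumulate into the next iteration

Zeroing the gradients immediately after ``optimizer.step()`` is equivalent to zeroing them at the start of the iteration in this simple loop: all that matters is that the gradients are cleared before the next ``loss.backward()`` call, which is why only the comment order needed to change here, not the behavior.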