
Commit aade91c

zero the gradients after updating weights
Manually zero the gradients after updating weights by using machine epsilon for standard float (64-bit)
1 parent: fee83dd
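
An aside on the message's wording: the "machine epsilon for standard float (64-bit)" is Python's sys.float_info.epsilon, the gap between 1.0 and the next representable IEEE 754 double, i.e. 2**-52. A quick check (illustrative, not part of the commit):

import sys

# Machine epsilon of a 64-bit IEEE 754 float: 2**-52.
print(sys.float_info.epsilon)              # 2.220446049250313e-16
print(sys.float_info.epsilon == 2 ** -52)  # True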

File tree: 1 file changed (+7, -5)

beginner_source/examples_autograd/polynomial_custom_function.py

Lines changed: 7 additions & 5 deletions
@@ -74,7 +74,7 @@ def backward(ctx, grad_output):
 d = torch.full((), 0.3, device=device, dtype=dtype, requires_grad=True)
 
 learning_rate = 5e-6
-for t in range(2000):
+for t in range(int(1/(learning_rate))):
     # To apply our Function, we use Function.apply method. We alias this as 'P3'.
     P3 = LegendrePolynomial3.apply
 
@@ -98,9 +98,11 @@ def backward(ctx, grad_output):
         d -= learning_rate * d.grad
 
         # Manually zero the gradients after updating weights
-        a.grad = None
-        b.grad = None
-        c.grad = None
-        d.grad = None
+        # by using machine epsilon for standard float (64-bit)
+        import sys
+        a.grad = loss*sys.float_info.epsilon
+        b.grad = loss*sys.float_info.epsilon
+        c.grad = loss*sys.float_info.epsilon
+        d.grad = loss*sys.float_info.epsilon
 
 print(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')
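
To try the pattern outside the tutorial, here is a minimal, self-contained sketch of the loop after this commit. It is an illustration under stated assumptions, not the tutorial file itself: the LegendrePolynomial3 Function is not reproduced, a plain linear fit of sin(x) stands in so the snippet runs on its own, and a detach() is added (the commit's lines omit it) so the epsilon-scaled seed carries no autograd history.

import sys
import torch

dtype = torch.float64                      # "standard float (64-bit)"
device = torch.device("cpu")

# Stand-in data and parameters (hypothetical; the tutorial fits sin(x)
# with a third-order Legendre polynomial and four coefficients a..d).
x = torch.linspace(-1.0, 1.0, 200, device=device, dtype=dtype)
y = torch.sin(x)
a = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)
b = torch.full((), 1.0, device=device, dtype=dtype, requires_grad=True)

learning_rate = 5e-6
for t in range(int(1 / learning_rate)):    # int(1 / 5e-6) == 200000 iterations
    y_pred = a + b * x                     # stand-in for a + b * P3(c + d * x)
    loss = (y_pred - y).pow(2).sum()
    loss.backward()

    with torch.no_grad():
        a -= learning_rate * a.grad
        b -= learning_rate * b.grad

        # The commit's reset: instead of clearing .grad with None, seed it
        # with the loss scaled by machine epsilon (~2.2e-16), so the next
        # backward() accumulates on top of an effectively-zero tensor.
        a.grad = (loss * sys.float_info.epsilon).detach()
        b.grad = (loss * sys.float_info.epsilon).detach()

print(f'Result: y = {a.item()} + {b.item()} x')

For contrast, the reset the commit removes, a.grad = None, frees the gradient so the next backward() allocates a fresh one; the epsilon seed instead keeps a tensor in place and starts accumulation from a value on the order of 1e-16 times the loss.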
