diff --git a/beginner_source/examples_autograd/polynomial_autograd.py b/beginner_source/examples_autograd/polynomial_autograd.py
index 525d0c33ce9..d33ca8bcb90 100755
--- a/beginner_source/examples_autograd/polynomial_autograd.py
+++ b/beginner_source/examples_autograd/polynomial_autograd.py
@@ -1,4 +1,4 @@
-"""
+r"""
 PyTorch: Tensors and autograd
 -------------------------------
 
@@ -27,8 +27,8 @@
 # Create Tensors to hold input and outputs.
 # By default, requires_grad=False, which indicates that we do not need to
 # compute gradients with respect to these Tensors during the backward pass.
-x = torch.linspace(-math.pi, math.pi, 2000, dtype=dtype)
-y = torch.sin(x)
+x = torch.linspace(-1, 1, 2000, dtype=dtype)
+y = torch.exp(x)  # The Taylor expansion of exp(x) is 1 + x + (1/2!) x**2 + (1/3!) x**3 + ...
 
 # Create random Tensors for weights. For a third order polynomial, we need
 # 4 weights: y = a + b x + c x^2 + d x^3
@@ -39,8 +39,9 @@
 c = torch.randn((), dtype=dtype, requires_grad=True)
 d = torch.randn((), dtype=dtype, requires_grad=True)
 
-learning_rate = 1e-6
-for t in range(2000):
+initial_loss = 1.
+learning_rate = 1e-5
+for t in range(5000):
     # Forward pass: compute predicted y using operations on Tensors.
     y_pred = a + b * x + c * x ** 2 + d * x ** 3
 
@@ -48,8 +49,13 @@
     # Now loss is a Tensor of shape (1,)
     # loss.item() gets the scalar value held in the loss.
     loss = (y_pred - y).pow(2).sum()
+
+    # Record the initial loss so we can report subsequent losses relative to it.
+    if t == 0:
+        initial_loss = loss.item()
+
     if t % 100 == 99:
-        print(t, loss.item())
+        print(f'Iteration t = {t:4d} loss(t)/loss(0) = {loss.item() / initial_loss:10.6f} a = {a.item():10.6f} b = {b.item():10.6f} c = {c.item():10.6f} d = {d.item():10.6f}')
 
     # Use autograd to compute the backward pass. This call will compute the
     # gradient of loss with respect to all Tensors with requires_grad=True.
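
# A standalone sketch illustrating the Taylor-expansion comment introduced by the
# patch (an assumption-checking example, not part of the patch itself): the
# degree-3 Taylor polynomial of exp(x) around 0 has coefficients 1, 1, 1/2!, 1/3!,
# which is roughly where the trained a, b, c, d should land; a least-squares fit
# on [-1, 1] need not match the Taylor coefficients exactly.
import math

import torch

x = torch.linspace(-1, 1, 2000)
taylor = 1 + x + x ** 2 / math.factorial(2) + x ** 3 / math.factorial(3)
print('Taylor coefficients:', 1.0, 1.0, 1 / math.factorial(2), 1 / math.factorial(3))
print('max |exp(x) - taylor(x)| on [-1, 1]:', (torch.exp(x) - taylor).abs().max().item())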