@@ -26,4 +26,44 @@ int main() {
std::cout << x.grad() << std::endl; // x.grad() = 2
std::cout << w.grad() << std::endl; // w.grad() = 1
std::cout << b.grad() << std::endl; // b.grad() = 1
+
+ // ////////////////////////////////////////////////////////
+ //                BASIC AUTOGRAD EXAMPLE 2               //
+ // ////////////////////////////////////////////////////////
+
+ // Create random tensors of shape (10, 3) and (10, 2)
+ x = torch::randn({10, 3});
+ y = torch::randn({10, 2});
+
+ // Build a fully connected layer
+ auto linear = torch::nn::Linear(3, 2);
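+ // (Linear(3, 2): the weight has shape [2, 3] and the bias has shape [2])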
+ std::cout << "w: " << linear->weight << std::endl;
+ std::cout << "b: " << linear->bias << std::endl;
+
+ // Build loss function and optimizer
+ auto criterion = torch::nn::MSELoss();
+ auto optimizer = torch::optim::SGD(linear->parameters(), torch::optim::SGDOptions(0.01));
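+ // (SGDOptions(0.01) sets the learning rate; momentum and weight decay keep their defaults of 0)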
+
+ // Forward pass
+ auto pred = linear(x);
+
+ // Compute loss
+ auto loss = criterion(pred, y);
+ std::cout << "loss: " << loss.item().toFloat() << std::endl;
+
+ // Backward pass
+ loss.backward();
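+ // (backward() computes dL/dw and dL/db and accumulates them into the parameters' grad() fields)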
+
+ // Print out the gradients
+ std::cout << "dL/dw: " << linear->weight.grad() << std::endl;
+ std::cout << "dL/db: " << linear->bias.grad() << std::endl;
+
+ // 1-step gradient descent
+ optimizer.step();
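+ // (with these options, step() performs the plain SGD update p <- p - lr * p.grad() for each parameter)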
+
+ // Print out the loss after 1-step gradient descent
+ pred = linear(x);
+ loss = criterion(pred, y);
+ std::cout << "loss after 1-step optimization: " << loss.item().toFloat() << std::endl;
}
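
For reference, the single optimizer.step() call above is equivalent, for plain SGD with no momentum or weight decay, to the manual update sketched below. This is an illustrative sketch and not part of the commit; the helper name sgd_step and the hard-coded 0.01 learning rate are assumptions chosen to match SGDOptions(0.01).

#include <torch/torch.h>

// Minimal manual equivalent of one vanilla-SGD step: p <- p - lr * dL/dp.
void sgd_step(std::vector<torch::Tensor> params, double lr = 0.01) {
    torch::NoGradGuard no_grad;       // keep the update itself out of the autograd graph
    for (auto& p : params) {
        if (p.grad().defined()) {     // skip parameters that never received a gradient
            p -= lr * p.grad();       // in-place parameter update
        }
    }
}

// Usage with the layer from this example: sgd_step(linear->parameters());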