2 files changed: +43 −4 lines changed

CMakeLists.txt
@@ -7,10 +7,10 @@ find_package(Torch REQUIRED)
 add_executable(pytorch-cpp main.cpp)
 target_link_libraries(pytorch-cpp "${TORCH_LIBRARIES}")
 
-add_executable(pytorch_basics tutorials/basics/pytorch_basics/main.cpp)
-target_link_libraries(pytorch_basics "${TORCH_LIBRARIES}")
-# add_executable(linear_regression tutorials/basics/linear_regression/main.cpp)
-# target_link_libraries(linear_regression "${TORCH_LIBRARIES}")
+# add_executable(pytorch_basics tutorials/basics/pytorch_basics/main.cpp)
+# target_link_libraries(pytorch_basics "${TORCH_LIBRARIES}")
+add_executable(linear_regression tutorials/basics/linear_regression/main.cpp)
+target_link_libraries(linear_regression "${TORCH_LIBRARIES}")
 # add_executable(logistic_regression tutorials/basics/logistic_regression/main.cpp)
 # target_link_libraries(logistic_regression "${TORCH_LIBRARIES}")
 # add_executable(feedforward_neural_network tutorials/basics/feedforward_neural_network/main.cpp)
tutorials/basics/linear_regression/main.cpp
@@ -1,7 +1,46 @@
 // Copyright 2019 Omkar Prabhu
 #include <torch/torch.h>
+#include <ATen/ATen.h>
 #include <iostream>
+#include <vector>
 
 int main() {
   std::cout << "Linear Regression" << std::endl;
+
+  // Hyper parameters
+  int input_size = 1;
+  int output_size = 1;
+  int num_epochs = 60;
+  double learning_rate = 0.001;
+
+  // Sample dataset
+  auto x_train = torch::randint(0, 10, {15, 1});
+  auto y_train = torch::randint(0, 10, {15, 1});
+
+  // Linear regression model
+  auto model = torch::nn::Linear(input_size, output_size);
+
+  // Loss and optimizer
+  auto criterion = torch::nn::L1Loss();
+  auto optimizer = torch::optim::SGD(model->parameters(), torch::optim::SGDOptions(learning_rate));
+
+  for (int epoch = 0; epoch < num_epochs; epoch++) {
+    // Array to tensors
+    auto inputs = x_train;
+    auto targets = y_train;
+
+    // Forward pass
+    auto outputs = model(inputs);
+    auto loss = criterion(outputs, targets);
+
+    // Backward and optimize
+    optimizer.zero_grad();
+    loss.backward();
+    optimizer.step();
+
+    if ((epoch+1) % 5 == 0) {
+      std::cout << "Epoch [" << (epoch+1) << "/" << num_epochs << "], Loss: " << loss.item().toFloat() << std::endl;
+    }
+  }
+
 }
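
The training loop added above regresses on random integer targets, so its loss is not expected to converge to anything meaningful; it only exercises the libtorch pieces (torch::nn::Linear, a loss module, torch::optim::SGD). As a quick sanity check of the same pieces, the sketch below fits a line to data generated from y = 3x + 2 with a little noise and prints the learned weight and bias. It is a minimal illustration, not part of this change: the MSE loss, learning rate, epoch count, sample size, and variable names are assumptions chosen for the example.

// Hypothetical sanity check, not part of this PR: fit y = 3x + 2 with libtorch.
#include <torch/torch.h>
#include <iostream>

int main() {
  // Synthetic linear data with a bit of noise (shapes follow the tutorial: {N, 1}).
  auto x = torch::randn({64, 1});
  auto y = 3 * x + 2 + 0.1 * torch::randn({64, 1});

  // Same building blocks as the tutorial: a Linear module, a loss, and SGD.
  auto model = torch::nn::Linear(1, 1);
  auto criterion = torch::nn::MSELoss();  // MSE instead of L1; an assumption for this example
  auto optimizer = torch::optim::SGD(model->parameters(), torch::optim::SGDOptions(0.1));

  for (int epoch = 0; epoch < 200; ++epoch) {
    // Full-batch gradient descent on the mean squared error.
    auto loss = criterion(model(x), y);
    optimizer.zero_grad();
    loss.backward();
    optimizer.step();
  }

  // The learned parameters should land near weight = 3 and bias = 2.
  std::cout << "weight: " << model->weight.item<float>()
            << ", bias: " << model->bias.item<float>() << std::endl;
  return 0;
}

Built with the same pattern the CMakeLists.txt change uses (add_executable plus target_link_libraries against "${TORCH_LIBRARIES}"), it should print values close to 3 and 2 after a few hundred epochs.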