File tree Expand file tree Collapse file tree 2 files changed +43
-4
lines changed
tutorials/basics/linear_regression Expand file tree Collapse file tree 2 files changed +43
-4
find_package(Torch REQUIRED)

add_executable(pytorch-cpp main.cpp)
target_link_libraries(pytorch-cpp "${TORCH_LIBRARIES}")

# Tutorial targets: enable one at a time while working through the basics.
# add_executable(pytorch_basics tutorials/basics/pytorch_basics/main.cpp)
# target_link_libraries(pytorch_basics "${TORCH_LIBRARIES}")
add_executable(linear_regression tutorials/basics/linear_regression/main.cpp)
target_link_libraries(linear_regression "${TORCH_LIBRARIES}")

# add_executable(logistic_regression tutorials/basics/logistic_regression/main.cpp)
# target_link_libraries(logistic_regression "${TORCH_LIBRARIES}")

# add_executable(feedforward_neural_network tutorials/basics/feedforward_neural_network/main.cpp)
Original file line number Diff line number Diff line change 1
1
// Copyright 2019 Omkar Prabhu
2
2
#include < torch/torch.h>
3
+ #include < ATen/ATen.h>
3
4
#include < iostream>
5
+ #include < vector>
4
6
5
7
int main () {
6
8
std::cout << " Linear Regression" << std::endl;
9
+
10
+ // Hyper parameters
11
+ int input_size = 1 ;
12
+ int output_size = 1 ;
13
+ int num_epochs = 60 ;
14
+ double learning_rate = 0.001 ;
15
+
16
+ // Sample dataset
17
+ auto x_train = torch::randint (0 , 10 , {15 , 1 });
18
+ auto y_train = torch::randint (0 , 10 , {15 , 1 });
19
+
20
+ // Linear regression model
21
+ auto model = torch::nn::Linear (input_size, output_size);
22
+
23
+ // Loss and optimizer
24
+ auto criterion = torch::nn::L1Loss ();
25
+ auto optimizer = torch::optim::SGD (model->parameters (), torch::optim::SGDOptions (learning_rate));
26
+
27
+ for (int epoch = 0 ; epoch < num_epochs; epoch++) {
28
+ // Array to tensors
29
+ auto inputs = x_train;
30
+ auto targets = y_train;
31
+
32
+ // Forward pass
33
+ auto outputs = model (inputs);
34
+ auto loss = criterion (outputs, targets);
35
+
36
+ // Backward and optimize
37
+ optimizer.zero_grad ();
38
+ loss.backward ();
39
+ optimizer.step ();
40
+
41
+ if ((epoch+1 ) % 5 == 0 ) {
42
+ std::cout << " Epoch [" << (epoch+1 ) << " /" << num_epochs << " ], Loss: " << loss.item ().toFloat () << std::endl;
43
+ }
44
+ }
45
+
7
46
}
You can’t perform that action at this time.
0 commit comments