Commit cf71fd9

refactor!(core): Move everything into the gprat namespace
1 parent 5976c4d commit cf71fd9

44 files changed: +638, -418 lines
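
For C++ users the breaking change is a rename: free functions that previously lived in the utils namespace and the hyperparameter types from gprat_hyper are now reached through gprat. A minimal caller-side sketch of the rename (the include path and the zero-argument signatures of the suspend/resume/stop helpers are assumptions; only the gprat:: qualifications are taken from the diffs below):

    // Hypothetical embedding application; illustrative only.
    #include <gprat/core.hpp>  // assumed umbrella header, not shown in this commit

    void manage_runtime(int argc, char **argv)
    {
        gprat::start_hpx_runtime(argc, argv);  // was utils::start_hpx_runtime
        gprat::suspend_hpx_runtime();          // was utils::suspend_hpx_runtime
        gprat::resume_hpx_runtime();           // was utils::resume_hpx_runtime
        gprat::stop_hpx_runtime();             // was utils::stop_hpx_runtime
    }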

bindings/gprat_py.cpp

Lines changed: 7 additions & 7 deletions
@@ -31,19 +31,19 @@ void init_gprat(py::module &m)
     // Set hyperparameters to default values in `AdamParams` class, unless
     // specified. Python object has full access to each hyperparameter and a
     // string representation `__repr__`.
-    py::class_<gprat_hyper::AdamParams>(m, "AdamParams")
+    py::class_<gprat::AdamParams>(m, "AdamParams")
         .def(py::init<double, double, double, double, int>(),
              py::arg("learning_rate") = 0.001,
              py::arg("beta1") = 0.9,
              py::arg("beta2") = 0.999,
              py::arg("epsilon") = 1e-8,
              py::arg("opt_iter") = 0)
-        .def_readwrite("learning_rate", &gprat_hyper::AdamParams::learning_rate)
-        .def_readwrite("beta1", &gprat_hyper::AdamParams::beta1)
-        .def_readwrite("beta2", &gprat_hyper::AdamParams::beta2)
-        .def_readwrite("epsilon", &gprat_hyper::AdamParams::epsilon)
-        .def_readwrite("opt_iter", &gprat_hyper::AdamParams::opt_iter)
-        .def("__repr__", &gprat_hyper::AdamParams::repr);
+        .def_readwrite("learning_rate", &gprat::AdamParams::learning_rate)
+        .def_readwrite("beta1", &gprat::AdamParams::beta1)
+        .def_readwrite("beta2", &gprat::AdamParams::beta2)
+        .def_readwrite("epsilon", &gprat::AdamParams::epsilon)
+        .def_readwrite("opt_iter", &gprat::AdamParams::opt_iter)
+        .def("__repr__", &gprat::AdamParams::repr);

     // Initializes Gaussian Process with `GP` class. Sets default parameters for
     // squared exponential kernel, number of regressors and trainable, unless

bindings/utils_py.cpp

Lines changed: 9 additions & 9 deletions
@@ -32,7 +32,7 @@ void start_hpx_wrapper(std::vector<std::string> args, std::size_t n_cores)
     }
     argv.push_back(nullptr);
     int argc = static_cast<int>(args.size());
-    utils::start_hpx_runtime(argc, argv.data());
+    gprat::start_hpx_runtime(argc, argv.data());
 }

 /**
@@ -43,7 +43,7 @@ void start_hpx_wrapper(std::vector<std::string> args, std::size_t n_cores)
 void init_utils(py::module &m)
 {
     m.def("compute_train_tiles",
-          &utils::compute_train_tiles,
+          &gprat::compute_train_tiles,
           py::arg("n_samples"),
           py::arg("n_tile_size"),
           R"pbdoc(
@@ -58,7 +58,7 @@ void init_utils(py::module &m)
         )pbdoc");

     m.def("compute_train_tile_size",
-          &utils::compute_train_tile_size,
+          &gprat::compute_train_tile_size,
           py::arg("n_samples"),
           py::arg("n_tiles"),
           R"pbdoc(
@@ -73,7 +73,7 @@ void init_utils(py::module &m)
         )pbdoc");

     m.def("compute_test_tiles",
-          &utils::compute_test_tiles,
+          &gprat::compute_test_tiles,
           py::arg("m_samples"),
           py::arg("n_tiles"),
           py::arg("n_tile_size"),
@@ -90,19 +90,19 @@ void init_utils(py::module &m)
         )pbdoc");

     m.def("print_vector",
-          &utils::print_vector,
+          &gprat::print_vector,
           py::arg("vec"),
           py::arg("start") = 0,
           py::arg("end") = -1,
           py::arg("separator") = " ",
           "Print elements of a vector with optional start, end, and separator parameters");

     m.def("start_hpx", &start_hpx_wrapper, py::arg("args"), py::arg("n_cores")); // Using the wrapper function
-    m.def("resume_hpx", &utils::resume_hpx_runtime);
-    m.def("suspend_hpx", &utils::suspend_hpx_runtime);
-    m.def("stop_hpx", &utils::stop_hpx_runtime);
+    m.def("resume_hpx", &gprat::resume_hpx_runtime);
+    m.def("suspend_hpx", &gprat::suspend_hpx_runtime);
+    m.def("stop_hpx", &gprat::stop_hpx_runtime);

-    m.def("compiled_with_cuda", &utils::compiled_with_cuda, "Check if the code was compiled with CUDA support");
+    m.def("compiled_with_cuda", &gprat::compiled_with_cuda, "Check if the code was compiled with CUDA support");

     m.def("print_available_gpus", &gprat::print_available_gpus, "Print available GPUs with their properties");
     m.def("gpu_count", &gprat::gpu_count, "Return the number of available GPUs");

core/include/gprat/cpu/adapter_cblas_fp32.hpp

Lines changed: 12 additions & 3 deletions
@@ -1,8 +1,15 @@
-#ifndef CPU_ADAPTER_CBLAS_FP32_H
-#define CPU_ADAPTER_CBLAS_FP32_H
+#ifndef GPRAT_CPU_ADAPTER_CBLAS_FP32_HPP
+#define GPRAT_CPU_ADAPTER_CBLAS_FP32_HPP
+
+#pragma once
+
+#include <gprat/detail/config.hpp>

 #include <hpx/future.hpp>
 #include <vector>
+
+GPRAT_NS_BEGIN
+
 using vector_future = hpx::shared_future<std::vector<float>>;

 // Constants that are compatible with CBLAS
@@ -145,4 +152,6 @@ vector_future axpy(vector_future f_y, vector_future f_x, const int N);
  */
 float dot(std::vector<float> a, std::vector<float> b, const int N);

-#endif // end of CPU_ADAPTER_CBLAS_FP32_H
+GPRAT_NS_END
+
+#endif
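
The newly included <gprat/detail/config.hpp> is not part of this diff, but from the way GPRAT_NS_BEGIN and GPRAT_NS_END bracket every header it presumably defines the namespace-opening macros, roughly along these lines (a hypothetical sketch, not the actual file):

    // Hypothetical sketch of gprat/detail/config.hpp.
    #ifndef GPRAT_DETAIL_CONFIG_HPP
    #define GPRAT_DETAIL_CONFIG_HPP

    #pragma once

    // Everything declared between the two markers lives in ::gprat.
    #define GPRAT_NS_BEGIN namespace gprat {
    #define GPRAT_NS_END   }

    #endif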

core/include/gprat/cpu/adapter_cblas_fp64.hpp

Lines changed: 11 additions & 4 deletions
@@ -1,13 +1,18 @@
-#ifndef CPU_ADAPTER_CBLAS_FP64_H
-#define CPU_ADAPTER_CBLAS_FP64_H
+#ifndef GPRAT_CPU_ADAPTER_CBLAS_FP64_HPP
+#define GPRAT_CPU_ADAPTER_CBLAS_FP64_HPP
+
+#pragma once
+
+#include <gprat/detail/config.hpp>

 #include <hpx/future.hpp>
 #include <vector>

+GPRAT_NS_BEGIN
+
 using vector_future = hpx::shared_future<std::vector<double>>;

 // Constants that are compatible with CBLAS
-
 typedef enum BLAS_TRANSPOSE { Blas_no_trans = 111, Blas_trans = 112 } BLAS_TRANSPOSE;

 typedef enum BLAS_SIDE { Blas_left = 141, Blas_right = 142 } BLAS_SIDE;
@@ -147,4 +152,6 @@ vector_future axpy(vector_future f_y, vector_future f_x, const int N);
  */
 double dot(std::vector<double> a, std::vector<double> b, const int N);

-#endif // end of CPU_ADAPTER_CBLAS_FP64_H
+GPRAT_NS_END
+
+#endif

core/include/gprat/cpu/gp_algorithms.hpp

Lines changed: 17 additions & 9 deletions
@@ -1,9 +1,15 @@
-#ifndef CPU_GP_ALGORITHMS_H
-#define CPU_GP_ALGORITHMS_H
+#ifndef GPRAT_CPU_GP_ALGORITHMS_HPP
+#define GPRAT_CPU_GP_ALGORITHMS_HPP

-#include "gp_kernels.hpp"
+#pragma once
+
+#include <gprat/detail/config.hpp>
+
+#include <gprat/gp_kernels.hpp>
 #include <vector>

+GPRAT_NS_BEGIN
+
 namespace cpu
 {

@@ -22,7 +28,7 @@ namespace cpu
 double compute_covariance_function(std::size_t i_global,
                                    std::size_t j_global,
                                    std::size_t n_regressors,
-                                   const gprat_hyper::SEKParams &sek_params,
+                                   const SEKParams &sek_params,
                                    const std::vector<double> &i_input,
                                    const std::vector<double> &j_input);

@@ -44,7 +50,7 @@ std::vector<double> gen_tile_covariance(
     std::size_t col,
     std::size_t N,
     std::size_t n_regressors,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     const std::vector<double> &input);

 /**
@@ -66,7 +72,7 @@ std::vector<double> gen_tile_full_prior_covariance(
     std::size_t col,
     std::size_t N,
     std::size_t n_regressors,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     const std::vector<double> &input);

 /**
@@ -88,7 +94,7 @@ std::vector<double> gen_tile_prior_covariance(
     std::size_t col,
     std::size_t N,
     std::size_t n_regressors,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     const std::vector<double> &input);

 /**
@@ -111,7 +117,7 @@ std::vector<double> gen_tile_cross_covariance(
     std::size_t N_row,
     std::size_t N_col,
     std::size_t n_regressors,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     const std::vector<double> &row_input,
     const std::vector<double> &col_input);

@@ -170,4 +176,6 @@ std::vector<double> gen_tile_identity(std::size_t N);

 } // end of namespace cpu

-#endif // end of CPU_GP_ALGORITHMS_H
+GPRAT_NS_END
+
+#endif

core/include/gprat/cpu/gp_functions.hpp

Lines changed: 22 additions & 14 deletions
@@ -1,10 +1,16 @@
-#ifndef CPU_GP_FUNCTIONS_H
-#define CPU_GP_FUNCTIONS_H
+#ifndef GPRAT_CPU_GP_FUNCTIONS_HPP
+#define GPRAT_CPU_GP_FUNCTIONS_HPP

-#include "gp_hyperparameters.hpp"
-#include "gp_kernels.hpp"
+#pragma once
+
+#include <gprat/detail/config.hpp>
+
+#include "gprat/gp_hyperparameters.hpp"
+#include "gprat/gp_kernels.hpp"
 #include <vector>

+GPRAT_NS_BEGIN
+
 namespace cpu
 {

@@ -22,7 +28,7 @@ namespace cpu
  */
 std::vector<std::vector<double>>
 cholesky(const std::vector<double> &training_input,
-         const gprat_hyper::SEKParams &sek_params,
+         const SEKParams &sek_params,
          int n_tiles,
          int n_tile_size,
          int n_regressors);
@@ -46,7 +52,7 @@ std::vector<double>
 predict(const std::vector<double> &training_input,
         const std::vector<double> &training_output,
         const std::vector<double> &test_input,
-        const gprat_hyper::SEKParams &sek_params,
+        const SEKParams &sek_params,
         int n_tiles,
         int n_tile_size,
         int m_tiles,
@@ -72,7 +78,7 @@ std::vector<std::vector<double>> predict_with_uncertainty(
     const std::vector<double> &training_input,
     const std::vector<double> &training_output,
     const std::vector<double> &test_input,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     int n_tiles,
     int n_tile_size,
     int m_tiles,
@@ -98,7 +104,7 @@ std::vector<std::vector<double>> predict_with_full_cov(
     const std::vector<double> &training_input,
     const std::vector<double> &training_output,
     const std::vector<double> &test_data,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     int n_tiles,
     int n_tile_size,
     int m_tiles,
@@ -119,7 +125,7 @@ std::vector<std::vector<double>> predict_with_full_cov(
  */
 double compute_loss(const std::vector<double> &training_input,
                     const std::vector<double> &training_output,
-                    const gprat_hyper::SEKParams &sek_params,
+                    const SEKParams &sek_params,
                     int n_tiles,
                     int n_tile_size,
                     int n_regressors);
@@ -146,8 +152,8 @@ optimize(const std::vector<double> &training_input,
          int n_tiles,
          int n_tile_size,
          int n_regressors,
-         const gprat_hyper::AdamParams &adam_params,
-         gprat_hyper::SEKParams &sek_params,
+         const AdamParams &adam_params,
+         SEKParams &sek_params,
          std::vector<bool> trainable_params);

 /**
@@ -173,11 +179,13 @@ double optimize_step(const std::vector<double> &training_input,
                      int n_tiles,
                      int n_tile_size,
                      int n_regressors,
-                     gprat_hyper::AdamParams &adam_params,
-                     gprat_hyper::SEKParams &sek_params,
+                     AdamParams &adam_params,
+                     SEKParams &sek_params,
                      std::vector<bool> trainable_params,
                      int iter);

 } // end of namespace cpu

-#endif // end of CPU_GP_FUNCTIONS_H
+GPRAT_NS_END
+
+#endif
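
Assuming GPRAT_NS_BEGIN opens namespace gprat, the declarations in this header keep their inner cpu namespace and are now addressed as gprat::cpu::..., with the hyperparameter types (formerly gprat_hyper::SEKParams and gprat_hyper::AdamParams) presumably landing directly in gprat. A hypothetical caller against the cholesky declaration shown above (include path, tile counts, and the exact namespace of SEKParams are assumptions):

    #include <gprat/cpu/gp_functions.hpp>

    #include <vector>

    // Tiled Cholesky factorization of the covariance matrix assembled from
    // `training_input`; previously spelled cpu::cholesky with a
    // gprat_hyper::SEKParams argument.
    std::vector<std::vector<double>>
    factorize(const std::vector<double> &training_input, const gprat::SEKParams &sek_params)
    {
        return gprat::cpu::cholesky(training_input, sek_params,
                                    /*n_tiles=*/4, /*n_tile_size=*/32, /*n_regressors=*/8);
    }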

core/include/gprat/cpu/gp_optimizer.hpp

Lines changed: 19 additions & 11 deletions
@@ -1,10 +1,16 @@
-#ifndef CPU_GP_OPTIMIZER_H
-#define CPU_GP_OPTIMIZER_H
+#ifndef GPRAT_CPU_GP_OPTIMIZER_H
+#define GPRAT_CPU_GP_OPTIMIZER_H

-#include "gp_hyperparameters.hpp"
-#include "gp_kernels.hpp"
+#pragma once
+
+#include <gprat/detail/config.hpp>
+
+#include "gprat/gp_hyperparameters.hpp"
+#include "gprat/gp_kernels.hpp"
 #include <vector>

+GPRAT_NS_BEGIN
+
 namespace cpu
 {

@@ -54,7 +60,7 @@ double compute_sigmoid(double parameter);
 double compute_covariance_distance(std::size_t i_global,
                                    std::size_t j_global,
                                    std::size_t n_regressors,
-                                   const gprat_hyper::SEKParams &sek_params,
+                                   const SEKParams &sek_params,
                                    const std::vector<double> &i_input,
                                    const std::vector<double> &j_input);

@@ -75,7 +81,7 @@ std::vector<double> gen_tile_distance(
     std::size_t col,
     std::size_t N,
     std::size_t n_regressors,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     const std::vector<double> &input);

 /**
@@ -93,7 +99,7 @@ std::vector<double> gen_tile_covariance_with_distance(
     std::size_t row,
     std::size_t col,
     std::size_t N,
-    const gprat_hyper::SEKParams &sek_params,
+    const SEKParams &sek_params,
     const std::vector<double> &distance);

 /**
@@ -106,7 +112,7 @@ std::vector<double> gen_tile_covariance_with_distance(
  * @return A quadratic tile of the derivative of v of size N x N
  */
 std::vector<double>
-gen_tile_grad_v(std::size_t N, const gprat_hyper::SEKParams &sek_params, const std::vector<double> &distance);
+gen_tile_grad_v(std::size_t N, const SEKParams &sek_params, const std::vector<double> &distance);

 /**
  * @brief Generate a derivative tile w.r.t. lengthscale l
@@ -118,7 +124,7 @@ gen_tile_grad_v(std::size_t N, const gprat_hyper::SEKParams &sek_params, const s
  * @return A quadratic tile of the derivative of l of size N x N
  */
 std::vector<double>
-gen_tile_grad_l(std::size_t N, const gprat_hyper::SEKParams &sek_params, const std::vector<double> &distance);
+gen_tile_grad_l(std::size_t N, const SEKParams &sek_params, const std::vector<double> &distance);

 /**
  * @brief Update biased first raw moment estimate: m_T+1 = beta_1 * m_T + (1 - beta_1) * g_T.
@@ -154,7 +160,7 @@ double update_second_moment(double gradient, double v_T, double beta_2);
  * @return The updated hyperparameter
  */
 double adam_step(const double unconstrained_hyperparam,
-                 const gprat_hyper::AdamParams &adam_params,
+                 const AdamParams &adam_params,
                  double m_T,
                  double v_T,
                  std::size_t iter);
@@ -230,4 +236,6 @@ double compute_trace_diag(const std::vector<double> &tile, double trace, std::si

 } // end of namespace cpu

-#endif // end of CPU_GP_OPTIMIZER_H
+GPRAT_NS_END
+
+#endif
