Skip to content

Commit 220cb22

Browse files
authored
Add seed_dc parameter and reorder Clusterer constructor arguments (#91)
* Separate the dc parameter into two parameters * Reorder the Clusterer constructor arguments
1 parent 51ea784 commit 220cb22

File tree

12 files changed

+212
-116
lines changed

12 files changed

+212
-116
lines changed

CLUEstering/BindingModules/Run.hpp

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -12,13 +12,14 @@ template <uint8_t Ndim, typename Kernel>
1212
void run(float dc,
1313
float rhoc,
1414
float dm,
15+
float seed_dc,
1516
int pPBin,
1617
std::tuple<float*, int*>&& pData,
1718
uint32_t n_points,
1819
const Kernel& kernel,
1920
Queue queue,
2021
size_t block_size) {
21-
clue::Clusterer<Ndim> algo(dc, rhoc, dm, pPBin, queue);
22+
clue::Clusterer<Ndim> algo(queue, dc, rhoc, dm, seed_dc, pPBin);
2223

2324
// Create the host and device points
2425
clue::PointsHost<Ndim> h_points(

CLUEstering/BindingModules/cuda/binding_gpu_cuda.cpp

Lines changed: 25 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -30,6 +30,7 @@ namespace alpaka_cuda_async {
3030
void mainRun(float dc,
3131
float rhoc,
3232
float dm,
33+
float seed_dc,
3334
int pPBin,
3435
py::array_t<float> data,
3536
py::array_t<int> results,
@@ -46,118 +47,128 @@ namespace alpaka_cuda_async {
4647
const auto dev_acc = alpaka::getDevByIdx(alpaka::Platform<Acc1D>{}, device_id);
4748

4849
// Create the queue
49-
Queue queue_(dev_acc);
50+
Queue queue(dev_acc);
5051

5152
// Running the clustering algorithm //
5253
switch (Ndim) {
5354
[[unlikely]] case (1):
5455
run<1, Kernel>(dc,
5556
rhoc,
5657
dm,
58+
seed_dc,
5759
pPBin,
5860
std::make_tuple(pData, pResults),
5961
n_points,
6062
kernel,
61-
queue_,
63+
queue,
6264
block_size);
6365
return;
6466
[[likely]] case (2):
6567
run<2, Kernel>(dc,
6668
rhoc,
6769
dm,
70+
seed_dc,
6871
pPBin,
6972
std::make_tuple(pData, pResults),
7073
n_points,
7174
kernel,
72-
queue_,
75+
queue,
7376
block_size);
7477
return;
7578
[[likely]] case (3):
7679
run<3, Kernel>(dc,
7780
rhoc,
7881
dm,
82+
seed_dc,
7983
pPBin,
8084
std::make_tuple(pData, pResults),
8185
n_points,
8286
kernel,
83-
queue_,
87+
queue,
8488
block_size);
8589
return;
8690
[[unlikely]] case (4):
8791
run<4, Kernel>(dc,
8892
rhoc,
8993
dm,
94+
seed_dc,
9095
pPBin,
9196
std::make_tuple(pData, pResults),
9297
n_points,
9398
kernel,
94-
queue_,
99+
queue,
95100
block_size);
96101
return;
97102
[[unlikely]] case (5):
98103
run<5, Kernel>(dc,
99104
rhoc,
100105
dm,
106+
seed_dc,
101107
pPBin,
102108
std::make_tuple(pData, pResults),
103109
n_points,
104110
kernel,
105-
queue_,
111+
queue,
106112
block_size);
107113
return;
108114
[[unlikely]] case (6):
109115
run<6, Kernel>(dc,
110116
rhoc,
111117
dm,
118+
seed_dc,
112119
pPBin,
113120
std::make_tuple(pData, pResults),
114121
n_points,
115122
kernel,
116-
queue_,
123+
queue,
117124
block_size);
118125
return;
119126
[[unlikely]] case (7):
120127
run<7, Kernel>(dc,
121128
rhoc,
122129
dm,
130+
seed_dc,
123131
pPBin,
124132
std::make_tuple(pData, pResults),
125133
n_points,
126134
kernel,
127-
queue_,
135+
queue,
128136
block_size);
129137
return;
130138
[[unlikely]] case (8):
131139
run<8, Kernel>(dc,
132140
rhoc,
133141
dm,
142+
seed_dc,
134143
pPBin,
135144
std::make_tuple(pData, pResults),
136145
n_points,
137146
kernel,
138-
queue_,
147+
queue,
139148
block_size);
140149
return;
141150
[[unlikely]] case (9):
142151
run<9, Kernel>(dc,
143152
rhoc,
144153
dm,
154+
seed_dc,
145155
pPBin,
146156
std::make_tuple(pData, pResults),
147157
n_points,
148158
kernel,
149-
queue_,
159+
queue,
150160
block_size);
151161
return;
152162
[[unlikely]] case (10):
153163
run<10, Kernel>(dc,
154164
rhoc,
155165
dm,
166+
seed_dc,
156167
pPBin,
157168
std::make_tuple(pData, pResults),
158169
n_points,
159170
kernel,
160-
queue_,
171+
queue,
161172
block_size);
162173
return;
163174
[[unlikely]] default:
@@ -171,6 +182,7 @@ namespace alpaka_cuda_async {
171182
m.def("listDevices", &listDevices, "List the available devices for the CUDA backend");
172183
m.def("mainRun",
173184
pybind11::overload_cast<float,
185+
float,
174186
float,
175187
float,
176188
int,
@@ -184,6 +196,7 @@ namespace alpaka_cuda_async {
184196
"mainRun");
185197
m.def("mainRun",
186198
pybind11::overload_cast<float,
199+
float,
187200
float,
188201
float,
189202
int,
@@ -197,6 +210,7 @@ namespace alpaka_cuda_async {
197210
"mainRun");
198211
m.def("mainRun",
199212
pybind11::overload_cast<float,
213+
float,
200214
float,
201215
float,
202216
int,

CLUEstering/BindingModules/hip/binding_gpu_hip.cpp

Lines changed: 25 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -31,6 +31,7 @@ namespace alpaka_rocm_async {
3131
void mainRun(float dc,
3232
float rhoc,
3333
float dm,
34+
float seed_dc,
3435
int pPBin,
3536
py::array_t<float> data,
3637
py::array_t<int> results,
@@ -47,118 +48,128 @@ namespace alpaka_rocm_async {
4748
const auto dev_acc = alpaka::getDevByIdx(alpaka::Platform<Acc1D>{}, device_id);
4849

4950
// Create the queue
50-
Queue queue_(dev_acc);
51+
Queue queue(dev_acc);
5152

5253
// Running the clustering algorithm //
5354
switch (Ndim) {
5455
[[unlikely]] case (1):
5556
run<1, Kernel>(dc,
5657
rhoc,
5758
dm,
59+
seed_dc,
5860
pPBin,
5961
std::make_tuple(pData, pResults),
6062
n_points,
6163
kernel,
62-
queue_,
64+
queue,
6365
block_size);
6466
return;
6567
[[likely]] case (2):
6668
run<2, Kernel>(dc,
6769
rhoc,
6870
dm,
71+
seed_dc,
6972
pPBin,
7073
std::make_tuple(pData, pResults),
7174
n_points,
7275
kernel,
73-
queue_,
76+
queue,
7477
block_size);
7578
return;
7679
[[likely]] case (3):
7780
run<3, Kernel>(dc,
7881
rhoc,
7982
dm,
83+
seed_dc,
8084
pPBin,
8185
std::make_tuple(pData, pResults),
8286
n_points,
8387
kernel,
84-
queue_,
88+
queue,
8589
block_size);
8690
return;
8791
[[unlikely]] case (4):
8892
run<4, Kernel>(dc,
8993
rhoc,
9094
dm,
95+
seed_dc,
9196
pPBin,
9297
std::make_tuple(pData, pResults),
9398
n_points,
9499
kernel,
95-
queue_,
100+
queue,
96101
block_size);
97102
return;
98103
[[unlikely]] case (5):
99104
run<5, Kernel>(dc,
100105
rhoc,
101106
dm,
107+
seed_dc,
102108
pPBin,
103109
std::make_tuple(pData, pResults),
104110
n_points,
105111
kernel,
106-
queue_,
112+
queue,
107113
block_size);
108114
return;
109115
[[unlikely]] case (6):
110116
run<6, Kernel>(dc,
111117
rhoc,
112118
dm,
119+
seed_dc,
113120
pPBin,
114121
std::make_tuple(pData, pResults),
115122
n_points,
116123
kernel,
117-
queue_,
124+
queue,
118125
block_size);
119126
return;
120127
[[unlikely]] case (7):
121128
run<7, Kernel>(dc,
122129
rhoc,
123130
dm,
131+
seed_dc,
124132
pPBin,
125133
std::make_tuple(pData, pResults),
126134
n_points,
127135
kernel,
128-
queue_,
136+
queue,
129137
block_size);
130138
return;
131139
[[unlikely]] case (8):
132140
run<8, Kernel>(dc,
133141
rhoc,
134142
dm,
143+
seed_dc,
135144
pPBin,
136145
std::make_tuple(pData, pResults),
137146
n_points,
138147
kernel,
139-
queue_,
148+
queue,
140149
block_size);
141150
return;
142151
[[unlikely]] case (9):
143152
run<9, Kernel>(dc,
144153
rhoc,
145154
dm,
155+
seed_dc,
146156
pPBin,
147157
std::make_tuple(pData, pResults),
148158
n_points,
149159
kernel,
150-
queue_,
160+
queue,
151161
block_size);
152162
return;
153163
[[unlikely]] case (10):
154164
run<10, Kernel>(dc,
155165
rhoc,
156166
dm,
167+
seed_dc,
157168
pPBin,
158169
std::make_tuple(pData, pResults),
159170
n_points,
160171
kernel,
161-
queue_,
172+
queue,
162173
block_size);
163174
return;
164175
[[unlikely]] default:
@@ -174,6 +185,7 @@ namespace alpaka_rocm_async {
174185
"List the available devices for the HIP/ROCm backend");
175186
m.def("mainRun",
176187
pybind11::overload_cast<float,
188+
float,
177189
float,
178190
float,
179191
int,
@@ -187,6 +199,7 @@ namespace alpaka_rocm_async {
187199
"mainRun");
188200
m.def("mainRun",
189201
pybind11::overload_cast<float,
202+
float,
190203
float,
191204
float,
192205
int,
@@ -200,6 +213,7 @@ namespace alpaka_rocm_async {
200213
"mainRun");
201214
m.def("mainRun",
202215
pybind11::overload_cast<float,
216+
float,
203217
float,
204218
float,
205219
int,

0 commit comments

Comments (0)