Skip to content

Commit e8146e9

Browse files
autofixes: Refactor if expression and clangformat (#4081)
* Refactor `if` expression * update clang format Signed-off-by: Wenqi Li <[email protected]> * [MONAI] python code formatting Signed-off-by: monai-bot <[email protected]> Co-authored-by: deepsource-autofix[bot] <62050782+deepsource-autofix[bot]@users.noreply.github.com> Co-authored-by: monai-bot <[email protected]>
1 parent b5e7fe1 commit e8146e9

File tree

12 files changed

+556
-566
lines changed

12 files changed

+556
-566
lines changed

monai/_extensions/gmm/gmm.cpp

Lines changed: 61 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -15,75 +15,71 @@ limitations under the License.
1515

1616
#include "gmm.h"
1717

18-
// Allocate the model state used by the other entry points: a zero-filled GMM
// parameter tensor and a one-element scratch buffer (grown later by learn()).
// Both live on the GPU; they are returned to Python as a (gmm, scratch) tuple.
py::tuple init() {
  const auto cuda_f32 = torch::dtype(torch::kFloat32).device(torch::kCUDA);
  torch::Tensor gmm_tensor = torch::zeros({GMM_COUNT, GMM_COMPONENT_COUNT}, cuda_f32);
  torch::Tensor scratch_tensor = torch::empty({1}, cuda_f32);
  return py::make_tuple(gmm_tensor, scratch_tensor);
}
2424

25-
// Fit the Gaussian-mixture parameters in `gmm_tensor` to the labelled samples
// in `input_tensor`/`label_tensor`, dispatching to the CUDA or CPU backend
// according to the input tensor's device. `scratch_tensor` is resized in
// place when it is too small for this batch.
void learn(
    torch::Tensor gmm_tensor,
    torch::Tensor scratch_tensor,
    torch::Tensor input_tensor,
    torch::Tensor label_tensor) {
  const c10::DeviceType device = input_tensor.device().type();

  const unsigned int batch_count = input_tensor.size(0);
  // NOTE(review): stride(1) is used as the per-channel element count, which
  // assumes a contiguous (batch, channel, ...) layout — confirm with callers.
  const unsigned int element_count = input_tensor.stride(1);

  // Grow the scratch buffer if the current batch needs more workspace.
  const unsigned int required_scratch =
      batch_count * (element_count + GMM_COMPONENT_COUNT * GMM_COUNT * (element_count / (32 * 32)));
  if (scratch_tensor.size(0) < required_scratch) {
    scratch_tensor.resize_({required_scratch});
  }

  float* gmm_data = gmm_tensor.data_ptr<float>();
  float* scratch_data = scratch_tensor.data_ptr<float>();
  float* input_data = input_tensor.data_ptr<float>();
  int* label_data = label_tensor.data_ptr<int>();

  if (device != torch::kCUDA) {
    learn_cpu(input_data, label_data, gmm_data, scratch_data, batch_count, element_count);
  } else {
    learn_cuda(input_data, label_data, gmm_data, scratch_data, batch_count, element_count);
  }
}
5353

54-
// Evaluate the fitted GMM on `input_tensor`, producing one score per mixture:
// the output has the input's shape with dimension 1 replaced by MIXTURE_COUNT.
// Dispatches to the CUDA or CPU backend based on the input tensor's device.
torch::Tensor apply(torch::Tensor gmm_tensor, torch::Tensor input_tensor) {
  c10::DeviceType device_type = input_tensor.device().type();

  unsigned int dim = input_tensor.dim();
  unsigned int batch_count = input_tensor.size(0);
  // NOTE(review): stride(1) as the per-channel element count assumes a
  // contiguous layout — confirm with callers.
  unsigned int element_count = input_tensor.stride(1);

  // Build the output shape: same as the input, but with MIXTURE_COUNT channels.
  long int* output_size = new long int[dim];
  memcpy(output_size, input_tensor.sizes().data(), dim * sizeof(long int));
  output_size[1] = MIXTURE_COUNT;
  torch::Tensor output_tensor =
      torch::empty(c10::IntArrayRef(output_size, dim), torch::dtype(torch::kFloat32).device(device_type));
  // BUGFIX: the buffer was allocated with new[], so it must be released with
  // delete[] — plain `delete` on an array is undefined behavior.
  delete[] output_size;

  const float* gmm = gmm_tensor.data_ptr<float>();
  const float* input = input_tensor.data_ptr<float>();
  float* output = output_tensor.data_ptr<float>();

  if (device_type == torch::kCUDA) {
    apply_cuda(gmm, input, output, batch_count, element_count);
  } else {
    apply_cpu(gmm, input, output, batch_count, element_count);
  }

  return output_tensor;
}
8380

84-
// Register the extension's entry points with Python. The three bindings map
// directly onto the functions above: init() allocates model state, learn()
// fits the GMM, apply() evaluates it. wrap_pybind_function handles the
// torch-specific argument conversion for each.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("init", torch::wrap_pybind_function(init));
  m.def("learn", torch::wrap_pybind_function(learn));
  m.def("apply", torch::wrap_pybind_function(apply));
}

monai/_extensions/gmm/gmm.h

Lines changed: 26 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,30 @@ limitations under the License.
2424
#define GMM_COMPONENT_COUNT (MATRIX_COMPONENT_COUNT + 1)
2525
#define GMM_COUNT (MIXTURE_COUNT * MIXTURE_SIZE)
2626

27+
// CPU backend: fit the GMM to labelled input samples.
// (The current gmm_cpu.cpp implementation throws — not yet implemented.)
void learn_cpu(
    const float* input,
    const int* labels,
    float* gmm,
    float* scratch_memory,
    unsigned int batch_count,
    unsigned int element_count);
// CPU backend: evaluate the fitted GMM, writing per-mixture scores to output.
// (The current gmm_cpu.cpp implementation throws — not yet implemented.)
void apply_cpu(
    const float* gmm,
    const float* input,
    float* output,
    unsigned int batch_count,
    unsigned int element_count);

// CUDA backend: fit the GMM to labelled input samples.
void learn_cuda(
    const float* input,
    const int* labels,
    float* gmm,
    float* scratch_memory,
    unsigned int batch_count,
    unsigned int element_count);
// CUDA backend: evaluate the fitted GMM, writing per-mixture scores to output.
void apply_cuda(
    const float* gmm,
    const float* input,
    float* output,
    unsigned int batch_count,
    unsigned int element_count);

monai/_extensions/gmm/gmm_cpu.cpp

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,21 @@ limitations under the License.
1515

1616
#include "gmm.h"
1717

18-
// CPU fallback for GMM fitting — not yet implemented; always throws
// std::invalid_argument so callers on CPU tensors fail loudly.
// Parameters are intentionally unnamed (names kept as comments for
// documentation): they are unused until a CPU path exists, and unnamed
// parameters avoid -Wunused-parameter warnings.
void learn_cpu(
    const float* /*input*/,
    const int* /*labels*/,
    float* /*gmm*/,
    float* /*scratch_memory*/,
    unsigned int /*batch_count*/,
    unsigned int /*element_count*/) {
  throw std::invalid_argument("GMM received a cpu tensor but is not yet implemented for the cpu");
}
2227

23-
// CPU fallback for GMM evaluation — not yet implemented; always throws
// std::invalid_argument so callers on CPU tensors fail loudly.
// Parameters are intentionally unnamed (names kept as comments for
// documentation): they are unused until a CPU path exists, and unnamed
// parameters avoid -Wunused-parameter warnings.
void apply_cpu(
    const float* /*gmm*/,
    const float* /*input*/,
    float* /*output*/,
    unsigned int /*batch_count*/,
    unsigned int /*element_count*/) {
  throw std::invalid_argument("GMM received a cpu tensor but is not yet implemented for the cpu");
}

0 commit comments

Comments
 (0)