Skip to content

Commit a7a812a

Browse files
Fix deprecation warnings
1 parent 6e34d96 commit a7a812a

File tree

5 files changed

+41
-41
lines changed

5 files changed

+41
-41
lines changed

cuda/include/utils.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4,22 +4,22 @@
44

55
#define CHECK_CUDA(x) \
66
do { \
7-
AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor"); \
7+
TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor"); \
88
} while (0)
99

1010
#define CHECK_CONTIGUOUS(x) \
1111
do { \
12-
AT_CHECK(x.is_contiguous(), #x " must be a contiguous tensor"); \
12+
TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor"); \
1313
} while (0)
1414

1515
#define CHECK_IS_INT(x) \
1616
do { \
17-
AT_CHECK(x.scalar_type() == at::ScalarType::Int, \
17+
TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, \
1818
#x " must be an int tensor"); \
1919
} while (0)
2020

2121
#define CHECK_IS_FLOAT(x) \
2222
do { \
23-
AT_CHECK(x.scalar_type() == at::ScalarType::Float, \
23+
TORCH_CHECK(x.scalar_type() == at::ScalarType::Float, \
2424
#x " must be a float tensor"); \
2525
} while (0)

cuda/src/ball_query.cpp

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,8 @@ void query_ball_point_kernel_dense_wrapper(int b, int n, int m, float radius,
77

88
void query_ball_point_kernel_partial_wrapper(long batch_size,
99
int size_x,
10-
int size_y,
11-
float radius,
10+
int size_y,
11+
float radius,
1212
int nsample,
1313
const float *x,
1414
const float *y,
@@ -33,10 +33,10 @@ at::Tensor ball_query_dense(at::Tensor new_xyz, at::Tensor xyz, const float radi
3333

3434
if (new_xyz.type().is_cuda()) {
3535
query_ball_point_kernel_dense_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
36-
radius, nsample, new_xyz.data<float>(),
37-
xyz.data<float>(), idx.data<int>());
36+
radius, nsample, new_xyz.data_ptr<float>(),
37+
xyz.data_ptr<float>(), idx.data_ptr<int>());
3838
} else {
39-
AT_CHECK(false, "CPU not supported");
39+
TORCH_CHECK(false, "CPU not supported");
4040
}
4141

4242
return idx;
@@ -68,13 +68,13 @@ std::pair<at::Tensor, at::Tensor> ball_query_partial_dense(at::Tensor x,
6868

6969
at::Tensor idx = torch::full({y.size(0), nsample}, x.size(0),
7070
at::device(y.device()).dtype(at::ScalarType::Long));
71-
71+
7272
at::Tensor dist = torch::full({y.size(0), nsample}, -1,
7373
at::device(y.device()).dtype(at::ScalarType::Float));
7474

7575
cudaSetDevice(x.get_device());
7676
auto batch_sizes = (int64_t *)malloc(sizeof(int64_t));
77-
cudaMemcpy(batch_sizes, batch_x[-1].data<int64_t>(), sizeof(int64_t),
77+
cudaMemcpy(batch_sizes, batch_x[-1].data_ptr<int64_t>(), sizeof(int64_t),
7878
cudaMemcpyDeviceToHost);
7979
auto batch_size = batch_sizes[0] + 1;
8080

@@ -88,14 +88,14 @@ std::pair<at::Tensor, at::Tensor> ball_query_partial_dense(at::Tensor x,
8888
x.size(0),
8989
y.size(0),
9090
radius, nsample,
91-
x.data<float>(),
92-
y.data<float>(),
93-
batch_x.data<long>(),
94-
batch_y.data<long>(),
95-
idx.data<long>(),
96-
dist.data<float>());
91+
x.data_ptr<float>(),
92+
y.data_ptr<float>(),
93+
batch_x.data_ptr<long>(),
94+
batch_y.data_ptr<long>(),
95+
idx.data_ptr<long>(),
96+
dist.data_ptr<float>());
9797
} else {
98-
AT_CHECK(false, "CPU not supported");
98+
TORCH_CHECK(false, "CPU not supported");
9999
}
100100

101101
return std::make_pair(idx, dist);

cuda/src/group_points.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,10 @@ at::Tensor group_points(at::Tensor points, at::Tensor idx) {
2525

2626
if (points.type().is_cuda()) {
2727
group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
28-
idx.size(1), idx.size(2), points.data<float>(),
29-
idx.data<int>(), output.data<float>());
28+
idx.size(1), idx.size(2), points.data_ptr<float>(),
29+
idx.data_ptr<int>(), output.data_ptr<float>());
3030
} else {
31-
AT_CHECK(false, "CPU not supported");
31+
TORCH_CHECK(false, "CPU not supported");
3232
}
3333

3434
return output;
@@ -51,9 +51,9 @@ at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
5151
if (grad_out.type().is_cuda()) {
5252
group_points_grad_kernel_wrapper(
5353
grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
54-
grad_out.data<float>(), idx.data<int>(), output.data<float>());
54+
grad_out.data_ptr<float>(), idx.data_ptr<int>(), output.data_ptr<float>());
5555
} else {
56-
AT_CHECK(false, "CPU not supported");
56+
TORCH_CHECK(false, "CPU not supported");
5757
}
5858

5959
return output;

cuda/src/interpolate.cpp

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -30,10 +30,10 @@ std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
3030

3131
if (unknowns.type().is_cuda()) {
3232
three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
33-
unknowns.data<float>(), knows.data<float>(),
34-
dist2.data<float>(), idx.data<int>());
33+
unknowns.data_ptr<float>(), knows.data_ptr<float>(),
34+
dist2.data_ptr<float>(), idx.data_ptr<int>());
3535
} else {
36-
AT_CHECK(false, "CPU not supported");
36+
TORCH_CHECK(false, "CPU not supported");
3737
}
3838

3939
return {dist2, idx};
@@ -60,10 +60,10 @@ at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
6060
if (points.type().is_cuda()) {
6161
three_interpolate_kernel_wrapper(
6262
points.size(0), points.size(1), points.size(2), idx.size(1),
63-
points.data<float>(), idx.data<int>(), weight.data<float>(),
64-
output.data<float>());
63+
points.data_ptr<float>(), idx.data_ptr<int>(), weight.data_ptr<float>(),
64+
output.data_ptr<float>());
6565
} else {
66-
AT_CHECK(false, "CPU not supported");
66+
TORCH_CHECK(false, "CPU not supported");
6767
}
6868

6969
return output;
@@ -89,10 +89,10 @@ at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
8989
if (grad_out.type().is_cuda()) {
9090
three_interpolate_grad_kernel_wrapper(
9191
grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
92-
grad_out.data<float>(), idx.data<int>(), weight.data<float>(),
93-
output.data<float>());
92+
grad_out.data_ptr<float>(), idx.data_ptr<int>(), weight.data_ptr<float>(),
93+
output.data_ptr<float>());
9494
} else {
95-
AT_CHECK(false, "CPU not supported");
95+
TORCH_CHECK(false, "CPU not supported");
9696
}
9797

9898
return output;

cuda/src/sampling.cpp

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -28,10 +28,10 @@ at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
2828

2929
if (points.type().is_cuda()) {
3030
gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
31-
idx.size(1), points.data<float>(),
32-
idx.data<int>(), output.data<float>());
31+
idx.size(1), points.data_ptr<float>(),
32+
idx.data_ptr<int>(), output.data_ptr<float>());
3333
} else {
34-
AT_CHECK(false, "CPU not supported");
34+
TORCH_CHECK(false, "CPU not supported");
3535
}
3636

3737
return output;
@@ -54,10 +54,10 @@ at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
5454

5555
if (grad_out.type().is_cuda()) {
5656
gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
57-
idx.size(1), grad_out.data<float>(),
58-
idx.data<int>(), output.data<float>());
57+
idx.size(1), grad_out.data_ptr<float>(),
58+
idx.data_ptr<int>(), output.data_ptr<float>());
5959
} else {
60-
AT_CHECK(false, "CPU not supported");
60+
TORCH_CHECK(false, "CPU not supported");
6161
}
6262

6363
return output;
@@ -76,10 +76,10 @@ at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
7676

7777
if (points.type().is_cuda()) {
7878
furthest_point_sampling_kernel_wrapper(
79-
points.size(0), points.size(1), nsamples, points.data<float>(),
80-
tmp.data<float>(), output.data<int>());
79+
points.size(0), points.size(1), nsamples, points.data_ptr<float>(),
80+
tmp.data_ptr<float>(), output.data_ptr<int>());
8181
} else {
82-
AT_CHECK(false, "CPU not supported");
82+
TORCH_CHECK(false, "CPU not supported");
8383
}
8484

8585
return output;

0 commit comments

Comments (0)