Skip to content

Commit 96859d6

Browse files
authored
Replace atomicAdd with PyTorch implementation (#75)
1 parent 41d886b commit 96859d6

File tree

3 files changed

+2
-7
lines changed

3 files changed

+2
-7
lines changed

torchsparse/src/common/gpu.cuh

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -104,11 +104,6 @@ template <typename Dtype1, typename Dtype2>
104104
void print(const thrust::device_vector<Dtype1> &v1,
105105
const thrust::device_vector<Dtype2> &v2);
106106

107-
// atomicadd for half types (from aten/src/THC/THCAtomics.cuh)
108-
static inline __device__ at::Half atomicAdd(at::Half *address, at::Half val) {
109-
return atomicAdd(reinterpret_cast<__half*>(address), val);
110-
}
111-
112107
// AtomicAddition for double with cuda arch <= 600
113108
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
114109
#else

torchsparse/src/interpolation/devox_gpu.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
#include <stdlib.h>
33
#include <thrust/device_vector.h>
44
#include <torch/extension.h>
5-
#include "../common/gpu.cuh"
5+
#include <THC/THCAtomics.cuh>
66

77

88
//input features (n, c), indices (N, 8), weight (N, 8) -> output features (N, c)

torchsparse/src/others/insertion_gpu.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
#include <stdlib.h>
33
#include <cmath>
44
#include <torch/torch.h>
5-
#include "../common/gpu.cuh"
5+
#include <THC/THCAtomics.cuh>
66

77
//hashing
88
//input N*F float tensor, pointer to output N'*F int64 tensor, N*1 count tensor, N*1 index tensor

0 commit comments

Comments (0)