
Commit a767158
update to 1.1.0
1 parent: 32cce98

File tree: 5 files changed (+13, -13 lines)


README.md
Lines changed: 1 addition & 1 deletion

@@ -36,7 +36,7 @@ All included operations work on varying data types, are implemented both for CPU
 
 ## Installation
 
-Ensure that at least PyTorch 1.0.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
+Ensure that at least PyTorch 1.1.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
 
 ```
 $ python -c "import torch; print(torch.__version__)"

cpu/scatter.cpp
Lines changed: 5 additions & 5 deletions

@@ -5,7 +5,7 @@
 void scatter_mul(at::Tensor src, at::Tensor index, at::Tensor out,
                  int64_t dim) {
   int64_t elems_per_row = index.size(dim), i, idx;
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_mul", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_mul", [&] {
     DIM_APPLY3(scalar_t, src, int64_t, index, scalar_t, out, dim, {
       for (i = 0; i < elems_per_row; i++) {
         idx = index_data[i * index_stride];
@@ -18,7 +18,7 @@ void scatter_mul(at::Tensor src, at::Tensor index, at::Tensor out,
 void scatter_div(at::Tensor src, at::Tensor index, at::Tensor out,
                  int64_t dim) {
   int64_t elems_per_row = index.size(dim), i, idx;
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_div", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_div", [&] {
     DIM_APPLY3(scalar_t, src, int64_t, index, scalar_t, out, dim, {
       for (i = 0; i < elems_per_row; i++) {
         idx = index_data[i * index_stride];
@@ -31,7 +31,7 @@ void scatter_div(at::Tensor src, at::Tensor index, at::Tensor out,
 void scatter_max(at::Tensor src, at::Tensor index, at::Tensor out,
                  at::Tensor arg, int64_t dim) {
   int64_t elems_per_row = index.size(dim), i, idx;
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_max", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_max", [&] {
     DIM_APPLY4(scalar_t, src, int64_t, index, scalar_t, out, int64_t, arg, dim,
                {
                  for (i = 0; i < elems_per_row; i++) {
@@ -48,7 +48,7 @@ void scatter_max(at::Tensor src, at::Tensor index, at::Tensor out,
 void scatter_min(at::Tensor src, at::Tensor index, at::Tensor out,
                  at::Tensor arg, int64_t dim) {
   int64_t elems_per_row = index.size(dim), i, idx;
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_min", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_min", [&] {
     DIM_APPLY4(scalar_t, src, int64_t, index, scalar_t, out, int64_t, arg, dim,
                {
                  for (i = 0; i < elems_per_row; i++) {
@@ -65,7 +65,7 @@ void scatter_min(at::Tensor src, at::Tensor index, at::Tensor out,
 void index_backward(at::Tensor grad, at::Tensor index, at::Tensor arg,
                     at::Tensor out, int64_t dim) {
   int64_t elems_per_row = index.size(dim), i, idx;
-  AT_DISPATCH_ALL_TYPES(grad.type(), "index_backward", [&] {
+  AT_DISPATCH_ALL_TYPES(grad.scalar_type(), "index_backward", [&] {
     DIM_APPLY4(scalar_t, grad, int64_t, index, int64_t, arg, scalar_t, out, dim,
                {
                  for (i = 0; i < elems_per_row; i++) {
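
Background on the repeated one-line change: PyTorch 1.1 moved the dispatch macros toward taking an at::ScalarType, as returned by Tensor::scalar_type(), rather than the older Tensor::type() accessor, which is presumably why the README requirement above is bumped to 1.1.0. A minimal sketch of the dispatch pattern, using a hypothetical fill_one op that is not part of this repository:

#include <torch/extension.h>

// Hypothetical op (not from this commit) illustrating the dispatch pattern:
// AT_DISPATCH_ALL_TYPES takes an at::ScalarType, so pass src.scalar_type().
void fill_one(at::Tensor src) {
  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "fill_one", [&] {
    // Inside the lambda, scalar_t is bound to the C++ type for src's dtype.
    auto *ptr = src.data_ptr<scalar_t>();
    for (int64_t i = 0; i < src.numel(); i++) {
      ptr[i] = static_cast<scalar_t>(1);
    }
  });
}

The macro expands to a switch over the runtime dtype and instantiates the lambda body once per supported type, which is why every dispatch call site in this file needs the same one-line migration.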

cuda/scatter_kernel.cu
Lines changed: 5 additions & 5 deletions

@@ -44,7 +44,7 @@ scatter_mul_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src,
 void scatter_mul_cuda(at::Tensor src, at::Tensor index, at::Tensor out,
                       int64_t dim) {
   cudaSetDevice(src.get_device());
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_mul_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_mul_kernel", [&] {
     KERNEL_RUN(scatter_mul_kernel, index.dim(), index.numel(),
                at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src),
                at::cuda::detail::getTensorInfo<int64_t, int64_t>(index),
@@ -71,7 +71,7 @@ scatter_div_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src,
 void scatter_div_cuda(at::Tensor src, at::Tensor index, at::Tensor out,
                       int64_t dim) {
   cudaSetDevice(src.get_device());
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_div_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_div_kernel", [&] {
     KERNEL_RUN(scatter_div_kernel, index.dim(), index.numel(),
                at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src),
                at::cuda::detail::getTensorInfo<int64_t, int64_t>(index),
@@ -117,7 +117,7 @@ scatter_max_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src,
 void scatter_max_cuda(at::Tensor src, at::Tensor index, at::Tensor out,
                       at::Tensor arg, int64_t dim) {
   cudaSetDevice(src.get_device());
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_max_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_max_kernel", [&] {
     auto src_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src);
     auto index_info = at::cuda::detail::getTensorInfo<int64_t, int64_t>(index);
     auto out_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out);
@@ -148,7 +148,7 @@ scatter_min_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src,
 void scatter_min_cuda(at::Tensor src, at::Tensor index, at::Tensor out,
                       at::Tensor arg, int64_t dim) {
   cudaSetDevice(src.get_device());
-  AT_DISPATCH_ALL_TYPES(src.type(), "scatter_min_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_min_kernel", [&] {
     auto src_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src);
     auto index_info = at::cuda::detail::getTensorInfo<int64_t, int64_t>(index);
     auto out_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out);
@@ -184,7 +184,7 @@ index_backward_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> grad,
 void index_backward_cuda(at::Tensor grad, at::Tensor index, at::Tensor arg,
                          at::Tensor out, int64_t dim) {
   cudaSetDevice(grad.get_device());
-  AT_DISPATCH_ALL_TYPES(grad.type(), "index_backward_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(grad.scalar_type(), "index_backward_kernel", [&] {
     KERNEL_RUN(index_backward_kernel, index.dim(), index.numel(),
                at::cuda::detail::getTensorInfo<scalar_t, int64_t>(grad),
                at::cuda::detail::getTensorInfo<int64_t, int64_t>(index),
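
The CUDA path needs the identical migration, since the same dispatch macro wraps each kernel launch. A minimal sketch under the same assumptions (hypothetical fill_one_kernel, not from this commit; the repository's KERNEL_RUN launch macro is left out here), compiled as a .cu file:

#include <torch/extension.h>

// Hypothetical kernel (not from this commit) showing a dispatched launch.
template <typename scalar_t>
__global__ void fill_one_kernel(scalar_t *ptr, int64_t numel) {
  int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < numel) ptr[i] = static_cast<scalar_t>(1);
}

void fill_one_cuda(at::Tensor src) {
  cudaSetDevice(src.get_device());  // select the tensor's device, as above
  AT_DISPATCH_ALL_TYPES(src.scalar_type(), "fill_one_kernel", [&] {
    int64_t numel = src.numel();
    // One thread per element; scalar_t is again bound per dtype.
    fill_one_kernel<scalar_t><<<(numel + 255) / 256, 256>>>(
        src.data_ptr<scalar_t>(), numel);
  });
}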

setup.py
Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@
                ['cuda/scatter.cpp', 'cuda/scatter_kernel.cu'])
 ]
 
-__version__ = '1.1.2'
+__version__ = '1.2.0'
 url = 'https://github.com/rusty1s/pytorch_scatter'
 
 install_requires = []

torch_scatter/__init__.py
Lines changed: 1 addition & 1 deletion

@@ -7,7 +7,7 @@
 from .max import scatter_max
 from .min import scatter_min
 
-__version__ = '1.1.2'
+__version__ = '1.2.0'
 
 __all__ = [
     'scatter_add',
