Skip to content

Commit cc8aa66

Browse files
authored
Merge branch 'master' into build-cpu-and-gpu-versions
2 parents 8a07c86 + ab4cd9d commit cc8aa66

File tree

5 files changed

+78
-14
lines changed

5 files changed

+78
-14
lines changed

csrc/scatter.cpp

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,38 @@ class ScatterSum : public torch::autograd::Function<ScatterSum> {
7474
}
7575
};
7676

77+
// Autograd-aware scatter "mul" reduction: multiplies together all entries of
// `src` that share the same `index` value along dimension `dim`.
class ScatterMul : public torch::autograd::Function<ScatterMul> {
public:
  // Forward pass.
  //   src          - input values to reduce.
  //   index        - group indices, broadcast to `src`'s shape along `dim`.
  //   dim          - reduction dimension (negative values wrap around).
  //   optional_out - caller-provided output tensor; written in-place when set
  //                  (hence marked dirty for autograd).
  //   dim_size     - output extent along `dim` when no `out` is given.
  // Returns a single-element list holding the reduced tensor.
  static variable_list forward(AutogradContext *ctx, Variable src,
                               Variable index, int64_t dim,
                               torch::optional<Variable> optional_out,
                               torch::optional<int64_t> dim_size) {
    // Normalize negative dims, e.g. -1 -> last dimension of `src`.
    dim = dim < 0 ? src.dim() + dim : dim;
    ctx->saved_data["dim"] = dim;
    ctx->saved_data["src_shape"] = src.sizes();
    index = broadcast(index, src, dim);
    auto result = scatter_fw(src, index, dim, optional_out, dim_size, "mul");
    auto out = std::get<0>(result);
    // `out` is needed in backward to reconstruct per-element gradients.
    ctx->save_for_backward({src, index, out});
    if (optional_out.has_value())
      ctx->mark_dirty({optional_out.value()});
    return {out};
  }

  // Backward pass: for a product reduction, d(out)/d(src_j) equals the
  // product of all other group members, i.e. out / src_j. We compute it as
  // gather(grad_out * out) / src; divisions by zero yield NaN, which is
  // zeroed below (gradient w.r.t. zero entries is approximated as 0).
  static variable_list backward(AutogradContext *ctx, variable_list grad_outs) {
    auto grad_out = grad_outs[0];
    auto saved = ctx->get_saved_variables();
    auto src = saved[0];
    auto index = saved[1];
    auto out = saved[2];
    auto dim = ctx->saved_data["dim"].toInt();
    // NOTE: `src_shape` saved in forward is not needed here — the gathered
    // gradient already has `src`'s shape.
    auto grad_in = torch::gather(grad_out * out, dim, index, false).div_(src);
    grad_in.masked_fill_(grad_in.isnan(), 0);
    // Only `src` is differentiable; the remaining inputs get no gradient.
    return {grad_in, Variable(), Variable(), Variable(), Variable()};
  }
};
108+
77109
class ScatterMean : public torch::autograd::Function<ScatterMean> {
78110
public:
79111
static variable_list forward(AutogradContext *ctx, Variable src,
@@ -201,6 +233,12 @@ torch::Tensor scatter_sum(torch::Tensor src, torch::Tensor index, int64_t dim,
201233
return ScatterSum::apply(src, index, dim, optional_out, dim_size)[0];
202234
}
203235

236+
// Scatter "mul" entry point exposed as a custom operator: reduces `src` into
// groups given by `index` along `dim` via multiplication. Thin wrapper that
// unwraps the single-element variable_list returned by the autograd Function.
torch::Tensor scatter_mul(torch::Tensor src, torch::Tensor index, int64_t dim,
                          torch::optional<torch::Tensor> optional_out,
                          torch::optional<int64_t> dim_size) {
  return ScatterMul::apply(src, index, dim, optional_out, dim_size)[0];
}
241+
204242
torch::Tensor scatter_mean(torch::Tensor src, torch::Tensor index, int64_t dim,
205243
torch::optional<torch::Tensor> optional_out,
206244
torch::optional<int64_t> dim_size) {
@@ -225,6 +263,7 @@ scatter_max(torch::Tensor src, torch::Tensor index, int64_t dim,
225263

226264
// Register every scatter reduction as a TorchScript custom operator under the
// `torch_scatter::` namespace so the Python side can call them via
// `torch.ops.torch_scatter.*` (including from scripted/traced code).
static auto registry = torch::RegisterOperators()
                           .op("torch_scatter::scatter_sum", &scatter_sum)
                           .op("torch_scatter::scatter_mul", &scatter_mul)
                           .op("torch_scatter::scatter_mean", &scatter_mean)
                           .op("torch_scatter::scatter_min", &scatter_min)
                           .op("torch_scatter::scatter_max", &scatter_max);

test/test_scatter.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,16 @@
77

88
from .utils import reductions, tensor, dtypes, devices
99

10+
reductions = reductions + ['mul']
11+
1012
tests = [
1113
{
1214
'src': [1, 3, 2, 4, 5, 6],
1315
'index': [0, 1, 0, 1, 1, 3],
1416
'dim': 0,
1517
'sum': [3, 12, 0, 6],
1618
'add': [3, 12, 0, 6],
19+
'mul': [2, 60, 1, 6],
1720
'mean': [1.5, 4, 0, 6],
1821
'min': [1, 3, 0, 6],
1922
'arg_min': [0, 1, 6, 5],
@@ -26,6 +29,7 @@
2629
'dim': 0,
2730
'sum': [[4, 6], [21, 24], [0, 0], [11, 12]],
2831
'add': [[4, 6], [21, 24], [0, 0], [11, 12]],
32+
'mul': [[1 * 3, 2 * 4], [5 * 7 * 9, 6 * 8 * 10], [1, 1], [11, 12]],
2933
'mean': [[2, 3], [7, 8], [0, 0], [11, 12]],
3034
'min': [[1, 2], [5, 6], [0, 0], [11, 12]],
3135
'arg_min': [[0, 0], [1, 1], [6, 6], [5, 5]],
@@ -38,6 +42,7 @@
3842
'dim': 1,
3943
'sum': [[4, 21, 0, 11], [12, 18, 12, 0]],
4044
'add': [[4, 21, 0, 11], [12, 18, 12, 0]],
45+
'mul': [[1 * 3, 5 * 7 * 9, 1, 11], [2 * 4 * 6, 8 * 10, 12, 1]],
4146
'mean': [[2, 7, 0, 11], [4, 9, 12, 0]],
4247
'min': [[1, 5, 0, 11], [2, 8, 12, 0]],
4348
'arg_min': [[0, 1, 6, 5], [0, 2, 5, 6]],
@@ -50,6 +55,7 @@
5055
'dim': 1,
5156
'sum': [[[4, 6], [5, 6], [0, 0]], [[7, 9], [0, 0], [22, 24]]],
5257
'add': [[[4, 6], [5, 6], [0, 0]], [[7, 9], [0, 0], [22, 24]]],
58+
'mul': [[[3, 8], [5, 6], [1, 1]], [[7, 9], [1, 1], [120, 11 * 13]]],
5359
'mean': [[[2, 3], [5, 6], [0, 0]], [[7, 9], [0, 0], [11, 12]]],
5460
'min': [[[1, 2], [5, 6], [0, 0]], [[7, 9], [0, 0], [10, 11]]],
5561
'arg_min': [[[0, 0], [1, 1], [3, 3]], [[1, 1], [3, 3], [0, 0]]],
@@ -62,6 +68,7 @@
6268
'dim': 1,
6369
'sum': [[4], [6]],
6470
'add': [[4], [6]],
71+
'mul': [[3], [8]],
6572
'mean': [[2], [3]],
6673
'min': [[1], [2]],
6774
'arg_min': [[0], [0]],
@@ -74,6 +81,7 @@
7481
'dim': 1,
7582
'sum': [[[4, 4]], [[6, 6]]],
7683
'add': [[[4, 4]], [[6, 6]]],
84+
'mul': [[[3, 3]], [[8, 8]]],
7785
'mean': [[[2, 2]], [[3, 3]]],
7886
'min': [[[1, 1]], [[2, 2]]],
7987
'arg_min': [[[0, 0]], [[0, 0]]],
@@ -125,6 +133,8 @@ def test_out(test, reduce, dtype, device):
125133

126134
if reduce == 'sum' or reduce == 'add':
127135
expected = expected - 2
136+
elif reduce == 'mul':
137+
expected = out # We can not really test this here.
128138
elif reduce == 'mean':
129139
expected = out # We can not really test this here.
130140
elif reduce == 'min':

torch_scatter/__init__.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@
1919
from .placeholder import cuda_version_placeholder
2020
torch.ops.torch_scatter.cuda_version = cuda_version_placeholder
2121

22+
from .placeholder import scatter_placeholder
23+
torch.ops.torch_scatter.scatter_mul = scatter_placeholder
24+
2225
from .placeholder import scatter_arg_placeholder
2326
torch.ops.torch_scatter.scatter_min = scatter_arg_placeholder
2427
torch.ops.torch_scatter.scatter_max = scatter_arg_placeholder
@@ -52,16 +55,16 @@
5255
major, minor = int(str(cuda_version)[0:2]), int(str(cuda_version)[3])
5356
t_major, t_minor = [int(x) for x in torch.version.cuda.split('.')]
5457

55-
if t_major != major or t_minor != minor:
58+
if t_major != major:
5659
raise RuntimeError(
5760
f'Detected that PyTorch and torch_scatter were compiled with '
5861
f'different CUDA versions. PyTorch has CUDA version '
5962
f'{t_major}.{t_minor} and torch_scatter has CUDA version '
6063
f'{major}.{minor}. Please reinstall the torch_scatter that '
6164
f'matches your PyTorch install.')
6265

63-
from .scatter import (scatter_sum, scatter_add, scatter_mean, scatter_min,
64-
scatter_max, scatter) # noqa
66+
from .scatter import (scatter_sum, scatter_add, scatter_mul, scatter_mean,
67+
scatter_min, scatter_max, scatter) # noqa
6568
from .segment_csr import (segment_sum_csr, segment_add_csr, segment_mean_csr,
6669
segment_min_csr, segment_max_csr, segment_csr,
6770
gather_csr) # noqa
@@ -74,6 +77,7 @@
7477
__all__ = [
7578
'scatter_sum',
7679
'scatter_add',
80+
'scatter_mul',
7781
'scatter_mean',
7882
'scatter_min',
7983
'scatter_max',

torch_scatter/scatter.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,13 @@ def scatter_add(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
3131
return scatter_sum(src, index, dim, out, dim_size)
3232

3333

34+
@torch.jit.script
def scatter_mul(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
                out: Optional[torch.Tensor] = None,
                dim_size: Optional[int] = None) -> torch.Tensor:
    """Multiplies all values of :obj:`src` that share an index in
    :obj:`index` along dimension :obj:`dim` (product reduction).

    Thin TorchScript-compatible wrapper around the registered C++ operator
    ``torch_scatter::scatter_mul``. If :obj:`out` is given, the result is
    written into it in-place; :obj:`dim_size` fixes the output size along
    :obj:`dim` when :obj:`out` is :obj:`None`.
    """
    return torch.ops.torch_scatter.scatter_mul(src, index, dim, out, dim_size)
39+
40+
3441
@torch.jit.script
3542
def scatter_mean(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
3643
out: Optional[torch.Tensor] = None,
@@ -127,8 +134,8 @@ def scatter(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
127134
with size :attr:`dim_size` at dimension :attr:`dim`.
128135
If :attr:`dim_size` is not given, a minimal sized output tensor
129136
according to :obj:`index.max() + 1` is returned.
130-
:param reduce: The reduce operation (:obj:`"sum"`, :obj:`"mean"`,
131-
:obj:`"min"` or :obj:`"max"`). (default: :obj:`"sum"`)
137+
:param reduce: The reduce operation (:obj:`"sum"`, :obj:`"mul"`,
138+
:obj:`"mean"`, :obj:`"min"` or :obj:`"max"`). (default: :obj:`"sum"`)
132139
133140
:rtype: :class:`Tensor`
134141
@@ -150,6 +157,8 @@ def scatter(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
150157
"""
151158
if reduce == 'sum' or reduce == 'add':
152159
return scatter_sum(src, index, dim, out, dim_size)
160+
if reduce == 'mul':
161+
return scatter_mul(src, index, dim, out, dim_size)
153162
elif reduce == 'mean':
154163
return scatter_mean(src, index, dim, out, dim_size)
155164
elif reduce == 'min':

torch_scatter/segment_csr.py

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -22,16 +22,18 @@ def segment_mean_csr(src: torch.Tensor, indptr: torch.Tensor,
2222

2323

2424
@torch.jit.script
25-
def segment_min_csr(src: torch.Tensor, indptr: torch.Tensor,
26-
out: Optional[torch.Tensor] = None
27-
) -> Tuple[torch.Tensor, torch.Tensor]:
25+
def segment_min_csr(
26+
src: torch.Tensor, indptr: torch.Tensor,
27+
out: Optional[torch.Tensor] = None
28+
) -> Tuple[torch.Tensor, torch.Tensor]:
2829
return torch.ops.torch_scatter.segment_min_csr(src, indptr, out)
2930

3031

3132
@torch.jit.script
32-
def segment_max_csr(src: torch.Tensor, indptr: torch.Tensor,
33-
out: Optional[torch.Tensor] = None
34-
) -> Tuple[torch.Tensor, torch.Tensor]:
33+
def segment_max_csr(
34+
src: torch.Tensor, indptr: torch.Tensor,
35+
out: Optional[torch.Tensor] = None
36+
) -> Tuple[torch.Tensor, torch.Tensor]:
3537
return torch.ops.torch_scatter.segment_max_csr(src, indptr, out)
3638

3739

@@ -51,9 +53,9 @@ def segment_csr(src: torch.Tensor, indptr: torch.Tensor,
5153
Formally, if :attr:`src` and :attr:`indptr` are :math:`n`-dimensional and
5254
:math:`m`-dimensional tensors with
5355
size :math:`(x_0, ..., x_{m-1}, x_m, x_{m+1}, ..., x_{n-1})` and
54-
:math:`(x_0, ..., x_{m-1}, y)`, respectively, then :attr:`out` must be an
56+
:math:`(x_0, ..., x_{m-2}, y)`, respectively, then :attr:`out` must be an
5557
:math:`n`-dimensional tensor with size
56-
:math:`(x_0, ..., x_{m-1}, y - 1, x_{m+1}, ..., x_{n-1})`.
58+
:math:`(x_0, ..., x_{m-2}, y - 1, x_{m}, ..., x_{n-1})`.
5759
Moreover, the values of :attr:`indptr` must be between :math:`0` and
5860
:math:`x_m` in ascending order.
5961
The :attr:`indptr` tensor supports broadcasting in case its dimensions do
@@ -64,7 +66,7 @@ def segment_csr(src: torch.Tensor, indptr: torch.Tensor,
6466
6567
.. math::
6668
\mathrm{out}_i =
67-
\sum_{j = \mathrm{indptr}[i]}^{\mathrm{indptr}[i+i]}~\mathrm{src}_j.
69+
\sum_{j = \mathrm{indptr}[i]}^{\mathrm{indptr}[i+1]-1}~\mathrm{src}_j.
6870
6971
Due to the use of index pointers, :meth:`segment_csr` is the fastest
7072
method to apply for grouped reductions.

0 commit comments

Comments
 (0)