@@ -3,6 +3,8 @@
 #include "compat.h"
 #include "index_info.h"
 
+#include <vector>
+
 #define CHECK_CPU(x) AT_ASSERTM(!x.type().is_cuda(), #x " must be CPU tensor")
 
 enum ReductionType { ADD, MEAN, MIN, MAX };
@@ -123,8 +125,9 @@ segment_csr(at::Tensor src, at::Tensor indptr, at::optional<at::Tensor> out_opt,
   auto src_data = src.DATA_PTR<scalar_t>();
   auto out_data = out.DATA_PTR<scalar_t>();
 
-  scalar_t vals[K];
-  int64_t row_start, row_end, args[K];
+  std::vector<scalar_t> vals(K);
+  int64_t row_start, row_end;
+  std::vector<int64_t> args(K);
   AT_DISPATCH_REDUCTION_TYPES(reduce, [&] {
     for (int n = 0; n < N; n++) {
       int offset = IndexPtrToOffset<int64_t>::get(n, indptr_info);
@@ -195,8 +198,9 @@ segment_coo(at::Tensor src, at::Tensor index, at::Tensor out,
   auto src_data = src.DATA_PTR<scalar_t>();
   auto out_data = out.DATA_PTR<scalar_t>();
 
-  scalar_t vals[K];
-  int64_t idx, next_idx, row_start, args[K];
+  std::vector<scalar_t> vals(K);
+  int64_t idx, next_idx, row_start;
+  std::vector<int64_t> args(K);
   AT_DISPATCH_REDUCTION_TYPES(reduce, [&] {
     for (int e_1 = 0; e_1 < E_1; e_1++) {
       int offset = IndexToOffset<int64_t>::get(e_1 * E_2, index_info);
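
For context, both hunks apply the same fix: the C-style variable-length arrays scalar_t vals[K] and args[K] are replaced with std::vector. VLAs with a runtime bound are a GCC/Clang extension rather than standard C++, so compilers such as MSVC reject them; a std::vector sized at runtime is the portable equivalent. A minimal sketch of the pattern (the function and parameter names here are illustrative, not taken from the library):

#include <cstdint>
#include <vector>

// Hypothetical helper showing the VLA -> std::vector rewrite.
void reduce_row(const float *src, int64_t K) {
  // Before: float vals[K];  // VLA: compiles on GCC/Clang, fails on MSVC
  // After: heap-backed buffers with the same runtime size.
  std::vector<float> vals(K);
  std::vector<int64_t> args(K); // index bookkeeping for min/max reductions
  for (int64_t k = 0; k < K; k++) {
    vals[k] = src[k];
    args[k] = k;
  }
}

Note that in the patch both vectors are declared once, before the AT_DISPATCH_REDUCTION_TYPES call and its loop over rows, so moving the buffers off the stack costs a single heap allocation per dispatch rather than one per iteration.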