Skip to content

Commit a6ada96

Browse files
committed
Update for latest PyTorch
1 parent 8e3cd02 commit a6ada96

File tree

10 files changed

+28
-29
lines changed

10 files changed

+28
-29
lines changed

mmcv/onnx/onnx_utils/symbolic_helper.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,7 @@ def _if_scalar_type_as(g, self, tensor):
134134
if isinstance(self, torch._C.Value):
135135
return self
136136

137-
scalar_type = tensor.type().scalarType()
137+
scalar_type = tensor.scalar_type().scalarType()
138138
if scalar_type:
139139
ty = scalar_type.lower()
140140
return getattr(self, ty)()
@@ -151,7 +151,7 @@ def _is_value(x):
151151

152152

153153
def _is_tensor_list(x):
154-
return x.type().isSubtypeOf(ListType.ofTensors())
154+
return x.scalar_type().isSubtypeOf(ListType.ofTensors())
155155

156156

157157
def _unimplemented(op, msg):
@@ -162,7 +162,7 @@ def _unimplemented(op, msg):
162162
def _try_get_scalar_type(*args):
163163
for arg in args:
164164
try:
165-
return arg.type().scalarType()
165+
return arg.scalar_type().scalarType()
166166
except RuntimeError:
167167
pass
168168
return None
@@ -218,7 +218,7 @@ def _interpolate_size_to_scales(g, input, output_size, dim):
218218
else:
219219
scales_constant = [
220220
1. if i < 2 else float(output_size[-(dim - i)]) /
221-
float(input.type().sizes()[-(dim - i)]) for i in range(0, dim)
221+
float(input.scalar_type().sizes()[-(dim - i)]) for i in range(0, dim)
222222
]
223223
scales = g.op(
224224
'Constant',
@@ -233,9 +233,9 @@ def _interpolate_get_scales_if_available(g, scales):
233233
# scales[0] is TensorType with sizes = [] in Pytorch == 1.6.0
234234
# scales[0] is ListType in Pytorch == 1.7.0
235235
# scales[0] is TensorType with sizes = [2] in Pytorch == 1.8.0
236-
scale_desc = 'fs' if scales[0].type().kind() == 'ListType' or (
237-
scales[0].type().kind() == 'TensorType' and
238-
(sum(scales[0].type().sizes()) > 1)) else 'f'
236+
scale_desc = 'fs' if scales[0].scalar_type().kind() == 'ListType' or (
237+
scales[0].scalar_type().kind() == 'TensorType' and
238+
(sum(scales[0].scalar_type().sizes()) > 1)) else 'f'
239239
available_scales = _maybe_get_const(
240240
scales[0], scale_desc) != -1 and not _is_none(scales[0])
241241

@@ -276,7 +276,7 @@ def _get_interpolate_attributes(g, mode, args):
276276

277277
def _interpolate_get_scales(g, scale_factor, dim):
278278
offsets = g.op('Constant', value_t=torch.ones(2, dtype=torch.float32))
279-
if isinstance(scale_factor.type(), torch._C.ListType):
279+
if isinstance(scale_factor.scalar_type(), torch._C.ListType):
280280
return g.op('Concat', offsets, scale_factor, axis_i=0)
281281
else:
282282
scale_factor = _unsqueeze_helper(g, scale_factor, 0)

mmcv/onnx/symbolic.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ def symbolic_fn(g, input, output_size, *args):
2626
if scales is None:
2727
if 'ONNX_BACKEND' in os.environ and os.environ[
2828
'ONNX_BACKEND'] == 'TensorRT':
29-
input_size = input.type().sizes()
29+
input_size = input.scalar_type().sizes()
3030
# slice the first two dim
3131
input_size = input_size[:2]
3232
# convert output_size to int type
@@ -132,13 +132,13 @@ def constant_pad_nd(g, input, padding, value=None):
132132
mode = 'constant'
133133
value = sym_help._maybe_get_scalar(value)
134134
value = sym_help._if_scalar_type_as(g, value, input)
135-
pad = _prepare_onnx_paddings(g, input.type().dim(), padding)
135+
pad = _prepare_onnx_paddings(g, input.scalar_type().dim(), padding)
136136
return g.op('Pad', input, pad, value, mode_s=mode)
137137

138138

139139
def reflection_pad(g, input, padding):
140140
mode = 'reflect'
141-
paddings = _prepare_onnx_paddings(g, input.type().dim(), padding)
141+
paddings = _prepare_onnx_paddings(g, input.scalar_type().dim(), padding)
142142
return g.op('Pad', input, paddings, mode_s=mode)
143143

144144

@@ -294,7 +294,7 @@ def one_hot(g, self, num_classes):
294294

295295
@parse_args('v', 'i', 'none')
296296
def softmax(g, input, dim, dtype=None):
297-
input_dim = input.type().dim()
297+
input_dim = input.scalar_type().dim()
298298
if input_dim:
299299
# TODO: remove this as onnx opset 11 spec allows negative axes
300300
if dim < 0:
@@ -332,7 +332,7 @@ def symbolic_fn(g, input, output_size):
332332
return g.op('GlobalMaxPool', input), None
333333
raise NotImplementedError(
334334
'[Adaptive pool]:input size not accessible')
335-
dim = input.type().sizes()[2:]
335+
dim = input.scalar_type().sizes()[2:]
336336
if output_size == [1] * len(output_size) and type == 'MaxPool':
337337
return g.op('GlobalMaxPool', input), None
338338

@@ -375,7 +375,7 @@ def new_full(g,
375375
pin_memory=False):
376376
from torch.onnx.symbolic_opset9 import full
377377
if dtype is None and self.isCompleteTensor():
378-
dtype = self.type().scalarType()
378+
dtype = self.scalar_type().scalarType()
379379
dtype = sym_help.scalar_type_to_onnx.index(
380380
sym_help.cast_pytorch_to_onnx[dtype])
381381
return full(g, size, fill_value, dtype, layout, device, pin_memory)

mmcv/ops/csrc/parrots/ms_deform_attn.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ Tensor ms_deform_attn_forward(const Tensor &value, const Tensor &spatial_shapes,
3232
const Tensor &sampling_loc,
3333
const Tensor &attn_weight,
3434
const int im2col_step) {
35-
if (value.type().is_cuda()) {
35+
if (value.scalar_type().is_cuda()) {
3636
#ifdef MMCV_WITH_CUDA
3737
CHECK_CUDA_INPUT(value)
3838
CHECK_CUDA_INPUT(spatial_shapes)
@@ -55,7 +55,7 @@ void ms_deform_attn_backward(const Tensor &value, const Tensor &spatial_shapes,
5555
const Tensor &grad_output, Tensor &grad_value,
5656
Tensor &grad_sampling_loc,
5757
Tensor &grad_attn_weight, const int im2col_step) {
58-
if (value.type().is_cuda()) {
58+
if (value.scalar_type().is_cuda()) {
5959
#ifdef MMCV_WITH_CUDA
6060
CHECK_CUDA_INPUT(value)
6161
CHECK_CUDA_INPUT(spatial_shapes)

mmcv/ops/csrc/parrots/nms_rotated_cpu.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@ Tensor nms_rotated_cpu_kernel(const Tensor dets, const Tensor scores,
1111
// however, the code in this function is much shorter because
1212
// we delegate the IoU computation for rotated boxes to
1313
// the single_box_iou_rotated function in box_iou_rotated_utils.h
14-
AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor");
15-
AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor");
16-
AT_ASSERTM(dets.type() == scores.type(),
14+
AT_ASSERTM(!dets.scalar_type().is_cuda(), "dets must be a CPU tensor");
15+
AT_ASSERTM(!scores.scalar_type().is_cuda(), "scores must be a CPU tensor");
16+
AT_ASSERTM(dets.scalar_type() == scores.scalar_type(),
1717
"dets should have the same type as scores");
1818

1919
if (dets.numel() == 0) {
@@ -59,7 +59,7 @@ Tensor nms_rotated_cpu_kernel(const Tensor dets, const Tensor scores,
5959
Tensor nms_rotated_cpu(const Tensor dets, const Tensor scores,
6060
const float iou_threshold) {
6161
auto result = at::empty({0}, dets.options());
62-
AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms_rotated", [&] {
62+
AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] {
6363
result = nms_rotated_cpu_kernel<scalar_t>(dets, scores, iou_threshold);
6464
});
6565
return result;

mmcv/ops/csrc/pytorch/cpu/nms_rotated.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ Tensor nms_rotated_cpu_kernel(const Tensor dets, const Tensor scores,
5959
Tensor nms_rotated_cpu(const Tensor dets, const Tensor scores,
6060
const float iou_threshold) {
6161
auto result = at::empty({0}, dets.options());
62-
AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms_rotated", [&] {
62+
AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] {
6363
result = nms_rotated_cpu_kernel<scalar_t>(dets, scores, iou_threshold);
6464
});
6565
return result;

mmcv/ops/csrc/pytorch/cuda/psamask_cuda.cu

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
// Modified from
33
// https://github.com/hszhao/semseg/blob/master/lib/psa/src
44

5-
#include <THC/THC.h>
65
#include <torch/serialize/tensor.h>
76

87
#include <THC/THCDeviceUtils.cuh>

mmcv/ops/sync_bn.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ def forward(self, input, running_mean, running_var, weight, bias, momentum,
4646
assert isinstance(
4747
input, (torch.HalfTensor, torch.FloatTensor,
4848
torch.cuda.HalfTensor, torch.cuda.FloatTensor)), \
49-
f'only support Half or Float Tensor, but {input.type()}'
49+
f'only support Half or Float Tensor, but {input.scalar_type()}'
5050
output = torch.zeros_like(input)
5151
input3d = input.flatten(start_dim=2)
5252
output3d = output.view_as(input3d)

mmcv/parallel/data_container.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ def data(self):
6060
@property
6161
def datatype(self):
6262
if isinstance(self.data, torch.Tensor):
63-
return self.data.type()
63+
return self.data.scalar_type()
6464
else:
6565
return type(self.data)
6666

mmcv/runner/dist_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
149149
else:
150150
buckets = OrderedDict()
151151
for tensor in tensors:
152-
tp = tensor.type()
152+
tp = tensor.scalar_type()
153153
if tp not in buckets:
154154
buckets[tp] = []
155155
buckets[tp].append(tensor)

tests/test_ops/test_corner_pool.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,29 +30,29 @@ def test_corner_pool_device_and_dtypes_cpu():
3030
[0, 0, 0, 0, 0]]]])
3131
pool = CornerPool('left')
3232
left_tensor = pool(lr_tensor)
33-
assert left_tensor.type() == lr_tensor.type()
33+
assert left_tensor.scalar_type() == lr_tensor.scalar_type()
3434
assert torch.equal(left_tensor, left_answer)
3535
# Right Pool
3636
right_answer = torch.tensor([[[[0, 0, 0, 0, 0], [2, 2, 3, 3, 3],
3737
[5, 5, 5, 5, 6], [0, 0, 0, 0, 0],
3838
[0, 0, 0, 0, 0]]]])
3939
pool = CornerPool('right')
4040
right_tensor = pool(lr_tensor)
41-
assert right_tensor.type() == lr_tensor.type()
41+
assert right_tensor.scalar_type() == lr_tensor.scalar_type()
4242
assert torch.equal(right_tensor, right_answer)
4343
# Top Pool
4444
top_answer = torch.tensor([[[[0, 3, 4, 0, 0], [0, 3, 4, 0, 0],
4545
[0, 3, 4, 0, 0], [0, 2, 2, 0, 0],
4646
[0, 0, 2, 0, 0]]]])
4747
pool = CornerPool('top')
4848
top_tensor = pool(tb_tensor)
49-
assert top_tensor.type() == tb_tensor.type()
49+
assert top_tensor.scalar_type() == tb_tensor.scalar_type()
5050
assert torch.equal(top_tensor, top_answer)
5151
# Bottom Pool
5252
bottom_answer = torch.tensor([[[[0, 3, 1, 0, 0], [0, 3, 1, 0, 0],
5353
[0, 3, 4, 0, 0], [0, 3, 4, 0, 0],
5454
[0, 3, 4, 0, 0]]]])
5555
pool = CornerPool('bottom')
5656
bottom_tensor = pool(tb_tensor)
57-
assert bottom_tensor.type() == tb_tensor.type()
57+
assert bottom_tensor.scalar_type() == tb_tensor.scalar_type()
5858
assert torch.equal(bottom_tensor, bottom_answer)

0 commit comments

Comments (0)