
Commit 0c9d05b

[Fix] fix runtime error of "+=" inplace operation in PyTorch 1.10 (#8439)

* fix runtime error of "+=" inplace operation in PyTorch 1.10
* minor fix

1 parent 0f57742 · commit 0c9d05b
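Background: an in-place add ("+=") mutates its left-hand tensor. If autograd has already saved that tensor for the backward pass, PyTorch detects the mutation and raises a RuntimeError at backward time (typically reported as a variable needed for gradient computation having been modified by an inplace operation), which is the failure this commit works around under PyTorch 1.10. Below is a minimal sketch of the failure mode and of the out-of-place form the commit switches to; the tensors are illustrative only, not mmdet code:

import torch

x = torch.randn(1, 3, 8, 8, requires_grad=True)

# torch.sigmoid saves its output for backward, so mutating that output
# in place invalidates the saved tensor.
y = torch.sigmoid(x)
y += 1.0
# y.sum().backward()  # RuntimeError: a variable needed for gradient
#                     # computation was modified by an inplace operation

# Out-of-place form used throughout this commit: a new tensor is created
# and the tensor saved by autograd stays untouched.
y = torch.sigmoid(x)
y = y + 1.0
y.sum().backward()

The out-of-place form costs one extra allocation per add, which is presumably why the original code used "+="; the commit trades that small memory saving for a valid autograd graph.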

8 files changed: +23 −18 lines changed

mmdet/models/dense_heads/rpn_head.py

Lines changed: 1 addition & 1 deletion

@@ -62,7 +62,7 @@ def _init_layers(self):
    def forward_single(self, x):
        """Forward feature map of a single scale level."""
        x = self.rpn_conv(x)
-        x = F.relu(x, inplace=True)
+        x = F.relu(x, inplace=False)
        rpn_cls_score = self.rpn_cls(x)
        rpn_bbox_pred = self.rpn_reg(x)
        return rpn_cls_score, rpn_bbox_pred
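Note on this file: the change here swaps an in-place activation rather than a "+=", but the mechanism is the same: F.relu(..., inplace=True) overwrites its input tensor, and if autograd has saved that tensor for backward, the next backward() fails. The commit does not spell out the exact failure path inside RPNHead, so the snippet below is only a generic illustration with throwaway tensors, not mmdet code:

import torch
import torch.nn.functional as F

x = torch.randn(4, requires_grad=True)

y = torch.sigmoid(x)           # sigmoid's backward needs y
z = F.relu(y, inplace=True)    # overwrites y in place
# z.sum().backward()           # RuntimeError: y was modified in place

y = torch.sigmoid(x)
z = F.relu(y, inplace=False)   # the pattern adopted here: leave y intact
z.sum().backward()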

mmdet/models/dense_heads/solov2_head.py

Lines changed: 3 additions & 1 deletion

@@ -141,7 +141,9 @@ def forward(self, feats):
                    input_p.device)
                input_p = torch.cat([input_p, coord_feat], 1)

-            feature_add_all_level += self.convs_all_levels[i](input_p)
+            # fix runtime error of "+=" inplace operation in PyTorch 1.10
+            feature_add_all_level = feature_add_all_level + \
+                self.convs_all_levels[i](input_p)

        feature_pred = self.conv_pred(feature_add_all_level)
        return feature_pred

mmdet/models/necks/dyhead.py

Lines changed: 4 additions & 2 deletions

@@ -109,7 +109,8 @@ def forward(self, x):
            summed_levels = 1
            if level > 0:
                low_feat = self.spatial_conv_low(x[level - 1], offset, mask)
-                sum_feat += low_feat * self.scale_attn_module(low_feat)
+                sum_feat = sum_feat + \
+                    low_feat * self.scale_attn_module(low_feat)
                summed_levels += 1
            if level < len(x) - 1:
                # this upsample order is weird, but faster than natural order
@@ -119,7 +120,8 @@ def forward(self, x):
                    size=x[level].shape[-2:],
                    mode='bilinear',
                    align_corners=True)
-                sum_feat += high_feat * self.scale_attn_module(high_feat)
+                sum_feat = sum_feat + high_feat * \
+                    self.scale_attn_module(high_feat)
                summed_levels += 1
            outs.append(self.task_attn_module(sum_feat / summed_levels))
mmdet/models/necks/pafpn.py

Lines changed: 2 additions & 1 deletion

@@ -111,7 +111,8 @@ def forward(self, inputs):
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
-            laterals[i - 1] += F.interpolate(
+            # fix runtime error of "+=" inplace operation in PyTorch 1.10
+            laterals[i - 1] = laterals[i - 1] + F.interpolate(
                laterals[i], size=prev_shape, mode='nearest')

        # build outputs

mmdet/models/roi_heads/htc_roi_head.py

Lines changed: 6 additions & 6 deletions

@@ -72,7 +72,7 @@ def forward_dummy(self, x, proposals):
            if self.with_semantic and 'mask' in self.semantic_fusion:
                mask_semantic_feat = self.semantic_roi_extractor(
                    [semantic_feat], mask_rois)
-                mask_feats += mask_semantic_feat
+                mask_feats = mask_feats + mask_semantic_feat
            last_feat = None
            for i in range(self.num_stages):
                mask_head = self.mask_head[i]
@@ -133,7 +133,7 @@ def _mask_forward_train(self,
            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
                mask_semantic_feat = F.adaptive_avg_pool2d(
                    mask_semantic_feat, mask_feats.shape[-2:])
-            mask_feats += mask_semantic_feat
+            mask_feats = mask_feats + mask_semantic_feat

        # mask information flow
        # forward all previous mask heads to obtain last_feat, and fuse it
@@ -167,7 +167,7 @@ def _bbox_forward(self, stage, x, rois, semantic_feat=None):
            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
                bbox_semantic_feat = adaptive_avg_pool2d(
                    bbox_semantic_feat, bbox_feats.shape[-2:])
-            bbox_feats += bbox_semantic_feat
+            bbox_feats = bbox_feats + bbox_semantic_feat
        cls_score, bbox_pred = bbox_head(bbox_feats)

        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
@@ -186,7 +186,7 @@ def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
                mask_semantic_feat = F.adaptive_avg_pool2d(
                    mask_semantic_feat, mask_feats.shape[-2:])
-            mask_feats += mask_semantic_feat
+            mask_feats = mask_feats + mask_semantic_feat
        if self.mask_info_flow:
            last_feat = None
            last_pred = None
@@ -459,7 +459,7 @@ def simple_test(self, x, proposal_list, img_metas, rescale=False):
            if self.with_semantic and 'mask' in self.semantic_fusion:
                mask_semantic_feat = self.semantic_roi_extractor(
                    [semantic_feat], mask_rois)
-                mask_feats += mask_semantic_feat
+                mask_feats = mask_feats + mask_semantic_feat
            last_feat = None

            num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
@@ -600,7 +600,7 @@ def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
                                -2:]:
                            mask_semantic_feat = F.adaptive_avg_pool2d(
                                mask_semantic_feat, mask_feats.shape[-2:])
-                        mask_feats += mask_semantic_feat
+                        mask_feats = mask_feats + mask_semantic_feat
                    last_feat = None
                    for i in range(self.num_stages):
                        mask_head = self.mask_head[i]

mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py

Lines changed: 1 addition & 1 deletion

@@ -68,7 +68,7 @@ def forward(self, feats, rois, roi_scale_factor=None):
                roi_feats_t = self.pre_module(roi_feats_t)
            if self.aggregation == 'sum':
                # and sum them all
-                roi_feats += roi_feats_t
+                roi_feats = roi_feats + roi_feats_t
            else:
                # and concat them along channel dimension
                roi_feats[:, start_channels:end_channels] = roi_feats_t

mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py

Lines changed: 4 additions & 4 deletions

@@ -91,11 +91,11 @@ def forward(self, feats, rois, roi_scale_factor=None):
                mask = mask.float().unsqueeze(-1)
                # select target level rois and reset the rest rois to zero.
                rois_i = rois.clone().detach()
-                rois_i *= mask
+                rois_i = rois_i * mask
                mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
                roi_feats_t = self.roi_layers[i](feats[i], rois_i)
-                roi_feats_t *= mask_exp
-                roi_feats += roi_feats_t
+                roi_feats_t = roi_feats_t * mask_exp
+                roi_feats = roi_feats + roi_feats_t
                continue
            inds = mask.nonzero(as_tuple=False).squeeze(1)
            if inds.numel() > 0:
@@ -109,7 +109,7 @@ def forward(self, feats, rois, roi_scale_factor=None):
                # in other GPUs and will cause a hanging error.
                # Therefore, we add it to ensure each feature pyramid is
                # included in the computation graph to avoid runtime bugs.
-                roi_feats += sum(
+                roi_feats = roi_feats + sum(
                    x.view(-1)[0]
                    for x in self.parameters()) * 0. + feats[i].sum() * 0.
        return roi_feats

mmdet/models/roi_heads/scnet_roi_head.py

Lines changed: 2 additions & 2 deletions

@@ -110,7 +110,7 @@ def _bbox_forward(self,
            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
                bbox_semantic_feat = adaptive_avg_pool2d(
                    bbox_semantic_feat, bbox_feats.shape[-2:])
-            bbox_feats += bbox_semantic_feat
+            bbox_feats = bbox_feats + bbox_semantic_feat
        if self.with_glbctx and glbctx_feat is not None:
            bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)
        cls_score, bbox_pred, relayed_feat = bbox_head(
@@ -137,7 +137,7 @@ def _mask_forward(self,
            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
                mask_semantic_feat = F.adaptive_avg_pool2d(
                    mask_semantic_feat, mask_feats.shape[-2:])
-            mask_feats += mask_semantic_feat
+            mask_feats = mask_feats + mask_semantic_feat
        if self.with_glbctx and glbctx_feat is not None:
            mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
        if self.with_feat_relay and relayed_feat is not None:
