Skip to content

Can the model be defined with nn.Sequential, or does it need to use nn.ModuleList? #285

@alecda573

Description

@alecda573

I am trying to understand what happens in your forward_once method here:

`
def forward_once(self, x, augment=False, verbose=False):
img_size = x.shape[-2:] # height, width
yolo_out, out = [], []
if verbose:
print('0', x.shape)
str = ''

    # Augment images (inference and test only)
    if augment:  # https://github.com/ultralytics/yolov3/issues/931
        nb = x.shape[0]  # batch size
        s = [0.83, 0.67]  # scales
        x = torch.cat((x,
                       torch_utils.scale_img(x.flip(3), s[0]),  # flip-lr and scale
                       torch_utils.scale_img(x, s[1]),  # scale
                       ), 0)

    for i, module in enumerate(self.module_list):
        name = module.__class__.__name__
        #print(name)
        if name in ['WeightedFeatureFusion', 'FeatureConcat', 'FeatureConcat2', 'FeatureConcat3', 'FeatureConcat_l', 'ScaleChannel', 'ShiftChannel', 'ShiftChannel2D', 'ControlChannel', 'ControlChannel2D', 'AlternateChannel', 'AlternateChannel2D', 'SelectChannel', 'SelectChannel2D', 'ScaleSpatial']:  # sum, concat
            if verbose:
                l = [i - 1] + module.layers  # layers
                sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers]  # shapes
                str = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, sh)])
            x = module(x, out)  # WeightedFeatureFusion(), FeatureConcat()
        elif name in ['ImplicitA', 'ImplicitM', 'ImplicitC', 'Implicit2DA', 'Implicit2DM', 'Implicit2DC']:
            x = module()
        elif name == 'YOLOLayer':
            yolo_out.append(module(x, out))
        elif name == 'JDELayer':
            yolo_out.append(module(x, out))
        else:  # run module directly, i.e. mtype = 'convolutional', 'upsample', 'maxpool', 'batchnorm2d' etc.
            #print(module)
            #print(x.shape)
            x = module(x)

        out.append(x if self.routs[i] else [])
        if verbose:
            print('%g/%g %s -' % (i, len(self.module_list), name), list(x.shape), str)
            str = ''

    if self.training:  # train
        return yolo_out
    elif ONNX_EXPORT:  # export
        x = [torch.cat(x, 0) for x in zip(*yolo_out)]
        return x[0], torch.cat(x[1:3], 1)  # scores, boxes: 3780x80, 3780x4
    else:  # inference or test
        x, p = zip(*yolo_out)  # inference output, training output
        x = torch.cat(x, 1)  # cat yolo outputs
        if augment:  # de-augment results
            x = torch.split(x, nb, dim=0)
            x[1][..., :4] /= s[0]  # scale
            x[1][..., 0] = img_size[1] - x[1][..., 0]  # flip lr
            x[2][..., :4] /= s[1]  # scale
            x = torch.cat(x, 1)
        return x, p`

and why you chose to loop through an nn.ModuleList object instead of an nn.Sequential object.
Could this code easily support an nn.Sequential object?

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions