backends/arm/test/ops/test_conv2d.py (26 additions, 26 deletions)
@@ -117,26 +117,26 @@ def forward(self, x):
         return x
 
 
-conv2d_2x2_3x2x40x40_nobias = Conv2d(
+conv2d_2x2_3x2x14x14_nobias = Conv2d(
     in_channels=2,
     out_channels=3,
     kernel_size=(2, 2),
     stride=1,
     bias=False,
     padding=0,
-    width=40,
-    height=40,
-    batches=3,
+    width=14,
+    height=14,
+    batches=2,
 )
 
-conv2d_3x3_1x3x256x256_st1 = Conv2d(
+conv2d_3x3_1x3x24x24_st1 = Conv2d(
     in_channels=3,
     out_channels=10,
     kernel_size=(3, 3),
     stride=1,
     padding=0,
-    width=256,
-    height=256,
+    width=24,
+    height=24,
     batches=1,
 )
 
@@ -151,14 +151,14 @@ def forward(self, x):
     batches=1,
 )
 
-conv2d_1x1_1x2x128x128_st1 = Conv2d(
+conv2d_1x1_1x2x16x16_st1 = Conv2d(
     in_channels=2,
     out_channels=1,
     kernel_size=(1, 1),
     stride=1,
     padding=0,
-    width=128,
-    height=128,
+    width=16,
+    height=16,
     batches=1,
 )
 
@@ -173,25 +173,25 @@ def forward(self, x):
     batches=1,
 )
 
-conv2d_5x5_3x2x128x128_st1 = Conv2d(
+conv2d_5x5_3x2x24x24_st1 = Conv2d(
     in_channels=2,
     out_channels=3,
     kernel_size=(5, 5),
     stride=1,
     padding=0,
-    width=128,
-    height=128,
-    batches=3,
+    width=24,
+    height=24,
+    batches=2,
 )
 
-conv2d_3x3_1x3x224x224_st2_pd1 = Conv2d(
+conv2d_3x3_1x3x28x28_st2_pd1 = Conv2d(
     in_channels=3,
     out_channels=16,
     kernel_size=(3, 3),
     stride=2,
     padding=1,
-    width=224,
-    height=224,
+    width=28,
+    height=28,
     batches=1,
 )
 
@@ -304,8 +304,8 @@ def forward(self, x):
 
 two_conv2d_nobias = Conv2d(
     nbr_conv=2,
-    width=256,
-    height=256,
+    width=32,
+    height=32,
     in_channels=[3, 10],
     out_channels=[10, 15],
     kernel_size=[(5, 5), (5, 5)],
@@ -317,8 +317,8 @@ def forward(self, x):
 
 two_conv2d = Conv2d(
     nbr_conv=2,
-    width=256,
-    height=256,
+    width=32,
+    height=32,
     in_channels=[3, 10],
     out_channels=[10, 15],
     kernel_size=[(5, 5), (5, 5)],
@@ -359,10 +359,10 @@ def forward(self, x):
 # Shenanigan to get a nicer output when test fails. With unittest it looks like:
 # FAIL: test_convolution_2d_tosa_INT_2_3x3_1x3x12x12_st2_pd1
 test_data_FP = {
-    "2x2_3x2x40x40_nobias": lambda: conv2d_2x2_3x2x40x40_nobias,
-    "3x3_1x3x256x256_st1": lambda: conv2d_3x3_1x3x256x256_st1,
+    "2x2_3x2x14x14_nobias": lambda: conv2d_2x2_3x2x14x14_nobias,
+    "3x3_1x3x24x24_st1": lambda: conv2d_3x3_1x3x24x24_st1,
     "3x3_1x3x12x12_st2_pd1": lambda: conv2d_3x3_1x3x12x12_st2_pd1,
-    "1x1_1x2x128x128_st1": lambda: conv2d_1x1_1x2x128x128_st1,
+    "1x1_1x2x16x16_st1": lambda: conv2d_1x1_1x2x16x16_st1,
     "2x2_1x1x14x13_st2_needs_adjust_pass": lambda: conv2d_2x2_1x1x14x13_st2,
     "5x5_1x3x14x15_st3_pd1_needs_adjust_pass": lambda: conv2d_5x5_1x3x14x15_st3_pd1,
     "7x7_1x3x16x16_st2_pd1_dl2_needs_adjust_pass": lambda: conv2d_7x7_1x3x16x16_st2_pd1_dl2,
@@ -373,8 +373,8 @@ def forward(self, x):
     "3x3_1x3x8x9_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_3x3_1x3x8x9_st3_pd0_dl1,
     "3x4_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_3x4_1x3x7x7_st3_pd0_dl1,
     "4x3_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_4x3_1x3x7x7_st3_pd0_dl1,
-    "5x5_3x2x128x128_st1": lambda: conv2d_5x5_3x2x128x128_st1,
-    "3x3_1x3x224x224_st2_pd1": lambda: conv2d_3x3_1x3x224x224_st2_pd1,
+    "5x5_3x2x24x24_st1": lambda: conv2d_5x5_3x2x24x24_st1,
+    "3x3_1x3x28x28_st2_pd1": lambda: conv2d_3x3_1x3x28x28_st2_pd1,
     "two_conv2d_nobias": lambda: two_conv2d_nobias,
     "two_conv2d": lambda: two_conv2d,
     "groups": lambda: conv2d_groups,
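Side note on the shape changes above (not part of the diff): a minimal sketch, assuming the standard Conv2d output-size formula from the PyTorch documentation, showing that the reduced inputs still yield valid output sizes for the same kernel/stride/padding combinations. The conv2d_out_size helper is hypothetical, added only for illustration.

import math

def conv2d_out_size(size: int, kernel: int, stride: int = 1,
                    padding: int = 0, dilation: int = 1) -> int:
    # Spatial output size along one dimension, per the PyTorch Conv2d docs.
    return math.floor((size + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)

# 3x3 kernel, stride 2, padding 1 on the new 28x28 input -> 14x14
assert conv2d_out_size(28, kernel=3, stride=2, padding=1) == 14
# 5x5 kernel, stride 1, no padding on the new 24x24 input -> 20x20
assert conv2d_out_size(24, kernel=5) == 20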
backends/arm/test/ops/test_conv_combos.py (15 additions, 15 deletions)
@@ -48,7 +48,7 @@ def __init__(self):
         # 1. 1x1 CONV2d + ReLU6 (Pointwise)
         self.pointwise_conv2d = torch.nn.Conv2d(
             in_channels=16, out_channels=96, kernel_size=1, stride=1, groups=1
-        ) ## (1, 128, 81, 81)
+        ) ## Example output shape (1, 96, 33, 33)
         self.batch_norm2d_16 = torch.nn.BatchNorm2d(96, affine=False)
         self.relu6 = torch.nn.ReLU6()
 
@@ -60,15 +60,15 @@ def __init__(self):
             padding=1,
             stride=1,
             groups=96,
-        ) ## (1, 128, H, W)
+        ) ## Example output shape (1, 96, H, W)
 
         # 3. Linear 1x1 Conv2d
         self.pointwise_conv2d_linear = torch.nn.Conv2d(
             in_channels=96, out_channels=16, kernel_size=1, stride=1, groups=1
-        ) ## (1, 32, 81, 81)
+        ) ## Example output shape (1, 16, 33, 33)
 
     def get_inputs(self) -> Tuple[torch.Tensor]:
-        return (torch.randn(1, 16, 81, 81),)
+        return (torch.randn(1, 16, 33, 33),)
 
     def forward(self, x):
         input = x
@@ -106,7 +106,7 @@ def __init__(self):
         self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
 
     def get_inputs(self) -> Tuple[torch.Tensor]:
-        return (torch.randn(1, 3, 128, 128),)
+        return (torch.randn(1, 3, 48, 48),)
 
     def forward(self, x):
         x = self.conv2d(x)
@@ -145,7 +145,7 @@ def __init__(self, affine: bool):
         self.relu6 = torch.nn.ReLU6()
 
     def get_inputs(self) -> Tuple[torch.Tensor]:
-        return (torch.randn(1, 3, 256, 256),)
+        return (torch.randn(1, 3, 64, 64),)
 
     def forward(self, x):
         x = self.conv2d(x)
@@ -161,11 +161,11 @@ class ComboConvRelu6(torch.nn.Module):
     ]
 
     test_data_FP = {
-        "combo_conv_relu_2_x_4d": lambda: (2 * torch.randn(1, 3, 256, 256),),
-        "combo_conv_relu_0_5_x_4d": lambda: (0.5 * torch.randn(1, 3, 256, 256),),
-        "combo_conv_relu_4d": lambda: (torch.randn(1, 3, 256, 256),),
-        "combo_conv_relu_neg_0_5_x_4d": lambda: (-0.5 * torch.randn(1, 3, 256, 256),),
-        "combo_conv_relu_neg_2_x_4d": lambda: (-2 * torch.randn(1, 3, 256, 256),),
+        "combo_conv_relu_2_x_4d": lambda: (2 * torch.randn(1, 3, 64, 64),),
+        "combo_conv_relu_0_5_x_4d": lambda: (0.5 * torch.randn(1, 3, 64, 64),),
+        "combo_conv_relu_4d": lambda: (torch.randn(1, 3, 64, 64),),
+        "combo_conv_relu_neg_0_5_x_4d": lambda: (-0.5 * torch.randn(1, 3, 64, 64),),
+        "combo_conv_relu_neg_2_x_4d": lambda: (-2 * torch.randn(1, 3, 64, 64),),
     }
 
     # Generate a new test set paired with per_channel_quant=True/False.
@@ -196,10 +196,10 @@ class ComboConvAvgPool2d(torch.nn.Module):
     ]
 
     test_data_FP = {
-        "combo_conv_avgpool_20_x_4d": lambda: (20 * torch.randn(1, 3, 64, 32),),
-        "combo_conv_avgpool_4d": lambda: (torch.randn(1, 3, 100, 200),),
-        "combo_conv_avgpool_5_x_4d_randn": lambda: (5 * torch.randn(1, 3, 256, 256),),
-        "combo_conv_avgpool_2_x_4d": lambda: (torch.rand(1, 3, 512, 128),),
+        "combo_conv_avgpool_20_x_4d": lambda: (20 * torch.randn(1, 3, 48, 24),),
+        "combo_conv_avgpool_4d": lambda: (torch.randn(1, 3, 60, 120),),
+        "combo_conv_avgpool_5_x_4d_randn": lambda: (5 * torch.randn(1, 3, 64, 64),),
+        "combo_conv_avgpool_2_x_4d": lambda: (torch.rand(1, 3, 96, 32),),
     }
 
     # Generate a new test set paired with per_channel_quant=True/False.
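The test_data_FP tables in these files map a readable case name to a zero-argument lambda, so input tensors are only materialized when a test actually runs rather than at module import. A minimal sketch of how such a table is typically consumed, assuming a pytest-style parameterized runner (the suite's actual harness is not shown in this diff):

import pytest
import torch

test_data_FP = {
    "combo_conv_relu_4d": lambda: (torch.randn(1, 3, 64, 64),),
}

@pytest.mark.parametrize(
    "make_inputs", list(test_data_FP.values()), ids=list(test_data_FP.keys())
)
def test_combo_conv_relu(make_inputs):
    inputs = make_inputs()  # tensors are created here, at run time
    assert inputs[0].shape == (1, 3, 64, 64)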
backends/arm/test/ops/test_depthwise_conv.py (25 additions, 25 deletions)
@@ -68,68 +68,68 @@
     batches=1,
 )
 
-dw_conv1d_3_1x3x256_gp3_st1 = Conv1d(
+dw_conv1d_3_1x3x32_gp3_st1 = Conv1d(
     in_channels=3,
     out_channels=3,
     kernel_size=3,
     stride=1,
     groups=3,
     padding=0,
-    length=256,
+    length=32,
     batches=1,
 )
 
-dw_conv2d_3x3_1x3x256x256_gp3_st1 = Conv2d(
+dw_conv2d_3x3_1x3x24x24_gp3_st1 = Conv2d(
     in_channels=3,
     out_channels=3,
     kernel_size=(3, 3),
     stride=(1, 1),
     groups=3,
     padding=0,
-    width=256,
-    height=256,
+    width=24,
+    height=24,
     batches=1,
 )
 
-dw_conv2d_3x3_1x4x256x256_gp4_st1 = Conv2d(
+dw_conv2d_3x3_1x4x24x24_gp4_st1 = Conv2d(
     in_channels=4,
     out_channels=8,
     kernel_size=(3, 3),
     stride=(1, 1),
     groups=4,
     padding=0,
-    width=256,
-    height=256,
+    width=24,
+    height=24,
     batches=1,
 )
 
-dw_conv2d_3x3_2x8x198x198_gp8_st3 = Conv2d(
+dw_conv2d_3x3_2x8x27x27_gp8_st3 = Conv2d(
     in_channels=8,
     out_channels=16,
     kernel_size=(3, 3),
     stride=3,
     groups=8,
     padding=0,
-    width=198,
-    height=198,
+    width=27,
+    height=27,
     batches=2,
 )
 
-dw_conv2d_3x3_1x4x256x256_gp4_nobias = Conv2d(
+dw_conv2d_3x3_1x4x24x24_gp4_nobias = Conv2d(
     in_channels=4,
     out_channels=8,
     kernel_size=(3, 3),
     stride=1,
     groups=4,
     bias=False,
-    width=256,
-    height=256,
+    width=24,
+    height=24,
     batches=1,
 )
 
 two_dw_conv1d = Conv1d(
     nbr_conv=2,
-    length=64,
+    length=16,
     in_channels=[4, 8],
     out_channels=[8, 24],
     kernel_size=[3, 3],
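Quick sanity check on the depthwise cases (not part of the diff; a hypothetical sketch): with groups equal to in_channels, Conv2d applies a separate filter stack per input channel, and out_channels must be a multiple of in_channels. The reduced 2x8x27x27 input with a 3x3 kernel at stride 3 still produces a valid output:

import torch

dw = torch.nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=3, groups=8)
out = dw(torch.randn(2, 8, 27, 27))
# floor((27 - 3) / 3) + 1 = 9 along each spatial dimension
assert out.shape == (2, 16, 9, 9)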
@@ -142,8 +142,8 @@
 
 two_dw_conv2d = Conv2d(
     nbr_conv=2,
-    width=64,
-    height=64,
+    width=24,
+    height=24,
     in_channels=[4, 8],
     out_channels=[8, 24],
     kernel_size=[(3, 3), (3, 3)],
@@ -157,10 +157,10 @@
 # Shenanigan to get a nicer output when test fails.
 test_data_conv2d_FP = {
     "2x2_1x6x4x4_gp6_st1": lambda: dw_conv2d_2x2_1x6x4x4_gp6_st1,
-    "3x3_1x3x256x256_gp3_st1": lambda: dw_conv2d_3x3_1x3x256x256_gp3_st1,
-    "3x3_1x4x256x256_gp4_nobias": lambda: dw_conv2d_3x3_1x4x256x256_gp4_nobias,
-    "3x3_1x4x256x256_gp4_st1": lambda: dw_conv2d_3x3_1x4x256x256_gp4_st1,
-    "3x3_2x8x198x198_gp8_st3": lambda: dw_conv2d_3x3_2x8x198x198_gp8_st3,
+    "3x3_1x3x24x24_gp3_st1": lambda: dw_conv2d_3x3_1x3x24x24_gp3_st1,
+    "3x3_1x4x24x24_gp4_nobias": lambda: dw_conv2d_3x3_1x4x24x24_gp4_nobias,
+    "3x3_1x4x24x24_gp4_st1": lambda: dw_conv2d_3x3_1x4x24x24_gp4_st1,
+    "3x3_2x8x27x27_gp8_st3": lambda: dw_conv2d_3x3_2x8x27x27_gp8_st3,
     "two_dw_conv2d": lambda: two_dw_conv2d,
 }
 
@@ -176,17 +176,17 @@
     f"{k},per_channel_quant={q}": (lambda v=v, q=q: (v(), q))
     for (k, v) in {
         "2x2_1x6x4x4_gp6_st1": lambda: dw_conv2d_2x2_1x6x4x4_gp6_st1,
-        "3x3_1x3x256x256_gp3_st1": lambda: dw_conv2d_3x3_1x3x256x256_gp3_st1,
-        "3x3_1x4x256x256_gp4_st1": lambda: dw_conv2d_3x3_1x4x256x256_gp4_st1,
-        "3x3_1x4x256x256_gp4_nobias": lambda: dw_conv2d_3x3_1x4x256x256_gp4_nobias,
+        "3x3_1x3x24x24_gp3_st1": lambda: dw_conv2d_3x3_1x3x24x24_gp3_st1,
+        "3x3_1x4x24x24_gp4_st1": lambda: dw_conv2d_3x3_1x4x24x24_gp4_st1,
+        "3x3_1x4x24x24_gp4_nobias": lambda: dw_conv2d_3x3_1x4x24x24_gp4_nobias,
     }.items()
     for q in [True, False]
 }
 
 test_data_conv1d_FP = {
     "2_1x6x4_gp6_st1": lambda: dw_conv1d_2_1x6x4_gp6_st1,
     "two_dw_conv1d": lambda: two_dw_conv1d,
-    "3_1x3x256_gp3_st1": lambda: dw_conv1d_3_1x3x256_gp3_st1,
+    "3_1x3x32_gp3_st1": lambda: dw_conv1d_3_1x3x32_gp3_st1,
     "3_1x3x14_gp3_st1": lambda: dw_conv1d_3_1x3x14_gp3_st1,
 }
 
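One detail worth noting in the per_channel_quant comprehension above: the lambda binds v and q as default arguments (lambda v=v, q=q: ...). A plain closure over the loop variables would see only their final values, so every generated case would alias the last (k, v) pair with q=False. A reduced, hypothetical example of the same pattern:

cases = {"a": lambda: 1, "b": lambda: 2}
paired = {
    f"{k},per_channel_quant={q}": (lambda v=v, q=q: (v(), q))
    for (k, v) in cases.items()
    for q in [True, False]
}
assert paired["a,per_channel_quant=True"]() == (1, True)
assert paired["b,per_channel_quant=False"]() == (2, False)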