Skip to content

Commit 0dcf42e

Browse files
authored
Arm backend: Reduce conv2d unit test sizes (pytorch#15271)
Signed-off-by: Ryan O'Shea <[email protected]>
1 parent 50e5ec0 commit 0dcf42e

File tree

3 files changed

+66
-66
lines changed

3 files changed

+66
-66
lines changed

backends/arm/test/ops/test_conv2d.py

Lines changed: 26 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -117,26 +117,26 @@ def forward(self, x):
117117
return x
118118

119119

120-
conv2d_2x2_3x2x40x40_nobias = Conv2d(
120+
conv2d_2x2_3x2x14x14_nobias = Conv2d(
121121
in_channels=2,
122122
out_channels=3,
123123
kernel_size=(2, 2),
124124
stride=1,
125125
bias=False,
126126
padding=0,
127-
width=40,
128-
height=40,
129-
batches=3,
127+
width=14,
128+
height=14,
129+
batches=2,
130130
)
131131

132-
conv2d_3x3_1x3x256x256_st1 = Conv2d(
132+
conv2d_3x3_1x3x24x24_st1 = Conv2d(
133133
in_channels=3,
134134
out_channels=10,
135135
kernel_size=(3, 3),
136136
stride=1,
137137
padding=0,
138-
width=256,
139-
height=256,
138+
width=24,
139+
height=24,
140140
batches=1,
141141
)
142142

@@ -151,14 +151,14 @@ def forward(self, x):
151151
batches=1,
152152
)
153153

154-
conv2d_1x1_1x2x128x128_st1 = Conv2d(
154+
conv2d_1x1_1x2x16x16_st1 = Conv2d(
155155
in_channels=2,
156156
out_channels=1,
157157
kernel_size=(1, 1),
158158
stride=1,
159159
padding=0,
160-
width=128,
161-
height=128,
160+
width=16,
161+
height=16,
162162
batches=1,
163163
)
164164

@@ -173,25 +173,25 @@ def forward(self, x):
173173
batches=1,
174174
)
175175

176-
conv2d_5x5_3x2x128x128_st1 = Conv2d(
176+
conv2d_5x5_3x2x24x24_st1 = Conv2d(
177177
in_channels=2,
178178
out_channels=3,
179179
kernel_size=(5, 5),
180180
stride=1,
181181
padding=0,
182-
width=128,
183-
height=128,
184-
batches=3,
182+
width=24,
183+
height=24,
184+
batches=2,
185185
)
186186

187-
conv2d_3x3_1x3x224x224_st2_pd1 = Conv2d(
187+
conv2d_3x3_1x3x28x28_st2_pd1 = Conv2d(
188188
in_channels=3,
189189
out_channels=16,
190190
kernel_size=(3, 3),
191191
stride=2,
192192
padding=1,
193-
width=224,
194-
height=224,
193+
width=28,
194+
height=28,
195195
batches=1,
196196
)
197197

@@ -304,8 +304,8 @@ def forward(self, x):
304304

305305
two_conv2d_nobias = Conv2d(
306306
nbr_conv=2,
307-
width=256,
308-
height=256,
307+
width=32,
308+
height=32,
309309
in_channels=[3, 10],
310310
out_channels=[10, 15],
311311
kernel_size=[(5, 5), (5, 5)],
@@ -317,8 +317,8 @@ def forward(self, x):
317317

318318
two_conv2d = Conv2d(
319319
nbr_conv=2,
320-
width=256,
321-
height=256,
320+
width=32,
321+
height=32,
322322
in_channels=[3, 10],
323323
out_channels=[10, 15],
324324
kernel_size=[(5, 5), (5, 5)],
@@ -359,10 +359,10 @@ def forward(self, x):
359359
# Shenanigan to get a nicer output when test fails. With unittest it looks like:
360360
# FAIL: test_convolution_2d_tosa_INT_2_3x3_1x3x12x12_st2_pd1
361361
test_data_FP = {
362-
"2x2_3x2x40x40_nobias": lambda: conv2d_2x2_3x2x40x40_nobias,
363-
"3x3_1x3x256x256_st1": lambda: conv2d_3x3_1x3x256x256_st1,
362+
"2x2_3x2x14x14_nobias": lambda: conv2d_2x2_3x2x14x14_nobias,
363+
"3x3_1x3x24x24_st1": lambda: conv2d_3x3_1x3x24x24_st1,
364364
"3x3_1x3x12x12_st2_pd1": lambda: conv2d_3x3_1x3x12x12_st2_pd1,
365-
"1x1_1x2x128x128_st1": lambda: conv2d_1x1_1x2x128x128_st1,
365+
"1x1_1x2x16x16_st1": lambda: conv2d_1x1_1x2x16x16_st1,
366366
"2x2_1x1x14x13_st2_needs_adjust_pass": lambda: conv2d_2x2_1x1x14x13_st2,
367367
"5x5_1x3x14x15_st3_pd1_needs_adjust_pass": lambda: conv2d_5x5_1x3x14x15_st3_pd1,
368368
"7x7_1x3x16x16_st2_pd1_dl2_needs_adjust_pass": lambda: conv2d_7x7_1x3x16x16_st2_pd1_dl2,
@@ -373,8 +373,8 @@ def forward(self, x):
373373
"3x3_1x3x8x9_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_3x3_1x3x8x9_st3_pd0_dl1,
374374
"3x4_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_3x4_1x3x7x7_st3_pd0_dl1,
375375
"4x3_1x3x7x7_st3_pd0_dl1_needs_adjust_pass": lambda: conv2d_4x3_1x3x7x7_st3_pd0_dl1,
376-
"5x5_3x2x128x128_st1": lambda: conv2d_5x5_3x2x128x128_st1,
377-
"3x3_1x3x224x224_st2_pd1": lambda: conv2d_3x3_1x3x224x224_st2_pd1,
376+
"5x5_3x2x24x24_st1": lambda: conv2d_5x5_3x2x24x24_st1,
377+
"3x3_1x3x28x28_st2_pd1": lambda: conv2d_3x3_1x3x28x28_st2_pd1,
378378
"two_conv2d_nobias": lambda: two_conv2d_nobias,
379379
"two_conv2d": lambda: two_conv2d,
380380
"groups": lambda: conv2d_groups,

backends/arm/test/ops/test_conv_combos.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def __init__(self):
4848
# 1. 1x1 CONV2d + ReLU6 (Pointwise)
4949
self.pointwise_conv2d = torch.nn.Conv2d(
5050
in_channels=16, out_channels=96, kernel_size=1, stride=1, groups=1
51-
) ## (1, 128, 81, 81)
51+
) ## Example output shape (1, 96, 33, 33)
5252
self.batch_norm2d_16 = torch.nn.BatchNorm2d(96, affine=False)
5353
self.relu6 = torch.nn.ReLU6()
5454

@@ -60,15 +60,15 @@ def __init__(self):
6060
padding=1,
6161
stride=1,
6262
groups=96,
63-
) ## (1, 128, H, W)
63+
) ## Example output shape (1, 96, H, W)
6464

6565
# 3. Linear 1x1 Conv2d
6666
self.pointwise_conv2d_linear = torch.nn.Conv2d(
6767
in_channels=96, out_channels=16, kernel_size=1, stride=1, groups=1
68-
) ## (1, 32, 81, 81)
68+
) ## Example output shape (1, 16, 33, 33)
6969

7070
def get_inputs(self) -> Tuple[torch.Tensor]:
71-
return (torch.randn(1, 16, 81, 81),)
71+
return (torch.randn(1, 16, 33, 33),)
7272

7373
def forward(self, x):
7474
input = x
@@ -106,7 +106,7 @@ def __init__(self):
106106
self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
107107

108108
def get_inputs(self) -> Tuple[torch.Tensor]:
109-
return (torch.randn(1, 3, 128, 128),)
109+
return (torch.randn(1, 3, 48, 48),)
110110

111111
def forward(self, x):
112112
x = self.conv2d(x)
@@ -145,7 +145,7 @@ def __init__(self, affine: bool):
145145
self.relu6 = torch.nn.ReLU6()
146146

147147
def get_inputs(self) -> Tuple[torch.Tensor]:
148-
return (torch.randn(1, 3, 256, 256),)
148+
return (torch.randn(1, 3, 64, 64),)
149149

150150
def forward(self, x):
151151
x = self.conv2d(x)
@@ -161,11 +161,11 @@ class ComboConvRelu6(torch.nn.Module):
161161
]
162162

163163
test_data_FP = {
164-
"combo_conv_relu_2_x_4d": lambda: (2 * torch.randn(1, 3, 256, 256),),
165-
"combo_conv_relu_0_5_x_4d": lambda: (0.5 * torch.randn(1, 3, 256, 256),),
166-
"combo_conv_relu_4d": lambda: (torch.randn(1, 3, 256, 256),),
167-
"combo_conv_relu_neg_0_5_x_4d": lambda: (-0.5 * torch.randn(1, 3, 256, 256),),
168-
"combo_conv_relu_neg_2_x_4d": lambda: (-2 * torch.randn(1, 3, 256, 256),),
164+
"combo_conv_relu_2_x_4d": lambda: (2 * torch.randn(1, 3, 64, 64),),
165+
"combo_conv_relu_0_5_x_4d": lambda: (0.5 * torch.randn(1, 3, 64, 64),),
166+
"combo_conv_relu_4d": lambda: (torch.randn(1, 3, 64, 64),),
167+
"combo_conv_relu_neg_0_5_x_4d": lambda: (-0.5 * torch.randn(1, 3, 64, 64),),
168+
"combo_conv_relu_neg_2_x_4d": lambda: (-2 * torch.randn(1, 3, 64, 64),),
169169
}
170170

171171
# Generate a new test set paired with per_channel_quant=True/False.
@@ -196,10 +196,10 @@ class ComboConvAvgPool2d(torch.nn.Module):
196196
]
197197

198198
test_data_FP = {
199-
"combo_conv_avgpool_20_x_4d": lambda: (20 * torch.randn(1, 3, 64, 32),),
200-
"combo_conv_avgpool_4d": lambda: (torch.randn(1, 3, 100, 200),),
201-
"combo_conv_avgpool_5_x_4d_randn": lambda: (5 * torch.randn(1, 3, 256, 256),),
202-
"combo_conv_avgpool_2_x_4d": lambda: (torch.rand(1, 3, 512, 128),),
199+
"combo_conv_avgpool_20_x_4d": lambda: (20 * torch.randn(1, 3, 48, 24),),
200+
"combo_conv_avgpool_4d": lambda: (torch.randn(1, 3, 60, 120),),
201+
"combo_conv_avgpool_5_x_4d_randn": lambda: (5 * torch.randn(1, 3, 64, 64),),
202+
"combo_conv_avgpool_2_x_4d": lambda: (torch.rand(1, 3, 96, 32),),
203203
}
204204

205205
# Generate a new test set paired with per_channel_quant=True/False.

backends/arm/test/ops/test_depthwise_conv.py

Lines changed: 25 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -68,68 +68,68 @@
6868
batches=1,
6969
)
7070

71-
dw_conv1d_3_1x3x256_gp3_st1 = Conv1d(
71+
dw_conv1d_3_1x3x32_gp3_st1 = Conv1d(
7272
in_channels=3,
7373
out_channels=3,
7474
kernel_size=3,
7575
stride=1,
7676
groups=3,
7777
padding=0,
78-
length=256,
78+
length=32,
7979
batches=1,
8080
)
8181

82-
dw_conv2d_3x3_1x3x256x256_gp3_st1 = Conv2d(
82+
dw_conv2d_3x3_1x3x24x24_gp3_st1 = Conv2d(
8383
in_channels=3,
8484
out_channels=3,
8585
kernel_size=(3, 3),
8686
stride=(1, 1),
8787
groups=3,
8888
padding=0,
89-
width=256,
90-
height=256,
89+
width=24,
90+
height=24,
9191
batches=1,
9292
)
9393

94-
dw_conv2d_3x3_1x4x256x256_gp4_st1 = Conv2d(
94+
dw_conv2d_3x3_1x4x24x24_gp4_st1 = Conv2d(
9595
in_channels=4,
9696
out_channels=8,
9797
kernel_size=(3, 3),
9898
stride=(1, 1),
9999
groups=4,
100100
padding=0,
101-
width=256,
102-
height=256,
101+
width=24,
102+
height=24,
103103
batches=1,
104104
)
105105

106-
dw_conv2d_3x3_2x8x198x198_gp8_st3 = Conv2d(
106+
dw_conv2d_3x3_2x8x27x27_gp8_st3 = Conv2d(
107107
in_channels=8,
108108
out_channels=16,
109109
kernel_size=(3, 3),
110110
stride=3,
111111
groups=8,
112112
padding=0,
113-
width=198,
114-
height=198,
113+
width=27,
114+
height=27,
115115
batches=2,
116116
)
117117

118-
dw_conv2d_3x3_1x4x256x256_gp4_nobias = Conv2d(
118+
dw_conv2d_3x3_1x4x24x24_gp4_nobias = Conv2d(
119119
in_channels=4,
120120
out_channels=8,
121121
kernel_size=(3, 3),
122122
stride=1,
123123
groups=4,
124124
bias=False,
125-
width=256,
126-
height=256,
125+
width=24,
126+
height=24,
127127
batches=1,
128128
)
129129

130130
two_dw_conv1d = Conv1d(
131131
nbr_conv=2,
132-
length=64,
132+
length=16,
133133
in_channels=[4, 8],
134134
out_channels=[8, 24],
135135
kernel_size=[3, 3],
@@ -142,8 +142,8 @@
142142

143143
two_dw_conv2d = Conv2d(
144144
nbr_conv=2,
145-
width=64,
146-
height=64,
145+
width=24,
146+
height=24,
147147
in_channels=[4, 8],
148148
out_channels=[8, 24],
149149
kernel_size=[(3, 3), (3, 3)],
@@ -157,10 +157,10 @@
157157
# Shenanigan to get a nicer output when test fails.
158158
test_data_conv2d_FP = {
159159
"2x2_1x6x4x4_gp6_st1": lambda: dw_conv2d_2x2_1x6x4x4_gp6_st1,
160-
"3x3_1x3x256x256_gp3_st1": lambda: dw_conv2d_3x3_1x3x256x256_gp3_st1,
161-
"3x3_1x4x256x256_gp4_nobias": lambda: dw_conv2d_3x3_1x4x256x256_gp4_nobias,
162-
"3x3_1x4x256x256_gp4_st1": lambda: dw_conv2d_3x3_1x4x256x256_gp4_st1,
163-
"3x3_2x8x198x198_gp8_st3": lambda: dw_conv2d_3x3_2x8x198x198_gp8_st3,
160+
"3x3_1x3x24x24_gp3_st1": lambda: dw_conv2d_3x3_1x3x24x24_gp3_st1,
161+
"3x3_1x4x24x24_gp4_nobias": lambda: dw_conv2d_3x3_1x4x24x24_gp4_nobias,
162+
"3x3_1x4x24x24_gp4_st1": lambda: dw_conv2d_3x3_1x4x24x24_gp4_st1,
163+
"3x3_2x8x27x27_gp8_st3": lambda: dw_conv2d_3x3_2x8x27x27_gp8_st3,
164164
"two_dw_conv2d": lambda: two_dw_conv2d,
165165
}
166166

@@ -176,17 +176,17 @@
176176
f"{k},per_channel_quant={q}": (lambda v=v, q=q: (v(), q))
177177
for (k, v) in {
178178
"2x2_1x6x4x4_gp6_st1": lambda: dw_conv2d_2x2_1x6x4x4_gp6_st1,
179-
"3x3_1x3x256x256_gp3_st1": lambda: dw_conv2d_3x3_1x3x256x256_gp3_st1,
180-
"3x3_1x4x256x256_gp4_st1": lambda: dw_conv2d_3x3_1x4x256x256_gp4_st1,
181-
"3x3_1x4x256x256_gp4_nobias": lambda: dw_conv2d_3x3_1x4x256x256_gp4_nobias,
179+
"3x3_1x3x24x24_gp3_st1": lambda: dw_conv2d_3x3_1x3x24x24_gp3_st1,
180+
"3x3_1x4x24x24_gp4_st1": lambda: dw_conv2d_3x3_1x4x24x24_gp4_st1,
181+
"3x3_1x4x24x24_gp4_nobias": lambda: dw_conv2d_3x3_1x4x24x24_gp4_nobias,
182182
}.items()
183183
for q in [True, False]
184184
}
185185

186186
test_data_conv1d_FP = {
187187
"2_1x6x4_gp6_st1": lambda: dw_conv1d_2_1x6x4_gp6_st1,
188188
"two_dw_conv1d": lambda: two_dw_conv1d,
189-
"3_1x3x256_gp3_st1": lambda: dw_conv1d_3_1x3x256_gp3_st1,
189+
"3_1x3x32_gp3_st1": lambda: dw_conv1d_3_1x3x32_gp3_st1,
190190
"3_1x3x14_gp3_st1": lambda: dw_conv1d_3_1x3x14_gp3_st1,
191191
}
192192

0 commit comments

Comments (0)