Skip to content

Commit bf50dad

Browse files
authored
[New Sample] Update ResNet18 and SwinTransformer and add more ResNet models. (#256)
* Update ResNet18 and SwinTransformer.
* Add ResNet models.
* Update the bypass user name, the check_validate CI, and the count_samples tools.
* Add more CV models.
1 parent c7ca227 commit bf50dad

File tree

297 files changed

+275497
-65535
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

297 files changed

+275497
-65535
lines changed

.github/actions/check-bypass/action.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
name: "Check bypass"
2-
description: "A custom action to encapsulate PFCCLab/ci-bypass"
2+
description: "A custom action to encapsulate GraphNet"
33
inputs:
44
github-token:
55
description: "GitHub token"
@@ -18,7 +18,7 @@ runs:
1818
- id: check-bypass
1919
name: Check Bypass
2020
env:
21-
CI_TEAM_MEMBERS: '["SigureMo", "risemeup1", "tianshuo78520a", "0x3878f", "swgu98", "luotao1", "XieYunshen"]'
21+
CI_TEAM_MEMBERS: '["lixinqi", "Xreki"]'
2222
uses: PFCCLab/ci-bypass@v1
2323
with:
2424
github-token: ${{ inputs.github-token }}
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
66a9824881732b1a5e838d9694555595d675c8144d1ba6de1a665e5ea2a5e6e9

paddle_samples/vision-model/SwinTransformer_base_patch4_window12_384/subgraph_0/graph_net.json renamed to paddle_samples/PaddleX/DLinear/graph_net.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
{
22
"framework": "paddle",
3+
"model_name": "DLinear",
34
"num_devices_required": 1,
45
"num_nodes_required": 1
56
}
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
class Program_weight_tensor_data_0:
    """Summary record for the captured input tensor ``data_0``.

    Only metadata and distribution statistics are stored; the raw
    values are not embedded in the sample (``data`` stays ``None``).
    """

    name = "data_0"
    shape = [16, 96, 1]
    dtype = "float32"
    data = None  # raw tensor values intentionally omitted
    # Distribution statistics of the captured values.
    min_val = -2.06142
    max_val = -0.30026
    mean = -1.28192
    std = 0.34683
Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
import paddle
2+
3+
4+
class GraphModule(paddle.nn.Layer):
    """Auto-extracted Paddle subgraph (DLinear sample, captured program).

    The op-level ``# pd_op...`` comments are kept verbatim from the
    extractor; each records ``output <- inputs`` as ``shape x dtype``
    (``-1`` marks a dynamic batch dimension).  Aggressive ``del`` of
    intermediates mirrors the captured program and keeps peak memory low.
    """

    def __init__(self):
        super().__init__()

    def forward(self, parameter_0, parameter_1, parameter_2, parameter_3, data_0):
        # Per the op annotations below: data_0 is -1x96x1 (f32);
        # parameter_1/parameter_3 are 96x96 matmul weights;
        # parameter_0/parameter_2 are 96-element add (bias) terms.

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_0 = [0]

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_1 = [1]

        # Take the first time step (index 0) along axis 1.
        # pd_op.slice: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64, 1xi64)
        slice_0 = paddle._C_ops.slice(
            data_0, [1], full_int_array_0, full_int_array_1, [1], []
        )
        del full_int_array_0, full_int_array_1

        # pd_op.full_int_array: (3xi64) <- ()
        full_int_array_2 = [1, 12, 1]

        # Repeat the first step 12 times (front padding for the moving average).
        # pd_op.tile: (-1x12x1xf32) <- (-1x1x1xf32, 3xi64)
        tile_0 = paddle._C_ops.tile(slice_0, full_int_array_2)
        del slice_0

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_3 = [-1]

        # 2147483647 == INT32_MAX: an open-ended slice end, i.e. "to the end".
        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_4 = [2147483647]

        # Take the last time step along axis 1.
        # pd_op.slice: (-1x1x1xf32) <- (-1x96x1xf32, 1xi64, 1xi64)
        slice_1 = paddle._C_ops.slice(
            data_0, [1], full_int_array_3, full_int_array_4, [1], []
        )
        del full_int_array_3, full_int_array_4

        # Repeat the last step 12 times (back padding).
        # pd_op.tile: (-1x12x1xf32) <- (-1x1x1xf32, 3xi64)
        tile_1 = paddle._C_ops.tile(slice_1, full_int_array_2)
        del full_int_array_2, slice_1

        # Concat axis (1) as a 1xi32 tensor on CPU.
        # pd_op.full: (1xi32) <- ()
        full_0 = paddle._C_ops.full(
            [1], float("1"), paddle.int32, paddle.core.CPUPlace()
        )

        # builtin.combine: ([-1x12x1xf32, -1x96x1xf32, -1x12x1xf32]) <- (-1x12x1xf32, -1x96x1xf32, -1x12x1xf32)
        combine_0 = [tile_0, data_0, tile_1]
        del tile_0, tile_1

        # Edge-padded sequence: 12 + 96 + 12 = 120 steps.
        # pd_op.concat: (-1x120x1xf32) <- ([-1x12x1xf32, -1x96x1xf32, -1x12x1xf32], 1xi32)
        concat_0 = paddle._C_ops.concat(combine_0, full_0)
        del combine_0, full_0

        # pd_op.transpose: (-1x1x120xf32) <- (-1x120x1xf32)
        transpose_1 = paddle._C_ops.transpose(concat_0, [0, 2, 1])
        del concat_0

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_5 = [2]

        # pd_op.unsqueeze: (-1x1x1x120xf32) <- (-1x1x120xf32, 1xi64)
        unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_1, full_int_array_5)
        del transpose_1

        # pd_op.full_int_array: (2xi64) <- ()
        full_int_array_6 = [1, 25]

        # 1x25 average pooling, stride 1, no padding: a moving average over the
        # padded sequence (120 -> 96 valid positions per the shape annotation).
        # pd_op.pool2d: (-1x1x1x96xf32) <- (-1x1x1x120xf32, 2xi64)
        pool2d_0 = paddle._C_ops.pool2d(
            unsqueeze_0,
            full_int_array_6,
            [1, 1],
            [0, 0],
            False,
            True,
            "NCHW",
            "avg",
            False,
            False,
            "EXPLICIT",
        )
        del full_int_array_6, unsqueeze_0

        # pd_op.squeeze: (-1x1x96xf32) <- (-1x1x1x96xf32, 1xi64)
        squeeze_0 = paddle._C_ops.squeeze(pool2d_0, full_int_array_5)
        del full_int_array_5, pool2d_0

        # transpose_2 is the smoothed (trend) component in -1x96x1 layout.
        # pd_op.transpose: (-1x96x1xf32) <- (-1x1x96xf32)
        transpose_2 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1])
        del squeeze_0

        # subtract_0 is the residual (input minus trend) component.
        # pd_op.subtract: (-1x96x1xf32) <- (-1x96x1xf32, -1x96x1xf32)
        subtract_0 = paddle._C_ops.subtract(data_0, transpose_2)
        del data_0

        # pd_op.transpose: (-1x1x96xf32) <- (-1x96x1xf32)
        transpose_3 = paddle._C_ops.transpose(subtract_0, [0, 2, 1])
        del subtract_0

        # pd_op.transpose: (-1x1x96xf32) <- (-1x96x1xf32)
        transpose_4 = paddle._C_ops.transpose(transpose_2, [0, 2, 1])
        del transpose_2

        # Linear projection of the residual branch.
        # pd_op.matmul: (-1x1x96xf32) <- (-1x1x96xf32, 96x96xf32)
        matmul_0 = paddle._C_ops.matmul(transpose_3, parameter_3, False, False)
        del parameter_3, transpose_3

        # pd_op.add: (-1x1x96xf32) <- (-1x1x96xf32, 96xf32)
        add_0 = paddle._C_ops.add(matmul_0, parameter_2)
        del matmul_0, parameter_2

        # Linear projection of the trend branch.
        # pd_op.matmul: (-1x1x96xf32) <- (-1x1x96xf32, 96x96xf32)
        matmul_1 = paddle._C_ops.matmul(transpose_4, parameter_1, False, False)
        del parameter_1, transpose_4

        # pd_op.add: (-1x1x96xf32) <- (-1x1x96xf32, 96xf32)
        add_1 = paddle._C_ops.add(matmul_1, parameter_0)
        del matmul_1, parameter_0

        # Sum of the two branch outputs.
        # pd_op.add: (-1x1x96xf32) <- (-1x1x96xf32, -1x1x96xf32)
        add_2 = paddle._C_ops.add(add_0, add_1)
        del add_0, add_1

        # Back to the input layout (-1x96x1).
        # pd_op.transpose: (-1x96x1xf32) <- (-1x1x96xf32)
        transpose_0 = paddle._C_ops.transpose(add_2, [0, 2, 1])
        del add_2

        return transpose_0
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
class Program_weight_tensor_parameter_0:
    """Summary record for weight ``parameter_0`` (96-element tensor).

    Holds metadata and statistics only; the actual weight values are
    not embedded (``data`` stays ``None``).
    """

    name = "parameter_0"
    shape = [96]
    dtype = "float32"
    data = None  # raw tensor values intentionally omitted
    # Distribution statistics of the original weight values.
    min_val = -0.096893
    max_val = 0.0889171
    mean = 0.000213582
    std = 0.0441362
10+
11+
12+
class Program_weight_tensor_parameter_1:
    """Summary record for weight ``parameter_1`` (96x96 tensor).

    Holds metadata and statistics only; the actual weight values are
    not embedded (``data`` stays ``None``).
    """

    name = "parameter_1"
    shape = [96, 96]
    dtype = "float32"
    data = None  # raw tensor values intentionally omitted
    # Distribution statistics of the original weight values.
    min_val = -0.115178
    max_val = 0.128102
    mean = 0.00947838
    std = 0.0583343
21+
22+
23+
class Program_weight_tensor_parameter_2:
    """Summary record for weight ``parameter_2`` (96-element tensor).

    Holds metadata and statistics only; the actual weight values are
    not embedded (``data`` stays ``None``).
    """

    name = "parameter_2"
    shape = [96]
    dtype = "float32"
    data = None  # raw tensor values intentionally omitted
    # Distribution statistics of the original weight values.
    min_val = -0.105537
    max_val = 0.0845581
    mean = -0.012385
    std = 0.0442325
32+
33+
34+
class Program_weight_tensor_parameter_3:
    """Summary record for weight ``parameter_3`` (96x96 tensor).

    Holds metadata and statistics only; the actual weight values are
    not embedded (``data`` stays ``None``).
    """

    name = "parameter_3"
    shape = [96, 96]
    dtype = "float32"
    data = None  # raw tensor values intentionally omitted
    # Distribution statistics of the original weight values.
    min_val = -0.125641
    max_val = 0.138936
    mean = -0.000565255
    std = 0.0571832
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
66a9824881732b1a5e838d9694555595d675c8144d1ba6de1a665e5ea2a5e6e9
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
{
2+
"framework": "paddle",
3+
"model_name": "DLinear_ad",
4+
"num_devices_required": 1,
5+
"num_nodes_required": 1
6+
}
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
class Program_weight_tensor_data_0:
    """Summary record for the captured input tensor ``data_0``.

    Only metadata and distribution statistics are stored; the raw
    values are not embedded in the sample (``data`` stays ``None``).
    """

    name = "data_0"
    shape = [16, 96, 2]
    dtype = "float32"
    data = None  # raw tensor values intentionally omitted
    # Distribution statistics of the captured values.
    min_val = -6.37408
    max_val = 0.439645
    mean = -1.35748
    std = 1.3301
Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,138 @@
1+
import paddle
2+
3+
4+
class GraphModule(paddle.nn.Layer):
    """Auto-extracted Paddle subgraph (DLinear_ad sample, captured program).

    Same structure as the DLinear sample but with 2 input channels.
    The ``# pd_op...`` comments are kept verbatim from the extractor; each
    records ``output <- inputs`` as ``shape x dtype`` (``-1`` marks a
    dynamic batch dimension).
    """

    def __init__(self):
        super().__init__()

    def forward(self, parameter_0, parameter_1, parameter_2, parameter_3, data_0):
        # NOTE(review): per the annotation this cast is f32 -> f32 — kept to
        # match the captured program even though it looks like a no-op.
        # pd_op.cast: (-1x96x2xf32) <- (-1x96x2xf32)
        cast_0 = paddle._C_ops.cast(data_0, paddle.float32)
        del data_0

        # NOTE(review): assign_0 aliases cast_0 and is never read before the
        # final `del` below — dead in Python terms, retained for IR fidelity.
        # pd_op.assign: (-1x96x2xf32) <- (-1x96x2xf32)
        assign_0 = cast_0

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_0 = [0]

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_1 = [1]

        # Take the first time step (index 0) along axis 1.
        # pd_op.slice: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64, 1xi64)
        slice_0 = paddle._C_ops.slice(
            cast_0, [1], full_int_array_0, full_int_array_1, [1], []
        )
        del full_int_array_0, full_int_array_1

        # pd_op.full_int_array: (3xi64) <- ()
        full_int_array_2 = [1, 12, 1]

        # Repeat the first step 12 times (front padding for the moving average).
        # pd_op.tile: (-1x12x2xf32) <- (-1x1x2xf32, 3xi64)
        tile_0 = paddle._C_ops.tile(slice_0, full_int_array_2)
        del slice_0

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_3 = [-1]

        # 2147483647 == INT32_MAX: an open-ended slice end, i.e. "to the end".
        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_4 = [2147483647]

        # Take the last time step along axis 1.
        # pd_op.slice: (-1x1x2xf32) <- (-1x96x2xf32, 1xi64, 1xi64)
        slice_1 = paddle._C_ops.slice(
            cast_0, [1], full_int_array_3, full_int_array_4, [1], []
        )
        del full_int_array_3, full_int_array_4

        # Repeat the last step 12 times (back padding).
        # pd_op.tile: (-1x12x2xf32) <- (-1x1x2xf32, 3xi64)
        tile_1 = paddle._C_ops.tile(slice_1, full_int_array_2)
        del full_int_array_2, slice_1

        # Concat axis (1) as a 1xi32 tensor on CPU.
        # pd_op.full: (1xi32) <- ()
        full_0 = paddle._C_ops.full(
            [1], float("1"), paddle.int32, paddle.core.CPUPlace()
        )

        # builtin.combine: ([-1x12x2xf32, -1x96x2xf32, -1x12x2xf32]) <- (-1x12x2xf32, -1x96x2xf32, -1x12x2xf32)
        combine_0 = [tile_0, cast_0, tile_1]
        del tile_0, tile_1

        # Edge-padded sequence: 12 + 96 + 12 = 120 steps.
        # pd_op.concat: (-1x120x2xf32) <- ([-1x12x2xf32, -1x96x2xf32, -1x12x2xf32], 1xi32)
        concat_0 = paddle._C_ops.concat(combine_0, full_0)
        del combine_0, full_0

        # pd_op.transpose: (-1x2x120xf32) <- (-1x120x2xf32)
        transpose_1 = paddle._C_ops.transpose(concat_0, [0, 2, 1])
        del concat_0

        # pd_op.full_int_array: (1xi64) <- ()
        full_int_array_5 = [2]

        # pd_op.unsqueeze: (-1x2x1x120xf32) <- (-1x2x120xf32, 1xi64)
        unsqueeze_0 = paddle._C_ops.unsqueeze(transpose_1, full_int_array_5)
        del transpose_1

        # pd_op.full_int_array: (2xi64) <- ()
        full_int_array_6 = [1, 25]

        # 1x25 average pooling, stride 1, no padding: a moving average over the
        # padded sequence (120 -> 96 valid positions per the shape annotation).
        # pd_op.pool2d: (-1x2x1x96xf32) <- (-1x2x1x120xf32, 2xi64)
        pool2d_0 = paddle._C_ops.pool2d(
            unsqueeze_0,
            full_int_array_6,
            [1, 1],
            [0, 0],
            False,
            True,
            "NCHW",
            "avg",
            False,
            False,
            "EXPLICIT",
        )
        del full_int_array_6, unsqueeze_0

        # pd_op.squeeze: (-1x2x96xf32) <- (-1x2x1x96xf32, 1xi64)
        squeeze_0 = paddle._C_ops.squeeze(pool2d_0, full_int_array_5)
        del full_int_array_5, pool2d_0

        # transpose_2 is the smoothed (trend) component in -1x96x2 layout.
        # pd_op.transpose: (-1x96x2xf32) <- (-1x2x96xf32)
        transpose_2 = paddle._C_ops.transpose(squeeze_0, [0, 2, 1])
        del squeeze_0

        # subtract_0 is the residual (input minus trend) component.
        # pd_op.subtract: (-1x96x2xf32) <- (-1x96x2xf32, -1x96x2xf32)
        subtract_0 = paddle._C_ops.subtract(cast_0, transpose_2)
        del cast_0

        # pd_op.transpose: (-1x2x96xf32) <- (-1x96x2xf32)
        transpose_3 = paddle._C_ops.transpose(subtract_0, [0, 2, 1])
        del subtract_0

        # pd_op.transpose: (-1x2x96xf32) <- (-1x96x2xf32)
        transpose_4 = paddle._C_ops.transpose(transpose_2, [0, 2, 1])
        del transpose_2

        # Linear projection of the residual branch.
        # pd_op.matmul: (-1x2x96xf32) <- (-1x2x96xf32, 96x96xf32)
        matmul_0 = paddle._C_ops.matmul(transpose_3, parameter_3, False, False)
        del parameter_3, transpose_3

        # pd_op.add: (-1x2x96xf32) <- (-1x2x96xf32, 96xf32)
        add_0 = paddle._C_ops.add(matmul_0, parameter_2)
        del matmul_0, parameter_2

        # Linear projection of the trend branch.
        # pd_op.matmul: (-1x2x96xf32) <- (-1x2x96xf32, 96x96xf32)
        matmul_1 = paddle._C_ops.matmul(transpose_4, parameter_1, False, False)
        del parameter_1, transpose_4

        # pd_op.add: (-1x2x96xf32) <- (-1x2x96xf32, 96xf32)
        add_1 = paddle._C_ops.add(matmul_1, parameter_0)
        del matmul_1, parameter_0

        # Sum of the two branch outputs.
        # pd_op.add: (-1x2x96xf32) <- (-1x2x96xf32, -1x2x96xf32)
        add_2 = paddle._C_ops.add(add_0, add_1)
        del add_0, add_1

        # Back to the input layout (-1x96x2).
        # pd_op.transpose: (-1x96x2xf32) <- (-1x2x96xf32)
        transpose_0 = paddle._C_ops.transpose(add_2, [0, 2, 1])
        del add_2, assign_0

        return transpose_0

0 commit comments

Comments
 (0)