Skip to content

Commit ae8d1ef

Browse files
authored
Merge branch 'develop' into describe-1
2 parents b76623f + 94f7ece commit ae8d1ef

File tree

10 files changed

+3030
-0
lines changed

10 files changed

+3030
-0
lines changed
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
6f3dcd3f048c2f76bf01f0b9d90372d402b7ed0431ad5102bd9f54d53cbada4c
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
{
2+
"framework": "paddle",
3+
"model_name": "euler_beam",
4+
"num_devices_required": 1,
5+
"num_nodes_required": 1
6+
}
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# Descriptor records for the program's input tensors. Each class captures
# the tensor's name, shape, dtype and value range; ``data = None`` means no
# concrete payload is stored inline.


class Program_weight_tensor_data_0:
    # 100x1 float32 tensor; values lie in [min_val, max_val].
    name = "data_0"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_data_1:
    # 100x1 float32 tensor; values lie in [min_val, max_val].
    name = "data_1"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_data_2:
    # 100x1 float32 tensor; values lie in [min_val, max_val].
    name = "data_2"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_data_3:
    # 100x1 float32 tensor; values lie in [min_val, max_val].
    name = "data_3"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
import paddle
2+
3+
4+
class GraphModule(paddle.nn.Layer):
    """Statically captured forward graph for the euler_beam model's loss.

    Runs a 1 -> 20 -> 20 -> 20 -> 1 tanh MLP on ``data_1``, then reduces the
    squared difference against ``data_2`` — weighted elementwise by
    ``data_3`` — to a scalar (sum over both axes divided by 100).

    Notes:
        * ``data_0`` is part of the captured signature but is never consumed
          by this graph.
        * Intermediates are ``del``-eted as soon as they are dead, matching
          the liveness of the original captured program.
        * The capture also emitted an unused ``full_int_array_0 = []``
          constant; it had no effect on the result and was dropped here.
    """

    def __init__(self):
        super().__init__()

    def forward(
        self,
        parameter_0,  # [1]      output-layer bias
        parameter_1,  # [20, 1]  output-layer weight
        parameter_2,  # [20]     layer-3 bias
        parameter_3,  # [20, 20] layer-3 weight
        parameter_4,  # [20]     layer-2 bias
        parameter_5,  # [20, 20] layer-2 weight
        parameter_6,  # [20]     layer-1 bias
        parameter_7,  # [1, 20]  layer-1 weight
        data_0,  # unused by this graph
        data_1,  # [100, 1] network input
        data_2,  # [100, 1] target values
        data_3,  # [100, 1] per-sample weights
    ):
        # Layer 1: (100x20) <- (100x1) @ (1x20) + (20,) bias, tanh.
        matmul_0 = paddle._C_ops.matmul(data_1, parameter_7, False, False)
        del data_1, parameter_7
        add_0 = paddle._C_ops.add(matmul_0, parameter_6)
        del matmul_0, parameter_6
        tanh_0 = paddle._C_ops.tanh(add_0)
        del add_0

        # Layer 2: (100x20) <- (100x20) @ (20x20) + (20,) bias, tanh.
        matmul_1 = paddle._C_ops.matmul(tanh_0, parameter_5, False, False)
        del parameter_5, tanh_0
        add_1 = paddle._C_ops.add(matmul_1, parameter_4)
        del matmul_1, parameter_4
        tanh_1 = paddle._C_ops.tanh(add_1)
        del add_1

        # Layer 3: (100x20) <- (100x20) @ (20x20) + (20,) bias, tanh.
        matmul_2 = paddle._C_ops.matmul(tanh_1, parameter_3, False, False)
        del parameter_3, tanh_1
        add_2 = paddle._C_ops.add(matmul_2, parameter_2)
        del matmul_2, parameter_2
        tanh_2 = paddle._C_ops.tanh(add_2)
        del add_2

        # Output layer: (100x1) <- (100x20) @ (20x1) + (1,) bias.
        matmul_3 = paddle._C_ops.matmul(tanh_2, parameter_1, False, False)
        del parameter_1, tanh_2
        add_3 = paddle._C_ops.add(matmul_3, parameter_0)
        del matmul_3, parameter_0

        # Weighted squared error: ((pred - target) ** 2) * data_3.
        subtract_0 = paddle._C_ops.subtract(add_3, data_2)
        del data_2
        full_0 = paddle._C_ops.full(
            [], float("2"), paddle.float32, paddle.framework._current_expected_place()
        )
        elementwise_pow_0 = paddle._C_ops.elementwise_pow(subtract_0, full_0)
        del full_0, subtract_0
        multiply_0 = paddle._C_ops.multiply(elementwise_pow_0, data_3)
        del data_3, elementwise_pow_0

        # Reduce over both axes to a scalar, then divide by 100 (the sample
        # count) to obtain the mean.
        full_int_array_1 = [0, 1]
        sum_0 = paddle._C_ops.sum(multiply_0, full_int_array_1, paddle.float32, False)
        del full_int_array_1, multiply_0
        full_1 = paddle._C_ops.full(
            [], float("100"), paddle.float32, paddle.framework._current_expected_place()
        )
        divide_0 = paddle._C_ops.divide(sum_0, full_1)
        del add_3, full_1, sum_0
        return divide_0
Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
# Descriptor records for the program's weight/bias parameters. Each class
# captures the tensor's name, shape, dtype and value range; ``data = None``
# means no concrete payload is stored inline.


class Program_weight_tensor_parameter_0:
    # float32 vector of length 1; values lie in [min_val, max_val].
    name = "parameter_0"
    shape = [1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_parameter_1:
    # 20x1 float32 matrix; values lie in [min_val, max_val].
    name = "parameter_1"
    shape = [20, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_parameter_2:
    # float32 vector of length 20; values lie in [min_val, max_val].
    name = "parameter_2"
    shape = [20]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_parameter_3:
    # 20x20 float32 matrix; values lie in [min_val, max_val].
    name = "parameter_3"
    shape = [20, 20]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_parameter_4:
    # float32 vector of length 20; values lie in [min_val, max_val].
    name = "parameter_4"
    shape = [20]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_parameter_5:
    # 20x20 float32 matrix; values lie in [min_val, max_val].
    name = "parameter_5"
    shape = [20, 20]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_parameter_6:
    # float32 vector of length 20; values lie in [min_val, max_val].
    name = "parameter_6"
    shape = [20]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_parameter_7:
    # 1x20 float32 matrix; values lie in [min_val, max_val].
    name = "parameter_7"
    shape = [1, 20]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
8b51d7c27aa8386c19c24188ab1b30250189db4262db16ec473fd8581b36358d
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
{
2+
"framework": "paddle",
3+
"model_name": "euler_beam",
4+
"num_devices_required": 1,
5+
"num_nodes_required": 1
6+
}
Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
# Descriptor records for the program's input tensors. Classes with
# ``data = None`` carry only a value range; the others embed their concrete
# 4-element float32 payloads inline.


class Program_weight_tensor_data_0:
    # 100x1 float32 tensor; values lie in [min_val, max_val].
    name = "data_0"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_data_1:
    # 100x1 float32 tensor; values lie in [min_val, max_val].
    name = "data_1"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_data_2:
    # 4x1 float32 tensor with an inline payload.
    name = "data_2"
    shape = [4, 1]
    dtype = "float32"
    data = [-1.0, -1.0, 1.0, 1.0]


class Program_weight_tensor_data_3:
    # 4x1 float32 tensor with an inline payload.
    name = "data_3"
    shape = [4, 1]
    dtype = "float32"
    data = [0.0, 0.0, 1.0, 1.0]


class Program_weight_tensor_data_4:
    # 100x1 float32 tensor; values lie in [min_val, max_val].
    name = "data_4"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0.0
    max_val = 0.5
    data = None


class Program_weight_tensor_data_5:
    # 4x1 float32 tensor, all zeros.
    name = "data_5"
    shape = [4, 1]
    dtype = "float32"
    data = [0.0] * 4


class Program_weight_tensor_data_6:
    # 4x1 float32 tensor, all zeros.
    name = "data_6"
    shape = [4, 1]
    dtype = "float32"
    data = [0.0] * 4


class Program_weight_tensor_data_7:
    # 4x1 float32 tensor, all zeros.
    name = "data_7"
    shape = [4, 1]
    dtype = "float32"
    data = [0.0] * 4


class Program_weight_tensor_data_8:
    # 4x1 float32 tensor, all zeros.
    name = "data_8"
    shape = [4, 1]
    dtype = "float32"
    data = [0.0] * 4

0 commit comments

Comments
 (0)