Skip to content

Commit ecab3ab

Browse files
committed
[New Sample] Add test example of PaddleScience
1 parent c4c59de commit ecab3ab

File tree

8 files changed

+3077
-0
lines changed

8 files changed

+3077
-0
lines changed
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
{
    "framework": "paddle",
    "model_name": "",
    "num_devices_required": 1,
    "num_nodes_required": 1
}
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
class Program_weight_tensor_data_0:
    """Spec for input tensor ``data_0``: float32 of shape [100, 1], no inline values."""

    name = "data_0"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random generation — confirm with loader
    max_val = 0.5  # presumably the upper bound for random generation
    mean = std = data = None  # statistics/values intentionally unset
10+
11+
12+
class Program_weight_tensor_data_1:
    """Spec for input tensor ``data_1``: float32 of shape [100, 1], no inline values."""

    name = "data_1"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random generation — confirm with loader
    max_val = 0.5  # presumably the upper bound for random generation
    mean = std = data = None  # statistics/values intentionally unset
21+
22+
23+
class Program_weight_tensor_data_2:
    """Spec for input tensor ``data_2``: float32 of shape [100, 1], no inline values."""

    name = "data_2"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random generation — confirm with loader
    max_val = 0.5  # presumably the upper bound for random generation
    mean = std = data = None  # statistics/values intentionally unset
32+
33+
34+
class Program_weight_tensor_data_3:
    """Spec for input tensor ``data_3``: float32 of shape [100, 1], no inline values."""

    name = "data_3"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random generation — confirm with loader
    max_val = 0.5  # presumably the upper bound for random generation
    mean = std = data = None  # statistics/values intentionally unset
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
import paddle
2+
3+
4+
class GraphModule(paddle.nn.Layer):
    """Statically captured forward program of a small MLP with a weighted
    squared-error reduction (exported from a traced Paddle program).

    The graph applies a 3-hidden-layer tanh MLP (1 -> 20 -> 20 -> 20 -> 1)
    to ``data_1`` and reduces against ``data_2``::

        pred = MLP(data_1)
        loss = sum(((pred - data_2) ** 2) * data_3) / 100

    Weights arrive as explicit ``forward`` arguments (``parameter_0`` ..
    ``parameter_7``) rather than as registered sublayers, because the module
    is a replay of a lowered program (``paddle._C_ops`` calls).  Intermediate
    tensors are ``del``-eted as soon as they are dead to keep peak memory low.

    Returns:
        tuple: ``(add_0, divide_0)`` — the [100, 1] network output and the
        scalar weighted mean of squared residuals.
    """

    def __init__(self):
        super().__init__()

    def forward(
        self,
        parameter_0,  # [1]      output-layer bias
        parameter_1,  # [20, 1]  output-layer weight
        parameter_2,  # [20]     hidden-layer-3 bias
        parameter_3,  # [20, 20] hidden-layer-3 weight
        parameter_4,  # [20]     hidden-layer-2 bias
        parameter_5,  # [20, 20] hidden-layer-2 weight
        parameter_6,  # [20]     hidden-layer-1 bias
        parameter_7,  # [1, 20]  hidden-layer-1 weight
        data_0,  # [100, 1] unused by this graph; kept for interface compatibility
        data_1,  # [100, 1] network input
        data_2,  # [100, 1] reference values the prediction is compared against
        data_3,  # [100, 1] per-point weights applied to the squared residual
    ):
        # Hidden layer 1: (100x20) <- (100x1) @ (1x20), bias add, tanh.
        matmul_0 = paddle._C_ops.matmul(data_1, parameter_7, False, False)
        del data_1, parameter_7

        add_1 = paddle._C_ops.add(matmul_0, parameter_6)
        del matmul_0, parameter_6

        tanh_0 = paddle._C_ops.tanh(add_1)
        del add_1

        # Hidden layer 2: (100x20) <- (100x20) @ (20x20), bias add, tanh.
        matmul_1 = paddle._C_ops.matmul(tanh_0, parameter_5, False, False)
        del parameter_5, tanh_0

        add_2 = paddle._C_ops.add(matmul_1, parameter_4)
        del matmul_1, parameter_4

        tanh_1 = paddle._C_ops.tanh(add_2)
        del add_2

        # Hidden layer 3: (100x20) <- (100x20) @ (20x20), bias add, tanh.
        matmul_2 = paddle._C_ops.matmul(tanh_1, parameter_3, False, False)
        del parameter_3, tanh_1

        add_3 = paddle._C_ops.add(matmul_2, parameter_2)
        del matmul_2, parameter_2

        tanh_2 = paddle._C_ops.tanh(add_3)
        del add_3

        # Output layer: (100x1) <- (100x20) @ (20x1), bias add.
        matmul_3 = paddle._C_ops.matmul(tanh_2, parameter_1, False, False)
        del parameter_1, tanh_2

        add_0 = paddle._C_ops.add(matmul_3, parameter_0)
        del matmul_3, parameter_0

        # Residual between prediction and reference values.
        subtract_0 = paddle._C_ops.subtract(add_0, data_2)
        del data_2

        # Square the residual via elementwise_pow with a scalar exponent 2.
        full_0 = paddle._C_ops.full(
            [], float("2"), paddle.float32, paddle.framework._current_expected_place()
        )
        elementwise_pow_0 = paddle._C_ops.elementwise_pow(subtract_0, full_0)
        del full_0, subtract_0

        # Apply the per-point weights.
        multiply_0 = paddle._C_ops.multiply(elementwise_pow_0, data_3)
        del data_3, elementwise_pow_0

        # Reduce over both axes to a scalar.  (The original traced program
        # also emitted an unused empty axis list here; it has been removed.)
        sum_axes = [0, 1]
        sum_0 = paddle._C_ops.sum(multiply_0, sum_axes, paddle.float32, False)
        del multiply_0

        # Divide by the number of sample points (100) to obtain the mean.
        full_1 = paddle._C_ops.full(
            [], float("100"), paddle.float32, paddle.framework._current_expected_place()
        )
        divide_0 = paddle._C_ops.divide(sum_0, full_1)
        del full_1, sum_0

        return add_0, divide_0
Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
class Program_weight_tensor_parameter_0:
    """Spec for weight tensor ``parameter_0``: float32 of shape [1], no inline values."""

    name = "parameter_0"
    shape = [1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
10+
11+
12+
class Program_weight_tensor_parameter_1:
    """Spec for weight tensor ``parameter_1``: float32 of shape [20, 1], no inline values."""

    name = "parameter_1"
    shape = [20, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
21+
22+
23+
class Program_weight_tensor_parameter_2:
    """Spec for weight tensor ``parameter_2``: float32 of shape [20], no inline values."""

    name = "parameter_2"
    shape = [20]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
32+
33+
34+
class Program_weight_tensor_parameter_3:
    """Spec for weight tensor ``parameter_3``: float32 of shape [20, 20], no inline values."""

    name = "parameter_3"
    shape = [20, 20]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
43+
44+
45+
class Program_weight_tensor_parameter_4:
    """Spec for weight tensor ``parameter_4``: float32 of shape [20], no inline values."""

    name = "parameter_4"
    shape = [20]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
54+
55+
56+
class Program_weight_tensor_parameter_5:
    """Spec for weight tensor ``parameter_5``: float32 of shape [20, 20], no inline values."""

    name = "parameter_5"
    shape = [20, 20]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
65+
66+
67+
class Program_weight_tensor_parameter_6:
    """Spec for weight tensor ``parameter_6``: float32 of shape [20], no inline values."""

    name = "parameter_6"
    shape = [20]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
76+
77+
78+
class Program_weight_tensor_parameter_7:
    """Spec for weight tensor ``parameter_7``: float32 of shape [1, 20], no inline values."""

    name = "parameter_7"
    shape = [1, 20]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random initialisation — confirm
    max_val = 0.5  # presumably the upper bound for random initialisation
    mean = std = data = None  # statistics/values intentionally unset
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
{
    "framework": "paddle",
    "model_name": "",
    "num_devices_required": 1,
    "num_nodes_required": 1
}
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
class Program_weight_tensor_data_0:
    """Spec for input tensor ``data_0``: float32 of shape [100, 1], no inline values."""

    name = "data_0"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random generation — confirm with loader
    max_val = 0.5  # presumably the upper bound for random generation
    mean = std = data = None  # statistics/values intentionally unset
10+
11+
12+
class Program_weight_tensor_data_1:
    """Spec for input tensor ``data_1``: float32 of shape [100, 1], no inline values."""

    name = "data_1"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random generation — confirm with loader
    max_val = 0.5  # presumably the upper bound for random generation
    mean = std = data = None  # statistics/values intentionally unset
21+
22+
23+
class Program_weight_tensor_data_2:
    """Spec for input tensor ``data_2``: float32 of shape [4, 1] with explicit values."""

    name = "data_2"
    shape = [4, 1]
    dtype = "float32"
    # Constant payload, stored flat; reshaped to ``shape`` by the consumer (presumably).
    data = [-1.0, -1.0, 1.0, 1.0]
28+
29+
30+
class Program_weight_tensor_data_3:
    """Spec for input tensor ``data_3``: float32 of shape [4, 1] with explicit values."""

    name = "data_3"
    shape = [4, 1]
    dtype = "float32"
    # Constant payload, stored flat; reshaped to ``shape`` by the consumer (presumably).
    data = [0.0, 0.0, 1.0, 1.0]
35+
36+
37+
class Program_weight_tensor_data_4:
    """Spec for input tensor ``data_4``: float32 of shape [100, 1], no inline values."""

    name = "data_4"
    shape = [100, 1]
    dtype = "float32"
    min_val = 0    # presumably the lower bound for random generation — confirm with loader
    max_val = 0.5  # presumably the upper bound for random generation
    mean = std = data = None  # statistics/values intentionally unset
46+
47+
48+
class Program_weight_tensor_data_5:
    """Spec for input tensor ``data_5``: float32 of shape [4, 1], all zeros."""

    name = "data_5"
    shape = [4, 1]
    dtype = "float32"
    # Constant payload, stored flat; reshaped to ``shape`` by the consumer (presumably).
    data = [0.0, 0.0, 0.0, 0.0]
53+
54+
55+
class Program_weight_tensor_data_6:
    """Spec for input tensor ``data_6``: float32 of shape [4, 1], all zeros."""

    name = "data_6"
    shape = [4, 1]
    dtype = "float32"
    # Constant payload, stored flat; reshaped to ``shape`` by the consumer (presumably).
    data = [0.0, 0.0, 0.0, 0.0]
60+
61+
62+
class Program_weight_tensor_data_7:
    """Spec for input tensor ``data_7``: float32 of shape [4, 1], all zeros."""

    name = "data_7"
    shape = [4, 1]
    dtype = "float32"
    # Constant payload, stored flat; reshaped to ``shape`` by the consumer (presumably).
    data = [0.0, 0.0, 0.0, 0.0]
67+
68+
69+
class Program_weight_tensor_data_8:
    """Spec for input tensor ``data_8``: float32 of shape [4, 1], all zeros."""

    name = "data_8"
    shape = [4, 1]
    dtype = "float32"
    # Constant payload, stored flat; reshaped to ``shape`` by the consumer (presumably).
    data = [0.0, 0.0, 0.0, 0.0]

0 commit comments

Comments
 (0)