
Commit 93342ee

[New Sample] Add microsoft deberta large mnli
1 parent a94c43b commit 93342ee

File tree

6 files changed (+160, -0 lines changed)

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
5f2c70e2cf8e9a078d107f0461e6546e1f3f0ef020361c727bcd6bbadd3da96f
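
The single line above is a 64-character hexadecimal string, i.e. the width of a SHA-256 digest, and presumably identifies the captured graph. A minimal sketch of recomputing such a digest with hashlib, under the unverified assumption that it hashes the extracted model source (the diff does not say what is actually hashed):

import hashlib
from pathlib import Path

# Assumption: the digest covers the extracted graph source. Treat this purely as
# an illustration of the format, not as the sample's actual hashing scheme.
digest = hashlib.sha256(Path("model.py").read_bytes()).hexdigest()
print(len(digest), digest)  # 64 hex characters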
Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
{
    "framework": "torch",
    "num_devices_required": 1,
    "num_nodes_required": 1,
    "dynamic": true
}
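
The JSON above is the sample's runtime metadata: it targets the torch framework, needs a single device on a single node, and marks the graph as dynamic. A minimal sketch of how a harness might consume it, assuming a file name of graph_net.json (the file name is not visible in this diff):

import json

# Illustrative only: the actual loader and file name used by the sample runner
# are not shown in this diff.
with open("graph_net.json") as f:
    cfg = json.load(f)

assert cfg["framework"] == "torch"
print(cfg["num_devices_required"], cfg["num_nodes_required"], cfg["dynamic"])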

samples/transformers-auto-model/microsoft_deberta_large_mnli/input_meta.py

Whitespace-only changes.

samples/transformers-auto-model/microsoft_deberta_large_mnli/input_tensor_constraints.py

Whitespace-only changes.
Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
import torch


class GraphModule(torch.nn.Module):
    def forward(
        self,
        L_stack0_0_: torch.Tensor,
        L_self_modules_intermediate_modules_dense_parameters_weight_: torch.nn.parameter.Parameter,
        L_self_modules_intermediate_modules_dense_parameters_bias_: torch.nn.parameter.Parameter,
        L_self_modules_output_modules_dense_parameters_weight_: torch.nn.parameter.Parameter,
        L_self_modules_output_modules_dense_parameters_bias_: torch.nn.parameter.Parameter,
        L_self_modules_output_modules_LayerNorm_parameters_weight_: torch.nn.parameter.Parameter,
        L_self_modules_output_modules_LayerNorm_parameters_bias_: torch.nn.parameter.Parameter,
    ):
        l_stack0_0_ = L_stack0_0_
        l_self_modules_intermediate_modules_dense_parameters_weight_ = (
            L_self_modules_intermediate_modules_dense_parameters_weight_
        )
        l_self_modules_intermediate_modules_dense_parameters_bias_ = (
            L_self_modules_intermediate_modules_dense_parameters_bias_
        )
        l_self_modules_output_modules_dense_parameters_weight_ = (
            L_self_modules_output_modules_dense_parameters_weight_
        )
        l_self_modules_output_modules_dense_parameters_bias_ = (
            L_self_modules_output_modules_dense_parameters_bias_
        )
        l_self_modules_output_modules_layer_norm_parameters_weight_ = (
            L_self_modules_output_modules_LayerNorm_parameters_weight_
        )
        l_self_modules_output_modules_layer_norm_parameters_bias_ = (
            L_self_modules_output_modules_LayerNorm_parameters_bias_
        )
        hidden_states = torch._C._nn.linear(
            l_stack0_0_,
            l_self_modules_intermediate_modules_dense_parameters_weight_,
            l_self_modules_intermediate_modules_dense_parameters_bias_,
        )
        l_self_modules_intermediate_modules_dense_parameters_weight_ = (
            l_self_modules_intermediate_modules_dense_parameters_bias_
        ) = None
        hidden_states_1 = torch._C._nn.gelu(hidden_states)
        hidden_states = None
        hidden_states_2 = torch._C._nn.linear(
            hidden_states_1,
            l_self_modules_output_modules_dense_parameters_weight_,
            l_self_modules_output_modules_dense_parameters_bias_,
        )
        hidden_states_1 = (
            l_self_modules_output_modules_dense_parameters_weight_
        ) = l_self_modules_output_modules_dense_parameters_bias_ = None
        hidden_states_3 = torch.nn.functional.dropout(
            hidden_states_2, 0.1, False, False
        )
        hidden_states_2 = None
        add = hidden_states_3 + l_stack0_0_
        hidden_states_3 = l_stack0_0_ = None
        hidden_states_4 = add.float()
        add = None
        mean = hidden_states_4.mean(-1, keepdim=True)
        sub = hidden_states_4 - mean
        pow_1 = sub.pow(2)
        sub = None
        variance = pow_1.mean(-1, keepdim=True)
        pow_1 = None
        sub_1 = hidden_states_4 - mean
        hidden_states_4 = mean = None
        add_1 = variance + 1e-07
        variance = None
        sqrt = torch.sqrt(add_1)
        add_1 = None
        hidden_states_5 = sub_1 / sqrt
        sub_1 = sqrt = None
        hidden_states_6 = hidden_states_5.to(torch.float32)
        hidden_states_5 = None
        mul = (
            l_self_modules_output_modules_layer_norm_parameters_weight_
            * hidden_states_6
        )
        l_self_modules_output_modules_layer_norm_parameters_weight_ = (
            hidden_states_6
        ) = None
        y = mul + l_self_modules_output_modules_layer_norm_parameters_bias_
        mul = l_self_modules_output_modules_layer_norm_parameters_bias_ = None
        return (y,)
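
Judging by the parameter names, the captured graph appears to be the feed-forward portion of one DeBERTa encoder layer: intermediate dense (1024 -> 4096) plus GELU, output dense (4096 -> 1024), dropout, residual add, and a DeBERTa-style LayerNorm computed in float32 with eps 1e-07. A minimal smoke-test sketch of invoking it directly with randomly initialized tensors whose shapes match the weight metadata below; this is illustrative only and not part of the committed sample:

import torch

# Hypothetical inputs/weights; the real values come from the sample's metadata files.
m = GraphModule()
hidden = torch.randn(1, 10, 1024)
w_in = torch.nn.Parameter(torch.randn(4096, 1024) * 0.026)
b_in = torch.nn.Parameter(torch.zeros(4096))
w_out = torch.nn.Parameter(torch.randn(1024, 4096) * 0.026)
b_out = torch.nn.Parameter(torch.zeros(1024))
ln_w = torch.nn.Parameter(torch.ones(1024))
ln_b = torch.nn.Parameter(torch.zeros(1024))
(y,) = m(hidden, w_in, b_in, w_out, b_out, ln_w, ln_b)
print(y.shape)  # torch.Size([1, 10, 1024])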
Lines changed: 68 additions & 0 deletions
@@ -0,0 +1,68 @@
class Program_weight_tensor_meta_L_stack0_0_:
    name = "L_stack0_0_"
    shape = [1, 10, 1024]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.006
    std = 1.098
    data = None


class Program_weight_tensor_meta_L_self_modules_intermediate_modules_dense_parameters_weight_:
    name = "L_self_modules_intermediate_modules_dense_parameters_weight_"
    shape = [4096, 1024]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.000
    std = 0.026
    data = None


class Program_weight_tensor_meta_L_self_modules_intermediate_modules_dense_parameters_bias_:
    name = "L_self_modules_intermediate_modules_dense_parameters_bias_"
    shape = [4096]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.033
    std = 0.022
    data = None


class Program_weight_tensor_meta_L_self_modules_output_modules_dense_parameters_weight_:
    name = "L_self_modules_output_modules_dense_parameters_weight_"
    shape = [1024, 4096]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.026
    data = None


class Program_weight_tensor_meta_L_self_modules_output_modules_dense_parameters_bias_:
    name = "L_self_modules_output_modules_dense_parameters_bias_"
    shape = [1024]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.014
    data = None


class Program_weight_tensor_meta_L_self_modules_output_modules_LayerNorm_parameters_weight_:
    name = "L_self_modules_output_modules_LayerNorm_parameters_weight_"
    shape = [1024]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 1.131
    std = 0.063
    data = None


class Program_weight_tensor_meta_L_self_modules_output_modules_LayerNorm_parameters_bias_:
    name = "L_self_modules_output_modules_LayerNorm_parameters_bias_"
    shape = [1024]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.001
    std = 0.032
    data = None
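
Each record stores only shape, dtype, device, and summary statistics (mean/std) rather than raw weight values (data is None). A minimal sketch of materializing a tensor from such a record by sampling Gaussian values around the recorded statistics; the repository's own loader may do this differently:

import torch

def materialize(meta, device="cpu"):
    # Illustrative only: draws a tensor of the recorded shape whose values roughly
    # match the stored mean/std. Defaults to CPU even though the metadata records
    # "cuda:0".
    dtype = getattr(torch, meta.dtype.split(".")[-1])
    t = torch.randn(*meta.shape, dtype=dtype, device=device)
    return t * meta.std + meta.mean

hidden = materialize(Program_weight_tensor_meta_L_stack0_0_)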

0 commit comments
