Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
affc8c7cd95d8acc9ee5582af79eef03a74c19d6a08a54cd80ec10a6086cdba1
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"framework": "torch",
"num_devices_required": 1,
"num_nodes_required": 1,
"dynamic": false
}

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
class Program_weight_tensor_meta_L_stack0_encoder_last_hidden_state_text:
    """Generated metadata record for captured tensor ``L_stack0_encoder_last_hidden_state_text``."""

    # data is None: only summary stats (mean/std) were captured, not raw values.
    name = "L_stack0_encoder_last_hidden_state_text"
    shape = [1, 7, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 1.000
    data = None


class Program_weight_tensor_meta_L_stack0_intermediate_hidden_states:
    """Generated metadata record for captured tensor ``L_stack0_intermediate_hidden_states``."""

    name = "L_stack0_intermediate_hidden_states"
    shape = [1, 6, 900, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 1.000
    data = None


class Program_weight_tensor_meta_L_stack0_init_reference_points:
    """Generated metadata record for captured tensor ``L_stack0_init_reference_points``."""

    name = "L_stack0_init_reference_points"
    shape = [1, 900, 4]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.400
    std = 0.296
    data = None


class Program_weight_tensor_meta_L_stack0_intermediate_reference_points:
    """Generated metadata record for captured tensor ``L_stack0_intermediate_reference_points``."""

    name = "L_stack0_intermediate_reference_points"
    shape = [1, 6, 900, 4]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.400
    std = 0.296
    data = None


class Program_weight_tensor_meta_L_attention_mask_:
    """Generated metadata record for captured tensor ``L_attention_mask_``."""

    # Small integer tensor, so the raw values were captured in `data`;
    # mean/std are None for non-float dtypes in this dump format.
    name = "L_attention_mask_"
    shape = [1, 7]
    dtype = "torch.int64"
    device = "cuda:0"
    mean = None
    std = None
    data = [1, 1, 1, 1, 1, 1, 1]


class Program_weight_tensor_meta_L_self_modules_bbox_embed_modules_0_modules_layers_modules_0_parameters_weight_:
    """Generated metadata record for parameter ``bbox_embed[0].layers[0].weight``."""

    name = "L_self_modules_bbox_embed_modules_0_modules_layers_modules_0_parameters_weight_"
    shape = [256, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.000
    std = 0.020
    data = None


class Program_weight_tensor_meta_L_self_modules_bbox_embed_modules_0_modules_layers_modules_0_parameters_bias_:
    """Generated metadata record for parameter ``bbox_embed[0].layers[0].bias``."""

    name = (
        "L_self_modules_bbox_embed_modules_0_modules_layers_modules_0_parameters_bias_"
    )
    shape = [256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_bbox_embed_modules_0_modules_layers_modules_1_parameters_weight_:
    """Generated metadata record for parameter ``bbox_embed[0].layers[1].weight``."""

    name = "L_self_modules_bbox_embed_modules_0_modules_layers_modules_1_parameters_weight_"
    shape = [256, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.020
    data = None


class Program_weight_tensor_meta_L_self_modules_bbox_embed_modules_0_modules_layers_modules_1_parameters_bias_:
    """Generated metadata record for parameter ``bbox_embed[0].layers[1].bias``."""

    name = (
        "L_self_modules_bbox_embed_modules_0_modules_layers_modules_1_parameters_bias_"
    )
    shape = [256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_bbox_embed_modules_0_modules_layers_modules_2_parameters_weight_:
    """Generated metadata record for parameter ``bbox_embed[0].layers[2].weight``."""

    # Output projection to 4 values — presumably box coordinates; TODO confirm.
    name = "L_self_modules_bbox_embed_modules_0_modules_layers_modules_2_parameters_weight_"
    shape = [4, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_bbox_embed_modules_0_modules_layers_modules_2_parameters_bias_:
    """Generated metadata record for parameter ``bbox_embed[0].layers[2].bias``."""

    # Tensor is small enough that raw values were captured alongside the stats.
    name = (
        "L_self_modules_bbox_embed_modules_0_modules_layers_modules_2_parameters_bias_"
    )
    shape = [4]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = [0.000000, 0.000000, 0.000000, 0.000000]
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3eb7ef4fc44c784bf246435c08ca58a7f6defb71f4707d5dfa27e4bf629fc09d
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"framework": "torch",
"num_devices_required": 1,
"num_nodes_required": 1,
"dynamic": true
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
import torch


class GraphModule(torch.nn.Module):
    """Dynamo/FX-captured graph of a residual feed-forward sub-block.

    NOTE(review): auto-generated code (torch.compile capture). The flat
    argument list mirrors the captured closure: activations, scalar
    hyper-parameters lifted to 0-d tensors (dropout probs, layer-norm eps),
    and module parameters. The repeated ``x = None`` assignments are the
    codegen's way of dropping references as soon as a value is dead — do
    not "clean them up"; statement order is part of the captured semantics.

    Computation: dropout -> residual add -> layer_norm -> fc1 -> relu ->
    dropout -> fc2 -> dropout -> residual add -> layer_norm. Looks like the
    FFN half of a transformer decoder layer — TODO confirm provenance.
    """

    def forward(
        self,
        s61: torch.SymInt,  # symbolic size captured for dynamic shapes; unused in the body
        L_stack0_0_: torch.Tensor,
        L_self_dropout: torch.Tensor,  # 0-d tensor holding the dropout probability
        L_third_residual_: torch.Tensor,
        L_self_modules_encoder_attn_layer_norm_parameters_weight_: torch.nn.parameter.Parameter,
        L_self_modules_encoder_attn_layer_norm_parameters_bias_: torch.nn.parameter.Parameter,
        L_self_modules_encoder_attn_layer_norm_eps: torch.Tensor,  # 0-d tensor holding eps
        L_self_modules_fc1_parameters_weight_: torch.nn.parameter.Parameter,
        L_self_modules_fc1_parameters_bias_: torch.nn.parameter.Parameter,
        L_self_activation_dropout: torch.Tensor,  # 0-d tensor holding activation-dropout prob
        L_self_modules_fc2_parameters_weight_: torch.nn.parameter.Parameter,
        L_self_modules_fc2_parameters_bias_: torch.nn.parameter.Parameter,
        L_self_modules_final_layer_norm_parameters_weight_: torch.nn.parameter.Parameter,
        L_self_modules_final_layer_norm_parameters_bias_: torch.nn.parameter.Parameter,
        L_self_modules_final_layer_norm_eps: torch.Tensor,  # 0-d tensor holding eps
    ):
        # Rebind captured inputs to lowercase locals (generated convention).
        l_stack0_0_ = L_stack0_0_
        l_self_dropout = L_self_dropout
        l_third_residual_ = L_third_residual_
        l_self_modules_encoder_attn_layer_norm_parameters_weight_ = (
            L_self_modules_encoder_attn_layer_norm_parameters_weight_
        )
        l_self_modules_encoder_attn_layer_norm_parameters_bias_ = (
            L_self_modules_encoder_attn_layer_norm_parameters_bias_
        )
        l_self_modules_encoder_attn_layer_norm_eps = (
            L_self_modules_encoder_attn_layer_norm_eps
        )
        l_self_modules_fc1_parameters_weight_ = L_self_modules_fc1_parameters_weight_
        l_self_modules_fc1_parameters_bias_ = L_self_modules_fc1_parameters_bias_
        l_self_activation_dropout = L_self_activation_dropout
        l_self_modules_fc2_parameters_weight_ = L_self_modules_fc2_parameters_weight_
        l_self_modules_fc2_parameters_bias_ = L_self_modules_fc2_parameters_bias_
        l_self_modules_final_layer_norm_parameters_weight_ = (
            L_self_modules_final_layer_norm_parameters_weight_
        )
        l_self_modules_final_layer_norm_parameters_bias_ = (
            L_self_modules_final_layer_norm_parameters_bias_
        )
        l_self_modules_final_layer_norm_eps = L_self_modules_final_layer_norm_eps
        # Extract dropout probability p from its 0-d tensor; training=False so
        # dropout is an identity at inference, but the call is kept in-graph.
        item = l_self_dropout.item()
        l_self_dropout = None
        hidden_states = torch.nn.functional.dropout(l_stack0_0_, p=item, training=False)
        l_stack0_0_ = None
        # Residual connection with the third captured residual stream.
        hidden_states_1 = l_third_residual_ + hidden_states
        l_third_residual_ = hidden_states = None
        item_1 = l_self_modules_encoder_attn_layer_norm_eps.item()
        l_self_modules_encoder_attn_layer_norm_eps = None
        # Post-attention layer norm over the last (256-wide) dimension.
        hidden_states_2 = torch.nn.functional.layer_norm(
            hidden_states_1,
            (256,),
            l_self_modules_encoder_attn_layer_norm_parameters_weight_,
            l_self_modules_encoder_attn_layer_norm_parameters_bias_,
            item_1,
        )
        hidden_states_1 = (
            l_self_modules_encoder_attn_layer_norm_parameters_weight_
        ) = l_self_modules_encoder_attn_layer_norm_parameters_bias_ = item_1 = None
        # Feed-forward expansion: fc1 then ReLU.
        linear = torch._C._nn.linear(
            hidden_states_2,
            l_self_modules_fc1_parameters_weight_,
            l_self_modules_fc1_parameters_bias_,
        )
        l_self_modules_fc1_parameters_weight_ = (
            l_self_modules_fc1_parameters_bias_
        ) = None
        hidden_states_3 = torch.nn.functional.relu(linear, inplace=False)
        linear = None
        item_2 = l_self_activation_dropout.item()
        l_self_activation_dropout = None
        hidden_states_4 = torch.nn.functional.dropout(
            hidden_states_3, p=item_2, training=False
        )
        hidden_states_3 = item_2 = None
        # Feed-forward projection back down: fc2.
        hidden_states_5 = torch._C._nn.linear(
            hidden_states_4,
            l_self_modules_fc2_parameters_weight_,
            l_self_modules_fc2_parameters_bias_,
        )
        hidden_states_4 = (
            l_self_modules_fc2_parameters_weight_
        ) = l_self_modules_fc2_parameters_bias_ = None
        # Reuses the first dropout probability (`item`) for the output dropout.
        hidden_states_6 = torch.nn.functional.dropout(
            hidden_states_5, p=item, training=False
        )
        hidden_states_5 = item = None
        # Second residual connection around the FFN.
        hidden_states_7 = hidden_states_2 + hidden_states_6
        hidden_states_2 = hidden_states_6 = None
        item_3 = l_self_modules_final_layer_norm_eps.item()
        l_self_modules_final_layer_norm_eps = None
        hidden_states_8 = torch.nn.functional.layer_norm(
            hidden_states_7,
            (256,),
            l_self_modules_final_layer_norm_parameters_weight_,
            l_self_modules_final_layer_norm_parameters_bias_,
            item_3,
        )
        hidden_states_7 = (
            l_self_modules_final_layer_norm_parameters_weight_
        ) = l_self_modules_final_layer_norm_parameters_bias_ = item_3 = None
        return (hidden_states_8,)
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
class Program_weight_tensor_meta_s61:
    """Generated metadata record for symbolic int ``s61`` (captured example value 4)."""

    name = "s61"
    shape = []
    dtype = "torch.int64"
    device = "cpu"
    mean = None
    std = None
    data = [4]


class Program_weight_tensor_meta_L_stack0_0_:
    """Generated metadata record for captured tensor ``L_stack0_0_``."""

    name = "L_stack0_0_"
    shape = [1, 900, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.063
    std = 0.819
    data = None


class Program_weight_tensor_meta_L_self_dropout:
    """Generated metadata record for the 0-d dropout-probability tensor (value 0.1)."""

    name = "L_self_dropout"
    shape = []
    dtype = "torch.float64"
    device = "cpu"
    mean = 0.100
    std = 0.000
    data = [0.100000]


class Program_weight_tensor_meta_L_third_residual_:
    """Generated metadata record for captured tensor ``L_third_residual_``."""

    name = "L_third_residual_"
    shape = [1, 900, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.000
    std = 1.000
    data = None


class Program_weight_tensor_meta_L_self_modules_encoder_attn_layer_norm_parameters_weight_:
    """Generated metadata record for parameter ``encoder_attn_layer_norm.weight``."""

    name = "L_self_modules_encoder_attn_layer_norm_parameters_weight_"
    shape = [256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 1.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_encoder_attn_layer_norm_parameters_bias_:
    """Generated metadata record for parameter ``encoder_attn_layer_norm.bias``."""

    name = "L_self_modules_encoder_attn_layer_norm_parameters_bias_"
    shape = [256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_encoder_attn_layer_norm_eps:
    """Generated metadata record for the 0-d layer-norm eps tensor (value 1e-5)."""

    name = "L_self_modules_encoder_attn_layer_norm_eps"
    shape = []
    dtype = "torch.float64"
    device = "cpu"
    mean = 0.000
    std = 0.000
    data = [0.000010]


class Program_weight_tensor_meta_L_self_modules_fc1_parameters_weight_:
    """Generated metadata record for parameter ``fc1.weight`` (256 -> 2048 expansion)."""

    name = "L_self_modules_fc1_parameters_weight_"
    shape = [2048, 256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.020
    data = None


class Program_weight_tensor_meta_L_self_modules_fc1_parameters_bias_:
    """Generated metadata record for parameter ``fc1.bias``."""

    name = "L_self_modules_fc1_parameters_bias_"
    shape = [2048]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_activation_dropout:
    """Generated metadata record for the 0-d activation-dropout tensor (value 0.0)."""

    name = "L_self_activation_dropout"
    shape = []
    dtype = "torch.float64"
    device = "cpu"
    mean = 0.000
    std = 0.000
    data = [0.000000]


class Program_weight_tensor_meta_L_self_modules_fc2_parameters_weight_:
    """Generated metadata record for parameter ``fc2.weight`` (2048 -> 256 projection)."""

    name = "L_self_modules_fc2_parameters_weight_"
    shape = [256, 2048]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = -0.000
    std = 0.020
    data = None


class Program_weight_tensor_meta_L_self_modules_fc2_parameters_bias_:
    """Generated metadata record for parameter ``fc2.bias``."""

    name = "L_self_modules_fc2_parameters_bias_"
    shape = [256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_final_layer_norm_parameters_weight_:
    """Generated metadata record for parameter ``final_layer_norm.weight``."""

    name = "L_self_modules_final_layer_norm_parameters_weight_"
    shape = [256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 1.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_final_layer_norm_parameters_bias_:
    """Generated metadata record for parameter ``final_layer_norm.bias``."""

    name = "L_self_modules_final_layer_norm_parameters_bias_"
    shape = [256]
    dtype = "torch.float32"
    device = "cuda:0"
    mean = 0.000
    std = 0.000
    data = None


class Program_weight_tensor_meta_L_self_modules_final_layer_norm_eps:
    """Generated metadata record for the 0-d final layer-norm eps tensor (value 1e-5)."""

    name = "L_self_modules_final_layer_norm_eps"
    shape = []
    dtype = "torch.float64"
    device = "cpu"
    mean = 0.000
    std = 0.000
    data = [0.000010]
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
4be59c33ee4c3e088318f5641f37caefb8c1b421152f8beb426300cedc793ddb
Loading