Skip to content

Commit f70b892

Browse files
committed
fix rules
Signed-off-by: jenchen13 <[email protected]>
1 parent 89cbd97 commit f70b892

File tree

1 file changed

+6
-5
lines changed

1 file changed

+6
-5
lines changed

modelopt/torch/export/plugins/mcore_nemotron.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -43,11 +43,7 @@
4343
"linear_fc2": NameRemapping("model.layers.{}.mlp.down_proj."),
4444
"final_layernorm": NameRemapping("model.norm."),
4545
"output_layer": NameRemapping("lm_head."),
46-
# MoE
47-
"router": NameRemapping("model.layers.{}.mlp.gate."),
48-
"local_experts.linear_fc1": GatedMLPSlicing("model.layers.{}.mlp.experts.{}."),
49-
"local_experts.linear_fc2": NameRemapping("model.layers.{}.mlp.experts.{}.down_proj."),
50-
}
46+
}
5147

5248

5349
nemotron_h_causal_lm_import: dict[str, CustomModuleMapping] = {
@@ -101,4 +97,9 @@
10197
"pre_mlp_layernorm": NameRemapping("backbone.layers.{}.norm."),
10298
"linear_fc1": NameRemapping("backbone.layers.{}.mixer.up_proj."),
10399
"linear_fc2": NameRemapping("backbone.layers.{}.mixer.down_proj."),
100+
# MoE
101+
"router": NameRemapping("model.layers.{}.mlp.gate."),
102+
"local_experts.linear_fc1": GatedMLPSlicing("model.layers.{}.mlp.experts.{}."),
103+
"local_experts.linear_fc2": NameRemapping("model.layers.{}.mlp.experts.{}.down_proj."),
104+
104105
}

0 commit comments

Comments (0)