
Commit cf411a9

Merge pull request #232 from NVIDIA/chcui/remove_fp8_comment
Remove outdated fp8 comment
2 parents: dbd0b45 + 690da03

6 files changed: 0 additions, 6 deletions

auto_configurator/base_configs/llama2_7b.yaml

Lines changed: 0 additions & 1 deletion
@@ -118,7 +118,6 @@ model:
   sequence_parallel: false # does not support sequence parallel

   ## Transformer Engine
-  # fp8 training is currently not supported in the improved models
   transformer_engine: true
   fp8: False # enables fp8 in TransformerLayer forward
   fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3
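For reference, the Transformer Engine block in each of these configs reads the same way after the change; the fp8 switches stay off. Purely as an illustration (not part of this commit), flipping the two fp8 keys that already exist in these files is how fp8 training would be requested:

  ## Transformer Engine
  transformer_engine: true
  fp8: False      # set to True to enable fp8 in TransformerLayer forward
  fp8_e4m3: False # set to True for fp8_format = recipe.Format.E4M3

  # illustrative override, not made by this commit:
  # fp8: True
  # fp8_e4m3: True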

launcher_scripts/conf/training/llama/llama1_7b.yaml

Lines changed: 0 additions & 1 deletion
@@ -117,7 +117,6 @@ model:
   sequence_parallel: false # does not support sequence parallel

   ## Transformer Engine
-  # fp8 training is currently not supported in the improved models
   transformer_engine: True
   fp8: False # enables fp8 in TransformerLayer forward
   fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3

launcher_scripts/conf/training/llama/llama2_7b.yaml

Lines changed: 0 additions & 1 deletion
@@ -125,7 +125,6 @@ model:
   sequence_parallel: false

   ## Transformer Engine
-  # fp8 training is currently not supported in the improved models
   transformer_engine: true
   fp8: False # enables fp8 in TransformerLayer forward
   fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3

launcher_scripts/conf/training/nemotron/nemotron_15b.yaml

Lines changed: 0 additions & 1 deletion
@@ -144,7 +144,6 @@ model:
   mcore_gpt: True

   ## Transformer Engine
-  # fp8 training is currently not supported in the improved models
   transformer_engine: True
   fp8: False # enables fp8 in TransformerLayer forward
   fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3

launcher_scripts/conf/training/nemotron/nemotron_22b.yaml

Lines changed: 0 additions & 1 deletion
@@ -144,7 +144,6 @@ model:
   mcore_gpt: True

   ## Transformer Engine
-  # fp8 training is currently not supported in the improved models
   transformer_engine: True
   fp8: False # enables fp8 in TransformerLayer forward
   fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3

launcher_scripts/conf/training/nemotron/nemotron_8b.yaml

Lines changed: 0 additions & 1 deletion
@@ -144,7 +144,6 @@ model:
   mcore_gpt: True

   ## Transformer Engine
-  # fp8 training is currently not supported in the improved models
   transformer_engine: True
   fp8: False # enables fp8 in TransformerLayer forward
   fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3
