Commit 5821576

Merge branch 'main' into jingyux/megatron-lora
2 parents: bde750b + 3a76d28

File tree

7 files changed (+45, -19 lines)

.gitlab/tests.yml

Lines changed: 1 addition & 9 deletions
@@ -54,20 +54,12 @@ example-torch:
   timeout: 30m
   parallel:
     matrix:
-      - EXAMPLE: [llm_distill, llm_sparsity, speculative_decoding]
+      - EXAMPLE: [llm_distill, llm_qat, llm_sparsity, speculative_decoding]
   script:
     - pip install ".[hf,dev-test]"
     - find examples/$EXAMPLE -name "requirements.txt" | while read req_file; do pip install -r "$req_file" || exit 1; done
     - pytest -s tests/examples/$EXAMPLE
 
-# TODO: Fix llm_qat test hang in GitLab CI
-example-failing:
-  extends: example-torch
-  allow_failure: true
-  parallel:
-    matrix:
-      - EXAMPLE: [llm_qat]
-
 example-trtllm:
   extends: example-torch
   timeout: 60m

docs/source/guides/7_nas.rst

Lines changed: 9 additions & 0 deletions
@@ -635,3 +635,12 @@ The difference between NAS and pruning is summarized below.
     increased training time.
   - May provide similar performance to NAS in particular applications, however, usually exhibits
     worse performance due to the limited search space and training time.
+
+
+[Advanced] Adding a new NAS/Prune Algorithm
+===========================================
+
+* Please refer to this `template <https://github.com/NVIDIA/TensorRT-Model-Optimizer/compare/template/new-nas-mode>`_
+  for adding a new NAS algorithm.
+* Please refer to `mcore_minitron.py <https://github.com/NVIDIA/TensorRT-Model-Optimizer/blob/main/modelopt/torch/prune/plugins/mcore_minitron.py>`_
+  for an actual example of adding Minitron Pruning algorithm.

examples/llm_distill/main.py

Lines changed: 32 additions & 5 deletions
@@ -13,10 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import logging
 import os
 from dataclasses import dataclass
 
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
 import datasets
 import torch
 import torch.distributed
@@ -29,10 +30,7 @@
 import modelopt.torch.opt as mto
 from modelopt.torch.distill.plugins.huggingface import KDTrainer, LMLogitsLoss
 
-os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
-
-logger = get_logger(__name__)
-logging.basicConfig(level=logging.INFO)
+logger = get_logger(__name__, log_level="INFO")
 
 
 @dataclass
@@ -69,6 +67,29 @@ class KDSFTTrainer(SFTTrainer, KDTrainer):
     pass
 
 
+def _save_model_fsdp_compat(
+    self,
+    output_dir: str | None = None,
+    _internal_call: bool = False,
+    *args,
+    **kwargs,
+):
+    output_dir = output_dir or self.args.output_dir
+    model = self.accelerator.unwrap_model(self.model)
+    if not _internal_call and self.is_fsdp_enabled:
+        state_dict = self.accelerator.get_state_dict(self.model)
+        if self.accelerator.is_main_process:
+            model.save_pretrained(
+                output_dir,
+                is_main_process=self.accelerator.is_main_process,
+                save_function=self.accelerator.save,
+                state_dict=state_dict,
+            )
+            self.processing_class.save_pretrained(output_dir)
+    else:
+        super(SFTTrainer, self).save_model(output_dir, _internal_call, *args, **kwargs)
+
+
 def train():
     parser = transformers.HfArgumentParser((ModelArguments, TrainingArguments))
     model_args, training_args = parser.parse_args_into_dataclasses()
@@ -77,6 +98,9 @@ def train():
     # modelopt state will be saved automatically to "modelopt_state.pth"
     mto.enable_huggingface_checkpointing()
 
+    # HACK: Fix FSDP2-incompatible save_model() function for SFTTrainer
+    SFTTrainer.save_model = _save_model_fsdp_compat
+
     # Set total batch size across all ranks to equal 64
     total_batch_size = 64
     num_accum_steps = total_batch_size / (
@@ -91,19 +115,22 @@
         f"Using {int(num_accum_steps)} grad accumulation steps for effective batchsize of {total_batch_size}."
     )
 
+    # Dataset
     logger.info("Loading dataset...")
     dset = datasets.load_dataset("Open-Orca/OpenOrca", split="train")
     dset_splits = dset.train_test_split(train_size=25600, test_size=1700, seed=420)
     dset_train, dset_eval = dset_splits["train"], dset_splits["test"]
     logger.info("Dataset loaded.")
 
+    # Tokenizer
     logger.info("Loading tokenizer...")
     model_path = model_args.teacher_name_or_path or model_args.student_name_or_path
     tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
     tokenizer.pad_token = tokenizer.eos_token
     tokenizer.padding_side = "right"
     logger.info("Tokenizer loaded.")
 
+    # Model
     if model_args.single_model:
         logger.info("Loading single model only...")
         model = transformers.AutoModelForCausalLM.from_pretrained(
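
Context on the change above: _save_model_fsdp_compat is a plain module-level function that train() rebinds onto SFTTrainer, so every trainer instance created afterwards uses the FSDP2-friendly save path. A minimal, self-contained sketch of that monkey-patching pattern follows; the class and function names here are hypothetical stand-ins, not the actual trainer code.

class Trainer:
    """Stand-in for a third-party trainer whose save path cannot easily be subclassed."""

    def save_model(self, output_dir: str) -> None:
        print(f"original save to {output_dir}")


def _patched_save_model(self, output_dir: str) -> None:
    # Replacement logic would go here (e.g. gathering a full state dict before saving).
    print(f"patched save to {output_dir}")


# Rebinding the class attribute replaces the method for every existing and future
# instance, which is how the FSDP2-compatible save path is injected above.
Trainer.save_model = _patched_save_model
Trainer().save_model("out/")  # prints "patched save to out/"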
examples/llm_distill/requirements.txt

Lines changed: 1 addition & 0 deletions

@@ -1,2 +1,3 @@
 pyarrow
+transformers<5.0
 trl>=0.23.0

modelopt/torch/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@
 
     if not (_Version("4.48") <= _Version(_transformers_version) < _Version("5.0")):
         _warnings.warn(
-            f"transformers version {_transformers_version} is incompatible with nvidia-modelopt and may cause issues. "
+            f"transformers version {_transformers_version} is not tested with nvidia-modelopt and may cause issues. "
             "Please install recommended version with `pip install nvidia-modelopt[hf]` if working with HF models.",
         )
 except ImportError:
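
For reference, the guard shown above warns when the installed transformers version falls outside the tested range. Below is a small standalone sketch of the same range-check pattern using packaging.version; the function name and version bounds are chosen purely for illustration.

import warnings

from packaging.version import Version


def warn_if_untested(installed: str, lower: str = "4.48", upper: str = "5.0") -> None:
    # Warn when the installed version is outside the half-open tested range [lower, upper).
    if not (Version(lower) <= Version(installed) < Version(upper)):
        warnings.warn(
            f"version {installed} is not tested (expected >= {lower} and < {upper}) "
            "and may cause issues."
        )


warn_if_untested("5.1.0")   # emits a UserWarning
warn_if_untested("4.57.0")  # silent: inside the tested range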

modelopt/torch/opt/plugins/__init__.py

Lines changed: 0 additions & 3 deletions
@@ -19,9 +19,6 @@
 
 from .huggingface import *
 
-with import_plugin("megatron core model config"):
-    from .megatron_model_config import *
-
 with import_plugin("megatron core dist checkpointing"):
     from .mcore_dist_checkpointing import *
 
tests/gpu/torch/nas/plugins/test_megatron_mamba_dynamic_modules.py

Lines changed: 1 addition & 1 deletion
@@ -173,7 +173,7 @@ def _test_mamba_parameter_sorting(rank, size):
     prompt_tokens = torch.randint(0, vocab_size, (batch_size, max_sequence_length)).cuda()
     y1 = run_mcore_inference(model, prompt_tokens)
 
-    dynamic_space.sort_parameters()
+    mtn.utils.sort_parameters(model)
 
     # check if all mamba_num_heads, mamba_head_dim, hidden_size have been sorted
     sortable_per_pp = [

0 commit comments