Skip to content

Commit a9a6332

Browse files
committed
fix: defer auto-round imports and fix tokenizer variable name
- Change default iters from 200 to 100 in usage comment
- Remove unused imports (load_dataset, get_rank_partition)
- Defer AutoRoundModifier import until after model loading
- Use get_dataset from auto_round.calib_dataset directly
- Fix model_name -> model_id for tokenizer loading

Signed-off-by: yiliu30 <yi4.liu@intel.com>
1 parent e670838 commit a9a6332

File tree

1 file changed

+8
-6
lines changed

1 file changed

+8
-6
lines changed

experimental/ddp/ddp_qwen3_example.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
# torchrun --nproc_per_node=2 ddp_qwen3_example.py \
88
# --model Qwen/Qwen3-8B \
99
# --nsamples 128 \
10-
# --iters 200 \
10+
# --iters 100 \
1111
# --disable_torch_compile \
1212
# --deterministic
1313
#############################################################################
@@ -19,13 +19,10 @@
1919
import torch
2020
import torch.distributed as dist
2121
from compressed_tensors.offload import dispatch_model, init_dist, load_offloaded_model
22-
from datasets import load_dataset
2322
from loguru import logger
2423
from transformers import AutoModelForCausalLM, AutoTokenizer
2524
import torch.distributed as dist
2625
from llmcompressor import oneshot
27-
from llmcompressor.datasets.utils import get_rank_partition
28-
from llmcompressor.modifiers.autoround import AutoRoundModifier
2926

3027

3128
def fix_everything(seed=42):
@@ -87,14 +84,19 @@ def config_deterministic():
8784
)
8885
##################################
8986

90-
tokenizer = AutoTokenizer.from_pretrained(model_name)
87+
tokenizer = AutoTokenizer.from_pretrained(model_id)
9188

9289
# Select calibration dataset.
9390
NUM_CALIBRATION_SAMPLES = args.nsamples
9491
MAX_SEQUENCE_LENGTH = 2048
9592
ITERS = args.iters
96-
# Get aligned calibration dataset.
9793

94+
# Make sure model are loaded before importing auto-round related code.
95+
# This requirement will be lifted once https://github.com/intel/auto-round/pull/1460 is merged.
96+
from llmcompressor.modifiers.autoround import AutoRoundModifier
97+
98+
# Get aligned calibration dataset.
99+
from auto_round.calib_dataset import get_dataset
98100
ds = get_dataset(
99101
tokenizer=tokenizer,
100102
seqlen=MAX_SEQUENCE_LENGTH,

0 commit comments

Comments (0)