Skip to content

Commit eb3094c

Browse files
Author: George
Committed
Comments: 1
Parent: 5f1d383 · Commit: eb3094c

File tree

2 files changed: 1 addition, 3 deletions

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ Quantization is applied by selecting an algorithm and calling the `oneshot` API.
5858
```python
5959
from llmcompressor.modifiers.smoothquant import SmoothQuantModifier
6060
from llmcompressor.modifiers.quantization import GPTQModifier
61-
from llmcompressor.transformers import oneshot
61+
from llmcompressor import oneshot
6262

6363
# Select quantization algorithm. In this case, we:
6464
# * apply SmoothQuant to make the activations easier to quantize

examples/quantization_2of4_sparse_w4a16/llama7b_sparse_w4a16.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,6 @@
3333
bf16 = False # using full precision for training
3434
lr_scheduler_type = "cosine"
3535
warmup_ratio = 0.1
36-
preprocessing_num_workers = 8
3736

3837
# this will run the recipe stage by stage:
3938
# oneshot sparsification -> finetuning -> oneshot quantization
@@ -53,7 +52,6 @@
5352
learning_rate=learning_rate,
5453
lr_scheduler_type=lr_scheduler_type,
5554
warmup_ratio=warmup_ratio,
56-
preprocessing_num_workers=preprocessing_num_workers,
5755
)
5856
logger.info(
5957
"Note: llcompressor does not currently support running ",

0 commit comments

Comments
 (0)