1 parent 3538cae commit 5e876d2
auto_fp8/quantize.py
@@ -1,7 +1,6 @@
 import gc
 import re
 from typing import List, Tuple
-from tqdm import tqdm
 
 import torch
 import tqdm
@@ -196,7 +195,8 @@ def quantize_weights(
     quantize_config: BaseQuantizeConfig,
     ignored_layers: List[str] = [],
 ):
-    for name, linear in tqdm(model.named_modules(), desc="Quantizing weights"):
+    named_modules = list(model.named_modules())
+    for name, linear in tqdm.tqdm(named_modules, desc="Quantizing weights"):
         if (
             not isinstance(linear, torch.nn.Linear)
             or name in quantize_config.ignored_layers
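
Note: the call now goes through tqdm.tqdm, matching the kept import tqdm after the duplicate from tqdm import tqdm was dropped. Also, model.named_modules() returns a generator with no __len__, so the old call could only show a running count; materializing the modules into a list first gives the progress bar a total. A minimal sketch of the difference, using a toy model (the names below are illustrative, not from auto_fp8):

import torch
import tqdm

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 4))

# A generator has no length, so tqdm can only show a running item count:
for name, module in tqdm.tqdm(model.named_modules(), desc="no total"):
    pass

# list() gives tqdm a __len__, so it can render a percentage and ETA:
named_modules = list(model.named_modules())
for name, module in tqdm.tqdm(named_modules, desc="with total"):
    pass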