Commit 5e876d2

Fix
1 parent 3538cae commit 5e876d2

File tree: 1 file changed (+2, -2 lines)

auto_fp8/quantize.py

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,6 @@
 import gc
 import re
 from typing import List, Tuple
-from tqdm import tqdm
 
 import torch
 import tqdm
@@ -196,7 +195,8 @@ def quantize_weights(
     quantize_config: BaseQuantizeConfig,
     ignored_layers: List[str] = [],
 ):
-    for name, linear in tqdm(model.named_modules(), desc="Quantizing weights"):
+    named_modules = list(model.named_modules())
+    for name, linear in tqdm.tqdm(named_modules, desc="Quantizing weights"):
         if (
             not isinstance(linear, torch.nn.Linear)
             or name in quantize_config.ignored_layers
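
Note on the change: the imports keep the module form (import tqdm) rather than the function form (from tqdm import tqdm), so the call site must use tqdm.tqdm(...). Materializing model.named_modules() into a list first also gives tqdm a sequence with a known length, so the progress bar can show a total rather than a bare counter. Below is a minimal, self-contained sketch of that pattern; the toy Sequential model and the loop body are illustrative only, not the repository's quantization code.

import torch
import tqdm

# Illustrative toy model; any nn.Module containing Linear layers works the same way.
model = torch.nn.Sequential(
    torch.nn.Linear(16, 32),
    torch.nn.ReLU(),
    torch.nn.Linear(32, 8),
)

# list() exhausts the named_modules() generator up front, so tqdm knows the
# total number of modules and can render a percentage and ETA.
named_modules = list(model.named_modules())
for name, module in tqdm.tqdm(named_modules, desc="Quantizing weights"):
    if not isinstance(module, torch.nn.Linear):
        continue
    # The real quantize_weights would quantize the layer here; this sketch only
    # demonstrates the iteration and progress-bar pattern.
    print(name, tuple(module.weight.shape))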
