Skip to content

Commit 72ca14f

Browse files
committed
refactor: extract _process_tensors_microscale to reduce duplication
Signed-off-by: David Zheng <dqzheng1996@gmail.com>
1 parent b2c1e80 commit 72ca14f

File tree

1 file changed

+4
-4
lines changed
  • src/llmcompressor/entrypoints/model_free

1 file changed

+4
-4
lines changed

src/llmcompressor/entrypoints/model_free/process.py

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -177,9 +177,9 @@ def process_file_group_microscale_scheme(
177177
"Use `process_file` or `process_file_microscale_scheme` for "
178178
"non-microscale schemes"
179179
)
180-
assert len(file_paths) == len(save_paths), (
181-
"file_paths and save_paths must have the same length"
182-
)
180+
assert len(file_paths) == len(
181+
save_paths
182+
), "file_paths and save_paths must have the same length"
183183

184184
# Load all tensors from the group, tracking which output shard each belongs to
185185
tensor_to_shard: dict[str, str] = {}
@@ -308,4 +308,4 @@ def _process_tensors_microscale(
308308
if tensor_to_shard is not None:
309309
tensor_to_shard[key] = original_shard
310310

311-
return tensors, tensor_to_shard
311+
return tensors, tensor_to_shard

0 commit comments

Comments (0)