@@ -96,9 +96,7 @@ def model_free_ptq(
     )

     # 1. validate quantizable tensors — fail fast before long-running quantization
-    validate_jobs = _make_validate_jobs(
-        jobs, model_files, scheme, ignore, device, converter
-    )
+    validate_jobs = _make_validate_jobs(jobs)
     exec_jobs(validate_jobs, max_workers, desc="Validating")

     # 2-5. quantize and compress weights
@@ -213,14 +211,7 @@ def _build_microscale_jobs(
     return jobs


-def _make_validate_jobs(
-    jobs: list[tuple],
-    model_files: dict[str, str],
-    scheme: QuantizationScheme,
-    ignore: Iterable[str],
-    device: torch.device,
-    converter: Converter | None,
-) -> list[tuple]:
+def _make_validate_jobs(jobs: list[tuple]) -> list[tuple]:
     """
     Build validate_file jobs corresponding to the quantization jobs.
     For group jobs, creates one validate_file call per file in the group.
@@ -231,13 +222,10 @@ def _make_validate_jobs(
         if fn is process_file_group_microscale_scheme:
             # job = (fn, file_paths, save_paths, scheme, ignore, device, converter)
             file_paths, save_paths = job[1], job[2]
+            other_args = job[3:]
             for fp, sp in zip(file_paths, save_paths):
-                validate_jobs.append(
-                    (validate_file, fp, sp, scheme, ignore, device, converter)
-                )
+                validate_jobs.append((validate_file, fp, sp, *other_args))
         else:
             # job = (fn, file_path, save_path, scheme, ignore, device, converter)
-            validate_jobs.append(
-                (validate_file, job[1], job[2], scheme, ignore, device, converter)
-            )
+            validate_jobs.append((validate_file, *job[1:]))
     return validate_jobs
0 commit comments