Skip to content

Commit b823629

Browse files
committed
fixes based on review comments
1 parent 84b2f0b commit b823629

File tree

4 files changed

+7
-7
lines changed

4 files changed

+7
-7
lines changed

model_compression_toolkit/core/common/mixed_precision/mixed_precision_search_manager.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,6 @@ def ensure_maxbit_minimal_metric(node_candidates_metrics, max_ind):
189189
metrics[max_ind] = max_val
190190
return metrics
191191

192-
position = int(self.progress_info_controller is not None)
193192
if self.progress_info_controller is not None:
194193
self.progress_info_controller.set_description('Research Mixed Precision')
195194

model_compression_toolkit/core/common/progress_config/progress_info_controller.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ def __new__(cls, *args, **kwargs):
6464

6565
def __post_init__(self):
6666
"""Create progress bar after initialization."""
67-
# Initial single bar mode (position=0, top of screen)
67+
# Initial single bar mode
6868
self.pbar = tqdm(
6969
total=self.total_step,
7070
desc=self.description,
@@ -112,11 +112,9 @@ def close(self):
112112
self.pbar = None
113113

114114

115-
def research_progress_total(
116-
core_config: 'CoreConfig',
117-
target_resource_utilization: 'ResourceUtilization' = None,
118-
gptq_config: 'GradientPTQConfig' = None,
119-
) -> int:
115+
def research_progress_total(core_config: 'CoreConfig',
116+
target_resource_utilization: 'ResourceUtilization' = None,
117+
gptq_config: 'GradientPTQConfig' = None) -> int:
120118
"""
121119
Check whether specific processing will be executed based on input arguments
122120
and calculate the total number of processing steps.

model_compression_toolkit/core/common/quantization/debug_config.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ class DebugConfig:
3030
network_editor (List[EditRule]): A list of rules and actions to edit the network for quantization.
3131
simulate_scheduler (bool): Simulate scheduler behavior to compute operators' order and cuts.
3232
bypass (bool): A flag to enable MCT bypass, which skips MCT runner and returns the input model unchanged.
33+
progress_info_callback (Callable): A user-defined callback function for retrieving progress information.
3334
"""
3435

3536
analyze_similarity: bool = False

model_compression_toolkit/gptq/runner.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ def _apply_gptq(gptq_config: GradientPTQConfig,
5858
fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices, groups of layers by how they should be quantized, etc.).
5959
fw_impl: Framework implementation per framework
6060
hessian_info_service: HessianInfoService to fetch information based on the hessian approximation for the float model.
61+
progress_info_controller: ProgressInfoController to display and manage overall progress information.
6162
Returns:
6263
6364
"""
@@ -100,6 +101,7 @@ def gptq_runner(tg: Graph,
100101
fw_impl: FrameworkImplementation object with a specific framework methods implementation.
101102
tb_w: A TensorBoardWriter object initialized with the logger dir path if it was set, or None otherwise.
102103
hessian_info_service: HessianScoresService to fetch approximations of the hessian scores for the float model.
104+
progress_info_controller: ProgressInfoController to display and manage overall progress information.
103105
104106
Returns:
105107
A graph after model weights GPTQ fine-tuning.

0 commit comments

Comments (0)