Skip to content

Commit 28c8bbf

Browse files
committed
remove post calib hook
Signed-off-by: Kinjal Patel <[email protected]>
1 parent 15ffb87 commit 28c8bbf

File tree

2 files changed

+0
-10
lines changed

2 files changed

+0
-10
lines changed

modelopt/torch/quantization/mode.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -208,8 +208,6 @@ def wrapped_calib_func(
208208
forward_loop and the relevant kwargs and are independent of the ModelOpt framework.
209209
So let's wrap them to be compatible with the ModelOpt convert entrypoint.
210210
"""
211-
from .plugins.custom import register_custom_post_calibration_plugins
212-
213211
kwargs = config.model_dump()
214212
method = kwargs.pop("method")
215213
if method is not None and "awq" in method:
@@ -220,7 +218,6 @@ def wrapped_calib_func(
220218
# Call the function with forward_loop as a separate argument
221219
func(model, forward_loop=forward_loop, **kwargs)
222220

223-
register_custom_post_calibration_plugins(model)
224221
# Lets get the latest metadata for the quantizer states
225222
metadata = {}
226223
update_quantize_metadata(model, config, metadata)

modelopt/torch/quantization/plugins/custom.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@
3030

3131
CUSTOM_MODEL_PLUGINS = set()
3232
CUSTOM_POST_CONVERSION_PLUGINS = set()
33-
CUSTOM_POST_CALIBRATION_PLUGINS = set()
3433

3534

3635
# TODO: This is a temporary solution
@@ -47,12 +46,6 @@ def register_custom_post_conversion_plugins(model):
4746
callback(model)
4847

4948

50-
def register_custom_post_calibration_plugins(model):
51-
"""Registers custom modules as QUANT_MODULE after calibration."""
52-
for callback in CUSTOM_POST_CALIBRATION_PLUGINS:
53-
callback(model)
54-
55-
5649
class _QuantFunctionalMixin(QuantModule):
5750
"""Mixin class for quantized functionals.
5851

0 commit comments

Comments (0)