File tree — 2 files changed: +10 −2 lines changed

@@ -1335,7 +1335,7 @@ def _quantize_ovbasemodel(
13351335 default_config = OVWeightQuantizationConfig (bits = 8 , sym = True )
13361336 else :
13371337 default_config = quantization_config
1338- else :
1338+ elif not isinstance ( quantization_config , OVPipelineQuantizationConfig ) :
13391339 #
13401340 # Hybrid/Full/Mixed quantization
13411341 #
@@ -1397,7 +1397,7 @@ def _quantize_ovbasemodel(
13971397 raise NotImplementedError ("Mixed precision quantization isn't supported for diffusers." )
13981398
13991399 default_config = quantization_config
1400- elif not isinstance ( quantization_config , OVPipelineQuantizationConfig ) :
1400+ else :
14011401 raise ValueError (f"Unsupported type of quantization config: { type (quantization_config )} " )
14021402
14031403 pipeline_quantization_config = (
@@ -1487,6 +1487,14 @@ class OVPipelineQuantizationTest(unittest.TestCase):
14871487 maxDiff = None
14881488
14891489 PIPELINE_QUANTIZATION_SCOPE = [
1490+ (
1491+ OVModelForCausalLM ,
1492+ "gpt2" ,
1493+ False ,
1494+ dict (quantization_configs = {"model" : dict (bits = 8 , weight_only = True )}),
1495+ {"model" : 0 },
1496+ {"model" : {"int8" : 44 }},
1497+ ),
14901498 (
14911499 OVModelForCausalLM ,
14921500 "llama" ,
You can’t perform that action at this time.
0 commit comments