@@ -72,12 +72,33 @@ def quantize( # noqa C901
     if qmode == "int8":
         # Add quantization mode options here: group size, bit width, etc.
         return WeightOnlyInt8QuantHandler(model).quantized_model()
-    elif qmode.startswith("torchao:"):
+    elif qmode.startswith("torchao:fpa"):
+        pattern = r"torchao:fpa(\d+)w"
+        matches = re.findall(pattern, qmode)
+        assert len(matches) == 1, f"Expected 1 match for pattern but got {len(matches)}"
+        bitwidth = int(matches[0][0])
+        _load_torchao_aten_lib(
+            libname="libtorchao_ops_mps_linear_fp_act_xbit_weight_aten"
+        )
+        from torchao.experimental.quant_api import UIntxWeightOnlyLinearQuantizer
+
+        with torch.no_grad():
+            model = UIntxWeightOnlyLinearQuantizer(
+                device="mps",
+                precision=torch.float32,
+                groupsize=group_size,
+                bitwidth=bitwidth,
+            ).quantize(model).to("cpu")
+
+        if verbose:
+            print("quantized model:", model)
+        return model
+    elif qmode.startswith("torchao:8da"):
         pattern = r"torchao:8da(\d+)w"
         matches = re.findall(pattern, qmode)
         assert len(matches) == 1, f"Expected 1 match for pattern but got {len(matches)}"
         bitwidth = int(matches[0][0])
-        _load_torchao_ops_aten()
+        _load_torchao_aten_lib(libname="libtorchao_ops_aten")
         from torchao.experimental.quant_api import Int8DynActIntxWeightLinearQuantizer
 
         with torch.no_grad():
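The old catch-all `torchao:` branch is split into two prefixes: `torchao:fpa<N>w` (float activations, N-bit weights, lowered via the MPS kernels) and `torchao:8da<N>w` (8-bit dynamic activations, N-bit weights). Below is a minimal, self-contained sketch of how both branches extract the bit width; the qmode values are hypothetical examples, not taken from the diff's call sites:

```python
import re

# Hypothetical qmode values illustrating the two prefixes.
for qmode in ("torchao:fpa4w", "torchao:8da4w"):
    prefix = "fpa" if qmode.startswith("torchao:fpa") else "8da"
    matches = re.findall(rf"torchao:{prefix}(\d+)w", qmode)
    assert len(matches) == 1, f"Expected 1 match for pattern but got {len(matches)}"
    # The diff uses matches[0][0] (first character of the captured digits),
    # which agrees with int(matches[0]) only for single-digit bit widths.
    bitwidth = int(matches[0])
    print(qmode, "->", bitwidth)  # torchao:fpa4w -> 4, torchao:8da4w -> 4
```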
@@ -729,7 +750,7 @@ def get_quant_embedding_transform(args):
         bitwidth, group_size = args.embedding_quantize.split(":")[1].split(",")
         group_size = int(group_size)
         bitwidth = int(bitwidth)
-        _load_torchao_ops_aten()
+        _load_torchao_aten_lib(libname="libtorchao_ops_aten")
         from torchao.experimental.quant_api import IntxWeightEmbeddingQuantizer
 
         def _torchao_embedding_quantizer(model):
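For the embedding path, `args.embedding_quantize` carries both parameters in one string. A minimal sketch of the split, assuming a value of the form `torchao:<bitwidth>,<group_size>` (the example value is hypothetical):

```python
# Hypothetical CLI value, e.g. --embedding_quantize "torchao:4,32".
embedding_quantize = "torchao:4,32"

bitwidth, group_size = embedding_quantize.split(":")[1].split(",")
bitwidth = int(bitwidth)      # 4
group_size = int(group_size)  # 32
```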
@@ -785,15 +806,15 @@ def get_quant_weight_transform(args, dtype_override, verbose):
     )
 
 
-def _load_torchao_ops_aten():
+def _load_torchao_aten_lib(libname):
     import glob
     import os
 
     libs = glob.glob(
         os.path.abspath(
             os.path.join(
                 os.environ.get("CMAKE_INSTALL_PREFIX", ""),
-                "lib/libtorchao_ops_aten.*",
+                f"lib/{libname}.*",
             )
         )
     )
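The hunk cuts off after building the glob result, so the loading step itself is not shown. A sketch of how such a helper plausibly finishes, assuming exactly one matching library and `torch.ops.load_library` for registration (the last two statements are assumptions, not part of the diff):

```python
import glob
import os

import torch


def _load_torchao_aten_lib(libname):
    # Locate the shared library under $CMAKE_INSTALL_PREFIX/lib,
    # matching any platform suffix (.so, .dylib, ...).
    libs = glob.glob(
        os.path.abspath(
            os.path.join(
                os.environ.get("CMAKE_INSTALL_PREFIX", ""),
                f"lib/{libname}.*",
            )
        )
    )
    # Assumed completion: the hunk ends before these lines.
    assert len(libs) == 1, f"Expected 1 library but got {len(libs)}"
    torch.ops.load_library(libs[0])  # register the custom ATen ops
```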