     QuantizationSpec,
     Quantizer,
 )
+from torchao.quantization.pt2e.quantizer.quantizer import Q_ANNOTATION_KEY
 
 
 act_qspec_asym8s = QuantizationSpec(
@@ -127,7 +128,7 @@ def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
 
         for output, *custom_spec in anchors.output:
             # pyre-ignore[16]: no attribute
-            output.meta["quantization_annotation"] = QuantizationAnnotation(
+            output.meta[Q_ANNOTATION_KEY] = QuantizationAnnotation(
                 # pyre-ignore[6]: incompatible parameter type
                 output_qspec=(custom_spec[0] if custom_spec else output_act_qspec),
                 _annotated=True,
@@ -143,7 +144,7 @@ def annotate_inputs(
             for node, idx, *custom_spec in inputs:
                 # pyre-ignore[16]: no attribute
                 annotation = node.meta.get(
-                    "quantization_annotation",
+                    Q_ANNOTATION_KEY,
                     QuantizationAnnotation(_annotated=True),
                 )
                 arg = (
@@ -157,21 +158,21 @@ def annotate_inputs(
                     custom_spec[0] if custom_spec else spec
                 )
                 # pyre-ignore[16]: no attribute
-                node.meta["quantization_annotation"] = annotation
+                node.meta[Q_ANNOTATION_KEY] = annotation
 
         def annotate_weights_or_biases(
             weights_or_biases: List[Tuple[fx.Node, int]],
             spec: Optional[QuantizationSpec],
         ) -> None:
             for node, idx, *custom_spec in weights_or_biases:
                 annotation = node.meta.get(
-                    "quantization_annotation",
+                    Q_ANNOTATION_KEY,
                     QuantizationAnnotation(_annotated=True),
                 )
                 annotation.input_qspec_map[node.args[idx]] = (
                     custom_spec[0] if custom_spec else spec
                 )
-                node.meta["quantization_annotation"] = annotation
 
+                node.meta[Q_ANNOTATION_KEY] = annotation
 
         # pyre-ignore[6]: incompatible parameter type
         annotate_inputs(anchors.inputs, input_act_qspec)
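For context, a minimal sketch (not part of the diff) of the pattern this change standardizes: the node-metadata key is read from torchao's shared constant instead of the hard-coded `"quantization_annotation"` string. It assumes `QuantizationAnnotation` is exported from the same torchao pt2e quantizer package that provides `Q_ANNOTATION_KEY`; the `node` and `output_act_qspec` arguments are placeholders.

```python
from torchao.quantization.pt2e.quantizer import QuantizationAnnotation
from torchao.quantization.pt2e.quantizer.quantizer import Q_ANNOTATION_KEY


def annotate_output(node, output_act_qspec):
    # Before: node.meta["quantization_annotation"] = QuantizationAnnotation(...)
    # After: the key comes from the library constant, so the annotation stays in
    # sync with whatever key torchao's pt2e prepare/convert passes look up.
    node.meta[Q_ANNOTATION_KEY] = QuantizationAnnotation(
        output_qspec=output_act_qspec,
        _annotated=True,
    )
```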