File tree: 3 files changed, +3 −3 lines changed
examples/models/llama/source_transformation — 3 files changed, +3 −3 lines changed

Original line | Diff line | Diff change
13 13
14 14  import torch
15 15  from torch import nn
16    - from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear
   16 + from torchao.quantization.linear_quant_modules import Int8DynActInt4WeightLinear
17 17  from torchao.quantization.quant_api import _replace_with_custom_fn_if_matches_filter
18 18
19 19
Original line | Diff line | Diff change
13 13  import torch
14 14  from torch import nn
15 15
16    - from torchao.quantization.GPTQ import _check_linear_int4_k, Int8DynActInt4WeightLinear
   16 + from torchao.quantization.linear_quant_modules import _check_linear_int4_k, Int8DynActInt4WeightLinear
17 17  from torchao.quantization.quant_api import _replace_with_custom_fn_if_matches_filter
18 18
19 19  from .quantize import Int8DynActInt8WeightLinear, QuantizedGroupEmbedding
Original line | Diff line | Diff change
@@ -847,7 +847,7 @@ def set_8da4w_computation_dtype(
847 847      module: nn.Module, computation_dtype: torch.dtype
848 848  ) -> nn.Module:
849 849
850     - from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear
    850 + from torchao.quantization.linear_quant_modules import Int8DynActInt4WeightLinear
851 851
852 852      def _set_8da4w_computation_dtype(module: nn.Module, dtype: torch.dtype) -> None:
853 853          """
You can’t perform that action at this time.
0 commit comments