We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent fecbba3 · commit aff0d9e — Copy full SHA for aff0d9e
test/3x/torch/quantization/weight_only/test_transformers.py
@@ -212,6 +212,7 @@ def test_use_layer_wise(self):
212
woq_output_download = woq_model(dummy_input)[0]
213
assert torch.equal(woq_output_download, woq_output)
214
215
+ @pytest.mark.skipif(Version(transformers.__version__) > Version("4.52.0"), reason="modeling_opt.py changed.")
216
def test_loading_autoawq_model(self):
217
user_model = AutoModelForCausalLM.from_pretrained(self.autoawq_model)
218
tokenizer = AutoTokenizer.from_pretrained(self.autoawq_model)
0 commit comments