From bfb5143cfa1ac76d7823de24229168443368f9db Mon Sep 17 00:00:00 2001
From: Guang Yang
Date: Mon, 28 Oct 2024 17:14:19 -0700
Subject: [PATCH] Albert is ExecuTorch compatible

---
 tests/models/albert/test_modeling_albert.py | 49 ++++++++++++++++++++-
 1 file changed, 48 insertions(+), 1 deletion(-)

diff --git a/tests/models/albert/test_modeling_albert.py b/tests/models/albert/test_modeling_albert.py
index d1e5631b342d..970f1dd8555e 100644
--- a/tests/models/albert/test_modeling_albert.py
+++ b/tests/models/albert/test_modeling_albert.py
@@ -16,7 +16,9 @@
 
 import unittest
 
-from transformers import AlbertConfig, is_torch_available
+from packaging import version
+
+from transformers import AlbertConfig, AutoTokenizer, is_torch_available
 from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, slow, torch_device
 
@@ -342,3 +344,48 @@ def test_inference_no_head_absolute_embedding(self):
         )
 
         self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+
+    @slow
+    def test_export(self):
+        if version.parse(torch.__version__) < version.parse("2.4.0"):
+            self.skipTest(reason="This test requires torch >= 2.4 to run.")
+
+        albert_model = "albert/albert-base-v2"
+        device = "cpu"
+        attn_implementation = "sdpa"
+        max_length = 64
+
+        tokenizer = AutoTokenizer.from_pretrained(albert_model)
+        inputs = tokenizer(
+            f"Paris is the {tokenizer.mask_token} of France.",
+            return_tensors="pt",
+            padding="max_length",
+            max_length=max_length,
+        )
+
+        model = AlbertForMaskedLM.from_pretrained(
+            albert_model,
+            device_map=device,
+            attn_implementation=attn_implementation,
+        )
+
+        # Sanity-check the eager model's top-5 predictions for the masked token (index 4)
+        logits = model(**inputs).logits
+        eg_predicted_mask = tokenizer.decode(logits[0, 4].topk(5).indices)
+        self.assertEqual(
+            eg_predicted_mask.split(),
+            ["capital", "capitol", "comune", "arrondissement", "bastille"],
+        )
+
+        # Export the eager model; strict=True traces with TorchDynamo
+        exported_program = torch.export.export(
+            model,
+            args=(inputs["input_ids"],),
+            kwargs={"attention_mask": inputs["attention_mask"]},
+            strict=True,
+        )
+
+        # The exported program must reproduce the eager predictions exactly
+        result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
+        ep_predicted_mask = tokenizer.decode(result.logits[0, 4].topk(5).indices)
+        self.assertEqual(eg_predicted_mask, ep_predicted_mask)
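
For reference, the new test stops at the torch.export stage; producing an actual on-device ExecuTorch artifact takes one more lowering step through the executorch package. A minimal sketch of that follow-on step, assuming the `executorch` pip package and its `exir.to_edge` API (neither is exercised by this patch):

    # Hypothetical follow-on lowering, not part of this patch.
    # Assumes `pip install executorch` and that `exported_program` is the
    # result of torch.export.export(...) as produced in test_export above.
    from executorch.exir import to_edge

    edge_program = to_edge(exported_program)   # torch.export IR -> Edge dialect
    et_program = edge_program.to_executorch()  # Edge dialect -> ExecuTorch program
    with open("albert.pte", "wb") as f:
        f.write(et_program.buffer)             # serialized .pte for the ExecuTorch runtime

The resulting .pte file is what the ExecuTorch runtime loads on device; the test above only verifies that the export step itself preserves the model's predictions.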