import argparse
import subprocess
import tempfile
from pathlib import Path

import torch
from datasets import load_dataset

from optimum.executorch import (
    ExecuTorchModelForCausalLM,
    ExecuTorchModelForImageClassification,
    ExecuTorchModelForMaskedLM,
    ExecuTorchModelForSeq2SeqLM,
    ExecuTorchModelForSpeechSeq2Seq,
)
from transformers import (
    AutoConfig,
    AutoModelForImageClassification,
    AutoProcessor,
    AutoTokenizer,
)


def cli_export(command, model_dir):
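    """Run an optimum-cli export command, refusing to write into an existing non-empty path."""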
    p = Path(model_dir)
    if p.exists():
        if not p.is_dir():
            raise Exception(f"Path {model_dir} already exists and is not a directory.")
        if any(p.iterdir()):
            raise Exception(
                f"Existing directory {model_dir} is non-empty. Please remove it first."
            )
    try:
        subprocess.run(command, check=True)
        print("Export completed successfully.")
    except subprocess.CalledProcessError as e:
        print(f"Export failed with error: {e}")


def test_text_generation(model_id, model_dir, recipe, *, quantize=True, run_only=False):
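    """Export a causal LM with the given recipe, then sanity-check it with a short generation."""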
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "text-generation",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if "coreml" in recipe:
        command += [
            "--disable_dynamic_shapes",
        ]
        if quantize:
            command += [
                "--qlinear",
                "4w",
                "--qembedding",
                "8w",
            ]
    else:
        assert not quantize, "Quantization is not supported for non-CoreML recipes yet"

    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.save_pretrained(model_dir)
    model = ExecuTorchModelForCausalLM.from_pretrained(model_dir)
    generated_text = model.text_generation(
        tokenizer=tokenizer,
        prompt="Simply put, the theory of relativity states that",
        max_seq_len=64,
    )
    print(f"\nGenerated text:\n\t{generated_text}")


def test_fill_mask(model_id, model_dir, recipe, *, quantize=True, run_only=False):
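    """Export a masked-LM encoder, then print its top-5 predictions for a masked token."""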
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "fill-mask",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if "coreml" in recipe and quantize:
        command += [
            "--qlinear",
            "4w",
            "--qembedding",
            "8w",
        ]
    else:
        assert not quantize, "Quantization is not supported for non-CoreML recipes yet"

    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForMaskedLM.from_pretrained(model_dir)
    input_text = f"Paris is the {tokenizer.mask_token} of France."
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        padding="max_length",
        max_length=10,
    )

    # Test inference using the ExecuTorch model. Index 4 is the expected position
    # of the mask token in the tokenized prompt (e.g. [CLS] paris is the [MASK] ...).
    exported_outputs = model.forward(inputs["input_ids"], inputs["attention_mask"])
    predicted_masks = tokenizer.decode(exported_outputs[0, 4].topk(5).indices)
    print(f"\nInput text:\n\t{input_text}\nPredicted masks:\n\t{predicted_masks}")


def test_t5(model_id, model_dir, recipe, *, quantize=False, run_only=False):
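    """Export google-t5/t5-small for text2text-generation and spot-check a summarization prompt."""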
    assert not quantize, "Quantization is not supported for T5 model yet"

    assert model_id == "google-t5/t5-small"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "text2text-generation",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForSeq2SeqLM.from_pretrained(model_dir)
    article = (
        " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A"
        " year later, she got married again in Westchester County, but to a different man and without divorcing"
        " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos"
        ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married'
        " once more, this time in the Bronx. In an application for a marriage license, she stated it was her"
        ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false'
        ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage'
        " license application, according to court documents. Prosecutors said the marriages were part of an"
        " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to"
        " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was"
        " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New"
        " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total,"
        " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All"
        " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be"
        " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors"
        " said the immigration scam involved some of her husbands, who filed for permanent residence status"
        " shortly after the marriages. Any divorces happened only after such filings were approved. It was"
        " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District"
        " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's"
        ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,'
        " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his"
        " native Pakistan after an investigation by the Joint Terrorism Task Force."
    )
    article = "summarize: " + article.strip()

    generated_text = model.text_generation(
        tokenizer=tokenizer,
        prompt=article,
    )
    expected_text = 'a year later, she got married again in westchester county, new york. she was married to a different man, but only 18 days after that marriage. she is facing two criminal counts of "offering a false instrument"'
    print(f"Generated text:\n\t{generated_text}")
    print(f"Expected text:\n\t{expected_text}")


def test_whisper(model_id, model_dir, recipe, *, quantize=False, run_only=False):
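    """Export openai/whisper-tiny and transcribe a trimmed LibriSpeech sample."""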
    assert not quantize, "Quantization is not supported for whisper model yet"

    assert model_id == "openai/whisper-tiny"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "automatic-speech-recognition",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForSpeechSeq2Seq.from_pretrained(model_dir)
    processor = AutoProcessor.from_pretrained(model_id)
    dataset = load_dataset(
        "distil-whisper/librispeech_long", "clean", split="validation"
    )
    sample = dataset[0]["audio"]

    input_features = processor(
        sample["array"],
        return_tensors="pt",
        truncation=False,
        sampling_rate=sample["sampling_rate"],
    ).input_features

    # The current transcribe implementation accepts at most 30 seconds of audio
    # (3000 mel frames), so trim the features here.
    input_features_trimmed = input_features[:, :, :3000].contiguous()

    generated_transcription = model.transcribe(tokenizer, input_features_trimmed)
    expected_text = " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similarly drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins work is really Greek after all, and can discover that."
    print(f"Generated transcription: {generated_transcription}")
    print(f"Expected transcription: {expected_text}")


def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
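    """Export google/vit-base-patch16-224 and compare ExecuTorch logits against eager on random input."""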
    assert not quantize, "Quantization is not supported for ViT models yet."

    assert model_id == "google/vit-base-patch16-224"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "image-classification",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    config = AutoConfig.from_pretrained(model_id)
    batch_size = 1
    num_channels = config.num_channels
    height = config.image_size
    width = config.image_size
    pixel_values = torch.rand(batch_size, num_channels, height, width)

    # Load the exported ExecuTorch model and compare its output against the eager model
    et_model = ExecuTorchModelForImageClassification.from_pretrained(model_id=model_dir)
    eager_model = (
        AutoModelForImageClassification.from_pretrained(model_id).eval().to("cpu")
    )
    with torch.no_grad():
        eager_output = eager_model(pixel_values)
        et_output = et_model.forward(pixel_values)

    assert torch.allclose(
        eager_output.logits, et_output, atol=1e-02, rtol=1e-02
    ), "ExecuTorch output does not match eager"


if __name__ == "__main__":
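    # Example invocation (the recipe value below is illustrative; pass whichever
    # recipe strings your optimum-executorch install supports, e.g. a CoreML
    # recipe to exercise the "coreml" branches above):
    #   python <this_script>.py --model smollm --recipe coreml_fp32 --quantize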
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--recipe", type=str, required=True)
    parser.add_argument("--quantize", action="store_true", help="Enable quantization")
    args = parser.parse_args()

    model_to_model_id_and_test_function = {
        "smollm": ("HuggingFaceTB/SmolLM2-135M", test_text_generation),  # works
        "qwen3": ("Qwen/Qwen3-0.6B", test_text_generation),  # works
        "olmo": ("allenai/OLMo-1B-hf", test_text_generation),  # works
        "gemma3": ("unsloth/gemma-3-1b-it", test_text_generation),  # does not export
        "phi4": (
            "microsoft/Phi-4-mini-instruct",
            test_text_generation,
        ),  # fails to lower
        "llama3": ("NousResearch/Llama-3.2-1B", test_text_generation),  # works
        "bert": ("google-bert/bert-base-uncased", test_fill_mask),  # works
        "roberta": ("FacebookAI/xlm-roberta-base", test_fill_mask),  # works
        "distilbert": ("distilbert/distilbert-base-uncased", test_fill_mask),  # works
        "whisper": ("openai/whisper-tiny", test_whisper),  # works
        "t5": ("google-t5/t5-small", test_t5),  # CoreML runtime failure
        "vit": ("google/vit-base-patch16-224", test_vit),  # works
    }
    if args.model not in model_to_model_id_and_test_function:
        raise ValueError(
            f"Unknown model name: {args.model}. Available models: {list(model_to_model_id_and_test_function.keys())}"
        )

    with tempfile.TemporaryDirectory() as tmp_dir:
        model_id, test_fn = model_to_model_id_and_test_function[args.model]
        test_fn(
            model_id=model_id,
            model_dir=tmp_dir,
            recipe=args.recipe,
            quantize=args.quantize,
        )