Commit 7750116
Add CoreML CI jobs for optimum-executorch models (#12870)
This PR:

* Bumps the optimum-executorch pin in our CI
* Adds a new script .ci/scripts/test_huggingface_optimum_model.py to test optimum-executorch models
* Adds a new trunk job to run CoreML models

Currently the following models are added:

* qwen3 (quantized)
* smollm (quantized)
* llama3 (quantized)
* olmo (quantized)

The following models run for me locally with .ci/scripts/test_huggingface_optimum_model.py but are not yet running in CI (requires investigation):

* bert (quantized)
* roberta (quantized)
* distilbert (quantized)
* vit

This CI job complements the existing CI script .ci/scripts/test_model.sh, which is based on the ExecuTorch examples repo.
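For reference, the new script can be exercised locally with a command along these lines (a sketch, assuming ExecuTorch and optimum-executorch are already installed, as in the trunk job below):

    python .ci/scripts/test_huggingface_optimum_model.py --model qwen3 --recipe coreml_fp32_gpu --quantize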
1 parent cf2f170 commit 7750116

File tree

3 files changed: +360 -1 lines changed
.ci/docker/ci_commit_pins/optimum-executorch.txt

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
- eea657ddbdeb1118943a92fb73c289985c3ee1ba
+ 36e3dd54effb3f6d13d792029609292fdd5502bb
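The pin is a bare commit hash that the trunk job below reads with cat and checks out. A hypothetical way to bump it, assuming a local optimum-executorch checkout:

    git -C optimum-executorch rev-parse HEAD > .ci/docker/ci_commit_pins/optimum-executorch.txt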
.ci/scripts/test_huggingface_optimum_model.py

Lines changed: 304 additions & 0 deletions

@@ -0,0 +1,304 @@
import argparse
import subprocess
import tempfile
from pathlib import Path

import torch
from datasets import load_dataset
from optimum.executorch import (
    ExecuTorchModelForCausalLM,
    ExecuTorchModelForImageClassification,
    ExecuTorchModelForMaskedLM,
    ExecuTorchModelForSeq2SeqLM,
    ExecuTorchModelForSpeechSeq2Seq,
)
from transformers import (
    AutoConfig,
    AutoModelForImageClassification,
    AutoProcessor,
    AutoTokenizer,
)


def cli_export(command, model_dir):
    # Run the optimum-cli export as a subprocess; the output directory must be
    # absent or an empty directory.
    p = Path(model_dir)
    if p.exists():
        if not p.is_dir():
            raise Exception(f"Path {model_dir} already exists and is not a directory.")
        if any(p.iterdir()):
            raise Exception(
                f"Existing directory {model_dir} is non-empty. Please remove it first."
            )
    try:
        subprocess.run(command, check=True)
        print("Export completed successfully.")
    except subprocess.CalledProcessError as e:
        print(f"Export failed with error: {e}")

def test_text_generation(model_id, model_dir, recipe, *, quantize=True, run_only=False):
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "text-generation",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if "coreml" in recipe:
        command += [
            "--disable_dynamic_shapes",
        ]
        if quantize:
            # 4-bit weights for linear layers, 8-bit weights for embeddings
            command += [
                "--qlinear",
                "4w",
                "--qembedding",
                "8w",
            ]
    else:
        assert not quantize, "Quantization is not supported for non-CoreML recipes yet"

    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.save_pretrained(model_dir)
    model = ExecuTorchModelForCausalLM.from_pretrained(model_dir)
    generated_text = model.text_generation(
        tokenizer=tokenizer,
        prompt="Simply put, the theory of relativity states that",
        max_seq_len=64,
    )
    print(f"\nGenerated text:\n\t{generated_text}")

def test_fill_mask(model_id, model_dir, recipe, *, quantize=True, run_only=False):
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "fill-mask",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if "coreml" in recipe and quantize:
        command += [
            "--qlinear",
            "4w",
            "--qembedding",
            "8w",
        ]
    else:
        assert not quantize, "Quantization is not supported for non-CoreML recipes yet"

    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForMaskedLM.from_pretrained(model_dir)
    input_text = f"Paris is the {tokenizer.mask_token} of France."
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        padding="max_length",
        max_length=10,
    )

    # Test inference using ExecuTorch model
    exported_outputs = model.forward(inputs["input_ids"], inputs["attention_mask"])
    predicted_masks = tokenizer.decode(exported_outputs[0, 4].topk(5).indices)
    print(f"\nInput text:\n\t{input_text}\nPredicted masks:\n\t{predicted_masks}")

def test_t5(model_id, model_dir, recipe, *, quantize=False, run_only=False):
    assert not quantize, "Quantization is not supported for T5 model yet"

    assert model_id == "google-t5/t5-small"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "text2text-generation",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForSeq2SeqLM.from_pretrained(model_dir)
    article = (
        " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A"
        " year later, she got married again in Westchester County, but to a different man and without divorcing"
        " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos"
        ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married'
        " once more, this time in the Bronx. In an application for a marriage license, she stated it was her"
        ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false'
        ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage'
        " license application, according to court documents. Prosecutors said the marriages were part of an"
        " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to"
        " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was"
        " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New"
        " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total,"
        " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All"
        " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be"
        " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors"
        " said the immigration scam involved some of her husbands, who filed for permanent residence status"
        " shortly after the marriages. Any divorces happened only after such filings were approved. It was"
        " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District"
        " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's"
        ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,'
        " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his"
        " native Pakistan after an investigation by the Joint Terrorism Task Force."
    )
    article = "summarize: " + article.strip()

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    generated_text = model.text_generation(
        tokenizer=tokenizer,
        prompt=article,
    )
    expected_text = 'a year later, she got married again in westchester county, new york. she was married to a different man, but only 18 days after that marriage. she is facing two criminal counts of "offering a false instrument"'
    print(f"Generated text:\n\t{generated_text}")
    print(f"Expected text:\n\t{expected_text}")

def test_whisper(model_id, model_dir, recipe, *, quantize=False, run_only=False):
    assert not quantize, "Quantization is not supported for whisper model yet"

    assert model_id == "openai/whisper-tiny"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "automatic-speech-recognition",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForSpeechSeq2Seq.from_pretrained(model_dir)
    processor = AutoProcessor.from_pretrained(model_id)
    dataset = load_dataset(
        "distil-whisper/librispeech_long", "clean", split="validation"
    )
    sample = dataset[0]["audio"]

    input_features = processor(
        sample["array"],
        return_tensors="pt",
        truncation=False,
        sampling_rate=sample["sampling_rate"],
    ).input_features

    # The current implementation of the transcribe method accepts up to 30 seconds of audio, so the audio is trimmed here.
    input_features_trimmed = input_features[:, :, :3000].contiguous()

    generated_transcription = model.transcribe(tokenizer, input_features_trimmed)
    expected_text = " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similarly drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins work is really Greek after all, and can discover that."
    print(f"Generated transcription: {generated_transcription}")
    print(f"Expected transcription: {expected_text}")

def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
    assert not quantize, "Quantization is not supported for ViT models yet."

    assert model_id == "google/vit-base-patch16-224"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "image-classification",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    config = AutoConfig.from_pretrained(model_id)
    batch_size = 1
    num_channels = config.num_channels
    height = config.image_size
    width = config.image_size
    pixel_values = torch.rand(batch_size, num_channels, height, width)

    # Test fetching and lowering the model to ExecuTorch
    et_model = ExecuTorchModelForImageClassification.from_pretrained(model_id=model_dir)
    eager_model = (
        AutoModelForImageClassification.from_pretrained(model_id).eval().to("cpu")
    )
    with torch.no_grad():
        eager_output = eager_model(pixel_values)
        et_output = et_model.forward(pixel_values)

    assert torch.allclose(
        eager_output.logits, et_output, atol=1e-02, rtol=1e-02
    ), "CoreML output does not match eager"

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--recipe", type=str, required=True)
    parser.add_argument("--quantize", action="store_true", help="Enable quantization")
    args = parser.parse_args()

    model_to_model_id_and_test_function = {
        "smollm": ("HuggingFaceTB/SmolLM2-135M", test_text_generation),  # works
        "qwen3": ("Qwen/Qwen3-0.6B", test_text_generation),  # works
        "olmo": ("allenai/OLMo-1B-hf", test_text_generation),  # works
        "gemma3": ("unsloth/gemma-3-1b-it", test_text_generation),  # does not export
        "phi4": (
            "microsoft/Phi-4-mini-instruct",
            test_text_generation,
        ),  # fails to lower
        "llama3": ("NousResearch/Llama-3.2-1B", test_text_generation),  # works
        "bert": ("google-bert/bert-base-uncased", test_fill_mask),  # works
        "roberta": ("FacebookAI/xlm-roberta-base", test_fill_mask),  # works
        "distilbert": ("distilbert/distilbert-base-uncased", test_fill_mask),  # works
        "whisper": ("openai/whisper-tiny", test_whisper),  # works
        "t5": ("google-t5/t5-small", test_t5),  # CoreML runtime failure
        "vit": ("google/vit-base-patch16-224", test_vit),  # works
    }
    if args.model not in model_to_model_id_and_test_function:
        raise ValueError(
            f"Unknown model name: {args.model}. Available models: {model_to_model_id_and_test_function.keys()}"
        )

    with tempfile.TemporaryDirectory() as tmp_dir:
        model_id, test_fn = model_to_model_id_and_test_function[args.model]
        test_fn(
            model_id=model_id,
            model_dir=tmp_dir,
            recipe=args.recipe,
            quantize=args.quantize,
        )
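For illustration, with the qwen3 trunk config (model Qwen/Qwen3-0.6B, recipe coreml_fp32_gpu, quantization on), the command the script assembles and hands to cli_export would look roughly like this sketch, where <tmp_dir> stands in for the temporary output directory:

    optimum-cli export executorch \
        --model Qwen/Qwen3-0.6B \
        --task text-generation \
        --recipe coreml_fp32_gpu \
        --output_dir <tmp_dir> \
        --disable_dynamic_shapes \
        --qlinear 4w \
        --qembedding 8w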

.github/workflows/trunk.yml

Lines changed: 55 additions & 0 deletions
@@ -799,6 +799,61 @@ jobs:

        echo "::endgroup::"

  test-huggingface-optimum-coreml:
    # NB: Don't run this on fork PRs because they won't have access to the secret and would fail anyway
    if: ${{ !github.event.pull_request.head.repo.fork }}
    name: test-huggingface-optimum-coreml
    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
    permissions:
      id-token: write
      contents: read
    secrets: inherit
    strategy:
      matrix:
        config: [
          qwen3|coreml_fp32_gpu|--quantize,
          smollm|coreml_fp32_gpu|--quantize,
          llama3|coreml_fp32_gpu|--quantize,
          olmo|coreml_fp32_gpu|--quantize,
        ]
      fail-fast: false
    with:
      secrets-env: EXECUTORCH_HF_TOKEN
      runner: macos-15-xlarge
      python-version: '3.11'
      submodules: 'recursive'
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 90
      script: |
        set -eux
        IFS='|' read -r MODEL RECIPE QUANTIZE <<< "${{ matrix.config }}"
        echo "Model: $MODEL"
        echo "Recipe: $RECIPE"
        echo "Quantize: $QUANTIZE"

        echo "::group::Set up ExecuTorch"
        bash .ci/scripts/setup-conda.sh
        eval "$(conda shell.bash hook)"

        # Install requirements
        ${CONDA_RUN} python install_executorch.py
        echo "::endgroup::"

        echo "::group::Set up Hugging Face"
        pip install -U "huggingface_hub[cli]"
        huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
        OPTIMUM_ET_COMMIT=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
        git clone https://github.com/huggingface/optimum-executorch
        pushd optimum-executorch
        # There is no release yet; for CI stability, always test from the same pinned commit on main
        git checkout $OPTIMUM_ET_COMMIT
        ${CONDA_RUN} python install_dev.py --skip_override_torch
        popd
        ${CONDA_RUN} pip list
        echo "::endgroup::"

        # Run test
        ${CONDA_RUN} python .ci/scripts/test_huggingface_optimum_model.py --model ${MODEL} --recipe ${RECIPE} ${QUANTIZE}

  test-llama-runner-qnn-linux:
    name: test-llama-runner-qnn-linux
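As an aside, the pipe-delimited matrix entries are unpacked by the IFS read in the job script above; a standalone bash sketch of the same parsing:

    config='qwen3|coreml_fp32_gpu|--quantize'
    IFS='|' read -r MODEL RECIPE QUANTIZE <<< "$config"
    echo "$MODEL"     # qwen3
    echo "$RECIPE"    # coreml_fp32_gpu
    echo "$QUANTIZE"  # --quantize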
