Merged
2 changes: 1 addition & 1 deletion .ci/docker/ci_commit_pins/optimum-executorch.txt
@@ -1 +1 @@
eea657ddbdeb1118943a92fb73c289985c3ee1ba
36e3dd54effb3f6d13d792029609292fdd5502bb
304 changes: 304 additions & 0 deletions .ci/scripts/test_huggingface_optimum_model.py
@@ -0,0 +1,304 @@
import argparse
import subprocess
import tempfile
from pathlib import Path

import torch
from datasets import load_dataset

from optimum.executorch import (
    ExecuTorchModelForCausalLM,
    ExecuTorchModelForImageClassification,
    ExecuTorchModelForMaskedLM,
    ExecuTorchModelForSeq2SeqLM,
    ExecuTorchModelForSpeechSeq2Seq,
)
from transformers import (
    AutoConfig,
    AutoModelForImageClassification,
    AutoProcessor,
    AutoTokenizer,
)


def cli_export(command, model_dir):
    p = Path(model_dir)
    if p.exists():
        if not p.is_dir():
            raise Exception(f"Path {model_dir} already exists and is not a directory.")
        if any(p.iterdir()):
            raise Exception(
                f"Existing directory {model_dir} is non-empty. Please remove it first."
            )
    try:
        subprocess.run(command, check=True)
        print("Export completed successfully.")
    except subprocess.CalledProcessError as e:
        print(f"Export failed with error: {e}")


def test_text_generation(model_id, model_dir, recipe, *, quantize=True, run_only=False):
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "text-generation",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if "coreml" in recipe:
        command += [
            "--disable_dynamic_shapes",
        ]
        if quantize:
            command += [
                "--qlinear",
                "4w",
                "--qembedding",
                "8w",
            ]
    else:
        assert not quantize, "Quantization is not supported for non-CoreML recipes yet"

    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.save_pretrained(model_dir)
    model = ExecuTorchModelForCausalLM.from_pretrained(model_dir)
    generated_text = model.text_generation(
        tokenizer=tokenizer,
        prompt="Simply put, the theory of relativity states that",
        max_seq_len=64,
    )
    print(f"\nGenerated text:\n\t{generated_text}")


def test_fill_mask(model_id, model_dir, recipe, *, quantize=True, run_only=False):
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "fill-mask",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if "coreml" in recipe and quantize:
        command += [
            "--qlinear",
            "4w",
            "--qembedding",
            "8w",
        ]
    else:
        assert not quantize, "Quantization is not supported for non-CoreML recipes yet"

    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForMaskedLM.from_pretrained(model_dir)
    input_text = f"Paris is the {tokenizer.mask_token} of France."
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        padding="max_length",
        max_length=10,
    )

    # Test inference using ExecuTorch model
    exported_outputs = model.forward(inputs["input_ids"], inputs["attention_mask"])
    predicted_masks = tokenizer.decode(exported_outputs[0, 4].topk(5).indices)
    print(f"\nInput text:\n\t{input_text}\nPredicted masks:\n\t{predicted_masks}")


def test_t5(model_id, model_dir, recipe, *, quantize=False, run_only=False):
    assert not quantize, "Quantization is not supported for T5 model yet"

    assert model_id == "google-t5/t5-small"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "text2text-generation",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForSeq2SeqLM.from_pretrained(model_dir)
    article = (
        " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A"
        " year later, she got married again in Westchester County, but to a different man and without divorcing"
        " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos"
        ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married'
        " once more, this time in the Bronx. In an application for a marriage license, she stated it was her"
        ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false'
        ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage'
        " license application, according to court documents. Prosecutors said the marriages were part of an"
        " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to"
        " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was"
        " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New"
        " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total,"
        " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All"
        " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be"
        " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors"
        " said the immigration scam involved some of her husbands, who filed for permanent residence status"
        " shortly after the marriages. Any divorces happened only after such filings were approved. It was"
        " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District"
        " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's"
        ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,'
        " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his"
        " native Pakistan after an investigation by the Joint Terrorism Task Force."
    )
    article = "summarize: " + article.strip()

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    generated_text = model.text_generation(
        tokenizer=tokenizer,
        prompt=article,
    )
    expected_text = 'a year later, she got married again in westchester county, new york. she was married to a different man, but only 18 days after that marriage. she is facing two criminal counts of "offering a false instrument"'
    print(f"Generated text:\n\t{generated_text}")
    print(f"Expected text:\n\t{expected_text}")


def test_whisper(model_id, model_dir, recipe, *, quantize=False, run_only=False):
    assert not quantize, "Quantization is not supported for whisper model yet"

    assert model_id == "openai/whisper-tiny"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "automatic-speech-recognition",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = ExecuTorchModelForSpeechSeq2Seq.from_pretrained(model_dir)
    processor = AutoProcessor.from_pretrained(model_id)
    dataset = load_dataset(
        "distil-whisper/librispeech_long", "clean", split="validation"
    )
    sample = dataset[0]["audio"]

    input_features = processor(
        sample["array"],
        return_tensors="pt",
        truncation=False,
        sampling_rate=sample["sampling_rate"],
    ).input_features

    # The current implementation of the transcribe method accepts up to 30 seconds of audio, so the audio is trimmed here.
    input_features_trimmed = input_features[:, :, :3000].contiguous()

    generated_transcription = model.transcribe(tokenizer, input_features_trimmed)
    expected_text = " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season of the year, with Christmas and roast beef looming before us, similarly drawn from eating and its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins work is really Greek after all, and can discover that."
    print(f"Generated transcription: {generated_transcription}")
    print(f"Expected transcription: {expected_text}")


def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
    assert not quantize, "Quantization is not supported for ViT models yet."

    assert model_id == "google/vit-base-patch16-224"
    command = [
        "optimum-cli",
        "export",
        "executorch",
        "--model",
        model_id,
        "--task",
        "image-classification",
        "--recipe",
        recipe,
        "--output_dir",
        model_dir,
    ]
    if not run_only:
        cli_export(command, model_dir)

    config = AutoConfig.from_pretrained(model_id)
    batch_size = 1
    num_channels = config.num_channels
    height = config.image_size
    width = config.image_size
    pixel_values = torch.rand(batch_size, num_channels, height, width)

    # Test fetching and lowering the model to ExecuTorch
    et_model = ExecuTorchModelForImageClassification.from_pretrained(model_id=model_dir)
    eager_model = (
        AutoModelForImageClassification.from_pretrained(model_id).eval().to("cpu")
    )
    with torch.no_grad():
        eager_output = eager_model(pixel_values)
        et_output = et_model.forward(pixel_values)

    assert torch.allclose(
        eager_output.logits, et_output, atol=1e-02, rtol=1e-02
    ), "CoreML output does not match eager"


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--recipe", type=str, required=True)
    parser.add_argument("--quantize", action="store_true", help="Enable quantization")
    args = parser.parse_args()

    model_to_model_id_and_test_function = {
        "smollm": ("HuggingFaceTB/SmolLM2-135M", test_text_generation),  # works
        "qwen3": ("Qwen/Qwen3-0.6B", test_text_generation),  # works
        "olmo": ("allenai/OLMo-1B-hf", test_text_generation),  # works
        "gemma3": ("unsloth/gemma-3-1b-it", test_text_generation),  # does not export
        "phi4": (
            "microsoft/Phi-4-mini-instruct",
            test_text_generation,
        ),  # fails to lower
        "llama3": ("NousResearch/Llama-3.2-1B", test_text_generation),  # works
        "bert": ("google-bert/bert-base-uncased", test_fill_mask),  # works
        "roberta": ("FacebookAI/xlm-roberta-base", test_fill_mask),  # works
        "distilbert": ("distilbert/distilbert-base-uncased", test_fill_mask),  # works
        "whisper": ("openai/whisper-tiny", test_whisper),  # works
        "t5": ("google-t5/t5-small", test_t5),  # CoreML runtime failure
        "vit": ("google/vit-base-patch16-224", test_vit),  # works
    }
    if args.model not in model_to_model_id_and_test_function:
        raise ValueError(
            f"Unknown model name: {args.model}. Available models: {model_to_model_id_and_test_function.keys()}"
        )

    with tempfile.TemporaryDirectory() as tmp_dir:
        model_id, test_fn = model_to_model_id_and_test_function[args.model]
        test_fn(
            model_id=model_id,
            model_dir=tmp_dir,
            recipe=args.recipe,
            quantize=args.quantize,
        )
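
For local reproduction, here is a minimal sketch of driving this harness the same way the CI job below does (assuming executorch and optimum-executorch are already installed in the active environment; the matrix entry "qwen3|coreml_fp32_gpu|--quantize" is used as the example):

    # Run one CI matrix entry locally through the test harness above.
    import subprocess

    subprocess.run(
        [
            "python",
            ".ci/scripts/test_huggingface_optimum_model.py",
            "--model", "qwen3",
            "--recipe", "coreml_fp32_gpu",
            "--quantize",
        ],
        check=True,
    )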
55 changes: 55 additions & 0 deletions .github/workflows/trunk.yml
@@ -799,6 +799,61 @@ jobs:

        echo "::endgroup::"

  test-huggingface-optimum-coreml:
    # NB: Don't run this on fork PRs because they won't have access to the secret and would fail anyway
    if: ${{ !github.event.pull_request.head.repo.fork }}
    name: test-huggingface-optimum-coreml
    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
    permissions:
      id-token: write
      contents: read
    secrets: inherit
    strategy:
      matrix:
        config: [
          qwen3|coreml_fp32_gpu|--quantize,
          smollm|coreml_fp32_gpu|--quantize,
          llama3|coreml_fp32_gpu|--quantize,
          olmo|coreml_fp32_gpu|--quantize,
        ]
      fail-fast: false
    with:
      secrets-env: EXECUTORCH_HF_TOKEN
      runner: macos-15-xlarge
      python-version: '3.11'
      submodules: 'recursive'
      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
      timeout: 90
      script: |
        set -eux
        IFS='|' read -r MODEL RECIPE QUANTIZE <<< "${{ matrix.config }}"
        echo "Model: $MODEL"
        echo "Recipe: $RECIPE"
Contributor: Where is the recipe defined?

Contributor Author: It's defined in optimum-executorch.
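
For context, a hedged sketch of how the same recipe name can be selected from the optimum-executorch Python API (assuming "coreml_fp32_gpu" is one of the recipes registered there, as this CI matrix implies; the snippet is illustrative and not part of the PR):

    # Export on the fly through the Python API instead of optimum-cli.
    # "coreml_fp32_gpu" is assumed to be a registered optimum-executorch recipe.
    from optimum.executorch import ExecuTorchModelForCausalLM
    from transformers import AutoTokenizer

    model_id = "HuggingFaceTB/SmolLM2-135M"
    model = ExecuTorchModelForCausalLM.from_pretrained(model_id, recipe="coreml_fp32_gpu")
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    print(model.text_generation(tokenizer=tokenizer, prompt="Hello", max_seq_len=16))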

echo "Quantize: $QUANTIZE"

echo "::group::Set up ExecuTorch"
bash .ci/scripts/setup-conda.sh
eval "$(conda shell.bash hook)"

# Install requirements
${CONDA_RUN} python install_executorch.py
echo "::endgroup::"

echo "::group::Set up Hugging Face"
pip install -U "huggingface_hub[cli]"
huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
Contributor: Oh, I didn't know there is a secret token, good to know.

        OPTIMUM_ET_COMMIT=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
        git clone https://github.com/huggingface/optimum-executorch
        pushd optimum-executorch
        # There is no release yet; for CI stability, always test the same pinned commit from main
        git checkout $OPTIMUM_ET_COMMIT
        ${CONDA_RUN} python install_dev.py --skip_override_torch
        popd
        ${CONDA_RUN} pip list
Contributor: Is it possible to make sure the executorch version we have installed here is the same as the CI sha?
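
(Not part of the PR, just a hedged sketch of one way to surface this: print the installed executorch package version next to the checked-out SHA so a mismatch is at least visible in the logs. How much of the SHA the wheel metadata actually encodes is an assumption, so this only enables a manual comparison.)

    # Print the installed executorch version alongside the CI checkout SHA for comparison.
    import subprocess
    from importlib.metadata import version

    installed = version("executorch")  # dev version string of whatever pip installed
    ci_sha = subprocess.run(
        ["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=True
    ).stdout.strip()
    print(f"installed executorch: {installed}")
    print(f"CI checkout sha:      {ci_sha}")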

echo "::endgroup::"

# Run test
${CONDA_RUN} python .ci/scripts/test_huggingface_optimum_model.py --model ${MODEL} --recipe ${RECIPE} ${QUANTIZE}

  test-llama-runner-qnn-linux:
    name: test-llama-runner-qnn-linux