diff --git a/.ci/scripts/test_huggingface_optimum_model.py b/.ci/scripts/test_huggingface_optimum_model.py
index cd7a7c2124e..05b25299522 100644
--- a/.ci/scripts/test_huggingface_optimum_model.py
+++ b/.ci/scripts/test_huggingface_optimum_model.py
@@ -369,7 +369,7 @@ def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
     ),  # fails to lower for CoreML
     "smollm2-135m": ("HuggingFaceTB/SmolLM2-135M", test_text_generation),
     "smollm3-3b": ("HuggingFaceTB/SmolLM3-3B", test_text_generation),
-    "olmo": ("allenai/OLMo-1B-hf", test_text_generation),
+    "olmo-1b": ("allenai/OLMo-1B-hf", test_text_generation),
 }
 
 _mask_fill_mapping = {
diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
index ee17524acce..0bfac92311e 100644
--- a/.github/workflows/trunk.yml
+++ b/.github/workflows/trunk.yml
@@ -836,14 +836,14 @@ jobs:
     strategy:
       matrix:
         config: [
-          # XNNPack.
-          llama3.2-1b|xnnpack|--quantize,
-          qwen3-0.6b|xnnpack|--quantize,
-          qwen3-1.7b|xnnpack|--quantize,
-          gemma3-1b|xnnpack|--quantize,
-          phi4-mini|xnnpack|--quantize,
-          smollm2-135m|xnnpack|--quantize,
-          smollm3-3b|xnnpack|--quantize,
+          # # XNNPack. (Skipping for now due to intermittent segmentation faults, see https://github.com/huggingface/optimum-executorch/issues/122.)
+          # llama3.2-1b|xnnpack|--quantize,
+          # qwen3-0.6b|xnnpack|--quantize,
+          # qwen3-1.7b|xnnpack|--quantize,
+          # gemma3-1b|xnnpack|--quantize,
+          # phi4-mini|xnnpack|--quantize,
+          # smollm2-135m|xnnpack|--quantize,
+          # smollm3-3b|xnnpack|--quantize,
           # CoreML.
           llama3.2-1b|coreml_fp32_gpu|--quantize,
           qwen3-0.6b|coreml_fp32_gpu|--quantize,