@@ -555,11 +555,11 @@ jobs:
     strategy:
       matrix:
         hf_model_id: [
-          google/gemma-2-2b,
-          Qwen/Qwen2.5-0.5B,
+          google/gemma-3-1b-it,
+          Qwen/Qwen3-0.6B,
           HuggingFaceTB/SmolLM2-135M,
           meta-llama/Llama-3.2-1B,
-          allenai/OLMo-1B-hf
+          allenai/OLMo-1B-hf,
         ]
       fail-fast: false
     with:
@@ -569,44 +569,103 @@ jobs:
       submodules: 'recursive'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
       timeout: 90
+      upload-artifact: profiling-artifacts
+      upload-artifact-to-s3: true
       script: |
         echo "::group::Set up ExecuTorch"
         # The generic Linux job chooses to use the base env, not the one set up by the image
         CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
         conda activate "${CONDA_ENV}"
         PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool cmake
+        # Build executor_runner with ETDump enabled
+        PYTHON_EXECUTABLE=python cmake -DPYTHON_EXECUTABLE=python \
+          -DCMAKE_INSTALL_PREFIX=cmake-out \
+          -DEXECUTORCH_ENABLE_LOGGING=1 \
+          -DCMAKE_BUILD_TYPE=Release \
+          -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
+          -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
+          -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
+          -DEXECUTORCH_BUILD_XNNPACK=ON \
+          -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
+          -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
+          -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
+          -DEXECUTORCH_BUILD_DEVTOOLS=ON \
+          -DEXECUTORCH_ENABLE_EVENT_TRACER=ON \
+          -Bcmake-out .
+        cmake --build cmake-out -j16 --target install --config Release
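+        # Optional sanity check (an assumption, not part of the original change):
+        # confirm the ETDump-enabled runner used later in this script was produced, e.g.
+        #   test -x cmake-out/executor_runner || { echo 'executor_runner not built'; exit 1; }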
         echo "::endgroup::"

         echo "::group::Set up Hugging Face"
         pip install -U "huggingface_hub[cli]"
         huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
         git clone https://github.com/huggingface/optimum-executorch
-        cd optimum-executorch
+        pushd optimum-executorch
         # There is no release yet; for CI stability, always test the same commit on main
-        git checkout 577a2b19670e4c643a5c6ecb09bf47b9a699e7c6
+        git checkout da80c9e35b3db5c7eea8731b7d660482fb4870a8
         pip install .[tests]
+        popd
+
+        if [ "${{ matrix.hf_model_id }}" == "google/gemma-3-1b-it" ]; then
+          # Fixes for gemma-3 are not yet available in a released version
+          git clone https://github.com/huggingface/transformers.git
+          pushd transformers
+          git checkout a57274466f7f72efaa2662d1738cdaf28ae8071f
+          pip install -e .
+          popd
+        fi
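+        # Optional check (an assumption, not part of the original change): confirm which
+        # transformers installation Python resolves after the override above, e.g.
+        #   python -c 'import transformers; print(transformers.__version__, transformers.__file__)'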
         pip list
         echo "::endgroup::"

-        echo "::group::Export and Run ${{ matrix.hf_model_id }}"
+        echo "::group::Export to ExecuTorch"
         # Pass matrix variable as environment variable
         export MODEL_ID="${{ matrix.hf_model_id }}"
+        export OUTPUT_DIR="$(pwd)/${MODEL_ID}_custom_sdpa_8da4w"
+        pushd optimum-executorch
+
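+        # Note on the flags below (an inference from the _custom_sdpa_8da4w suffix,
+        # not stated in this change): --use_custom_sdpa selects ExecuTorch's custom
+        # SDPA kernel, and --qlinear quantizes linear layers to 8-bit dynamic
+        # activations with 4-bit weights (8da4w).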
+        optimum-cli export executorch \
+          --model ${MODEL_ID} \
+          --task text-generation \
+          --recipe xnnpack \
+          --use_custom_sdpa \
+          --output_dir ${OUTPUT_DIR} \
+          --qlinear
+
+        ls -FlAGhp ${OUTPUT_DIR}
+        popd
+        echo "::endgroup::"
+
+        echo "::group::Inference using python API"
+        pushd optimum-executorch
         python -c "
         import os
         from optimum.executorch import ExecuTorchModelForCausalLM
         from transformers import AutoTokenizer

         model_id = os.getenv('MODEL_ID')
-        print(f'Loading model: {model_id}')
-        model = ExecuTorchModelForCausalLM.from_pretrained(model_id, recipe='xnnpack')
-        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        pte_dir = os.getenv('OUTPUT_DIR')
+        print(f'Loading model {model_id} from {pte_dir}.')
+        model = ExecuTorchModelForCausalLM.from_pretrained(pte_dir)
         generated_text = model.text_generation(
-            tokenizer=tokenizer,
+            tokenizer=AutoTokenizer.from_pretrained(model_id),
             prompt='Simply put, the theory of relativity states that',
             max_seq_len=64
         )
         print(generated_text)
         "
+        popd
+        echo "::endgroup::"
+
+        echo "::group::Inference using executor_runner with ETDump"
+        ./cmake-out/executor_runner \
+          --model_path ${OUTPUT_DIR}/model.pte \
+          --etdump_path ${OUTPUT_DIR}/etdump.etdp
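+        # executor_runner executes the exported .pte end to end; because the runtime
+        # was built with EXECUTORCH_ENABLE_EVENT_TRACER=ON above, it also records
+        # per-operator timing events into the .etdp file consumed below.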
+
+        export TSV_PATH=artifacts-to-be-uploaded/${MODEL_ID}_op_prof.tsv
+        mkdir -p $(dirname "$TSV_PATH")
+        python3 -m devtools.inspector.inspector_cli \
+          --etdump_path ${OUTPUT_DIR}/etdump.etdp \
+          --tsv_path ${TSV_PATH}
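+        # The same ETDump can also be inspected programmatically; a minimal sketch
+        # using the devtools Inspector API (not part of this CI change):
+        #   python3 -c "
+        #   from executorch.devtools import Inspector
+        #   Inspector(etdump_path='${OUTPUT_DIR}/etdump.etdp').print_data_tabular()
+        #   "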
+
         echo "::endgroup::"

