diff --git a/.github/workflows/macos-job.yml b/.github/workflows/macos-job.yml new file mode 100644 index 00000000000..3b791a0988a --- /dev/null +++ b/.github/workflows/macos-job.yml @@ -0,0 +1,179 @@ +name: Run a macOS job + +on: + workflow_call: + inputs: + script: + description: 'Script to utilize' + default: "python setup.py bdist_wheel" + type: string + timeout: + description: 'Timeout for the job (in minutes)' + default: 30 + type: number + runner: + description: 'Runner type to utilize' + default: "macos-12" + type: string + python-version: + description: Set the python version used in the job + required: false + type: string + default: "3.9" + upload-artifact: + description: 'Name to give artifacts uploaded from ${RUNNER_ARTIFACT_DIR}' + default: "" + type: string + download-artifact: + description: 'Name to download artifacts to ${RUNNER_ARTIFACT_DIR}' + default: "" + type: string + repository: + description: 'Repository to checkout, defaults to ""' + default: "" + type: string + fetch-depth: + description: 'Number of commits to fetch, defaults to 1 similar to actions/checkout' + default: 1 + type: number + submodules: + description: + Same as actions/checkout, set to `true` to checkout submodules or `recursive` to + recursively checkout everything + default: "" + type: string + ref: + description: 'Reference to checkout, defaults to "nightly"' + default: "" + type: string + test-infra-repository: + description: "Test infra repository to use" + default: "pytorch/test-infra" + type: string + test-infra-ref: + description: "Test infra reference to use" + default: "" + type: string + job-name: + description: "Name for the job, which is displayed in the GitHub UI" + default: "macos-job" + type: string + continue-on-error: + description: "Prevents a job from failing when a step fails. Set to true to allow a job to pass when exec script step fails."
+ default: false + type: boolean + binary-matrix: + description: "If we are calling this workflow with binary build matrix entry, will initialize matrix entries and env vars" + required: false + default: '' + type: string + +jobs: + job: + name: ${{ inputs.job-name }} + env: + REPOSITORY: ${{ inputs.repository || github.repository }} + SCRIPT: ${{ inputs.script }} + runs-on: ${{ inputs.runner }} + timeout-minutes: ${{ inputs.timeout }} + steps: + - name: Clean workspace + run: | + echo "::group::Cleanup debug output" + rm -rfv "${GITHUB_WORKSPACE}" + mkdir -p "${GITHUB_WORKSPACE}" + echo "::endgroup::" + + - name: Checkout repository (${{ inputs.test-infra-repository }}@${{ inputs.test-infra-ref }}) + uses: actions/checkout@v3 + with: + # Support the use case where we need to checkout someone's fork + repository: ${{ inputs.test-infra-repository }} + ref: ${{ inputs.test-infra-ref }} + path: test-infra + + - name: Setup miniconda + uses: ./test-infra/.github/actions/setup-miniconda + with: + python-version: ${{ inputs.python-version }} + + - name: Checkout repository (${{ inputs.repository || github.repository }}@${{ inputs.ref }}) + uses: actions/checkout@v3 + with: + # Support the use case where we need to checkout someone's fork + repository: ${{ inputs.repository || github.repository }} + ref: ${{ inputs.ref || github.ref }} + path: ${{ inputs.repository || github.repository }} + fetch-depth: ${{ inputs.fetch-depth }} + submodules: ${{ inputs.submodules }} + + - name: Setup useful environment variables + working-directory: ${{ inputs.repository }} + run: | + RUNNER_ARTIFACT_DIR="${RUNNER_TEMP}/artifacts" + mkdir -p "${RUNNER_ARTIFACT_DIR}" + echo "RUNNER_ARTIFACT_DIR=${RUNNER_ARTIFACT_DIR}" >> "${GITHUB_ENV}" + + RUNNER_TEST_RESULTS_DIR="${RUNNER_TEMP}/test-results" + mkdir -p "${RUNNER_TEST_RESULTS_DIR}" + echo "RUNNER_TEST_RESULTS_DIR=${RUNNER_TEST_RESULTS_DIR}" >> "${GITHUB_ENV}" + + - name: Download artifacts (if any) + uses: actions/download-artifact@v3 + if: ${{ inputs.download-artifact != '' }} + with: + name: ${{ inputs.download-artifact }} + path: ${{ runner.temp }}/artifacts/ + + - name: Run script + shell: bash -l {0} + continue-on-error: ${{ inputs.continue-on-error }} + working-directory: ${{ inputs.repository }} + run: | + { + echo "#!/usr/bin/env bash"; + echo "set -eou pipefail"; + # Source conda so it's available to the script environment + echo 'eval "$(conda shell.bash hook)"'; + echo "${SCRIPT}"; + } > "${RUNNER_TEMP}/exec_script" + while read line; do + eval "export ${line}" + done < "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}" + bash "${RUNNER_TEMP}/exec_script" + + - name: Surface failing tests + if: always() + uses: pmeier/pytest-results-action@v0.3.0 + with: + path: ${{ env.RUNNER_TEST_RESULTS_DIR }} + fail-on-empty: false + + - name: Setup upterm session + uses: owenthereal/action-upterm@v1 + with: + limit-access-to-actor: true + + - name: Check if there are potential artifacts and move them to the correct artifact location + shell: bash -l {0} + working-directory: ${{ inputs.repository }} + id: check-artifacts + if: ${{ inputs.upload-artifact != '' }} + env: + UPLOAD_ARTIFACT_NAME: ${{ inputs.upload-artifact }} + run: | + # If the default execution path is followed then we should get a wheel in the dist/ folder + # attempt to just grab whatever is in there and scoop it all up + if find "dist/" -name "*.whl" >/dev/null 2>/dev/null; then + mv -v dist/*.whl "${RUNNER_ARTIFACT_DIR}/" + fi + # Set to fail upload step if there are no files for upload and expected files for upload + echo 'if-no-files-found=error' >>
"${GITHUB_OUTPUT}" + + - name: Upload artifacts to GitHub (if any) + uses: actions/upload-artifact@v3 + if: ${{ inputs.upload-artifact != '' }} + with: + name: ${{ inputs.upload-artifact }} + path: ${{ runner.temp }}/artifacts/ + if-no-files-found: ${{ steps.check-artifacts.outputs.if-no-files-found }} diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index f7d2b627bc5..c58f83ca077 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -13,431 +13,34 @@ concurrency: cancel-in-progress: true jobs: - gather-models: - runs-on: ubuntu-22.04 - outputs: - models: ${{ steps.gather-models.outputs.models }} - steps: - - uses: actions/checkout@v3 - with: - submodules: 'false' - - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - name: Extract the list of models to test - id: gather-models - run: | - set -eux - - PYTHONPATH="${PWD}" python .ci/scripts/gather_test_models.py --event "${GITHUB_EVENT_NAME}" - - test-setup-linux-gcc: - name: test-setup-linux-gcc - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main + debug: + uses: ./.github/workflows/macos-job.yml strategy: - matrix: - include: - - build-tool: cmake fail-fast: false with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-gcc9 + runner: macos-m1-stable + python-version: '3.11' submodules: 'true' ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} timeout: 90 script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - BUILD_TOOL=${{ matrix.build-tool }} - - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - # Build and test ExecuTorch with the add model on portable backend. 
- PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "add" "${BUILD_TOOL}" "portable" - - test-models-linux: - name: test-models-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - needs: gather-models - strategy: - matrix: ${{ fromJSON(needs.gather-models.outputs.models) }} - fail-fast: false - with: - runner: ${{ matrix.runner }} - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: ${{ matrix.timeout }} - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - MODEL_NAME=${{ matrix.model }} - BUILD_TOOL=${{ matrix.build-tool }} - BACKEND=${{ matrix.backend }} - DEMO_BACKEND_DELEGATION=${{ matrix.demo_backend_delegation }} - - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - # Build and test ExecuTorch - PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" "${DEMO_BACKEND_DELEGATION}" - - test-llama-runner-linux: - name: test-llama-runner-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - dtype: [fp32] - build-tool: [buck2, cmake] - mode: [portable, xnnpack+custom, xnnpack+custom+qe] - include: - - dtype: bf16 - build-tool: cmake - mode: portable - - dtype: bf16 - build-tool: buck2 - mode: portable - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 900 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" + BUILD_TOOL=cmake + BACKEND=coreml - DTYPE=${{ matrix.dtype }} - BUILD_TOOL=${{ matrix.build-tool }} - MODE=${{ matrix.mode }} + cd pytorch/executorch + bash .ci/scripts/setup-conda.sh - # Setup executorch - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh buck2 - # Install requirements for export_llama - PYTHON_EXECUTABLE=python bash examples/models/llama2/install_requirements.sh - # Test llama2 - PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M "${BUILD_TOOL}" "${DTYPE}" "${MODE}" - - test-llama-runner-linux-android: - name: test-llama-runner-linux-android - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - include: - - build-tool: cmake - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12-android - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - BUILD_TOOL=${{ matrix.build-tool }} - PYTHON_EXECUTABLE=python \ - bash .ci/scripts/build_llama_android.sh "${BUILD_TOOL}" - - test-custom-ops-linux: - name: test-custom-ops-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - include: - - build-tool: buck2 - - build-tool: cmake - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ 
github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - BUILD_TOOL=${{ matrix.build-tool }} - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - # Test custom ops - PYTHON_EXECUTABLE=python bash examples/portable/custom_ops/test_custom_ops.sh "${BUILD_TOOL}" - - test-selective-build-linux: - name: test-selective-build-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - include: - - build-tool: buck2 - - build-tool: cmake - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - BUILD_TOOL=${{ matrix.build-tool }} - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - # Test selective build - PYTHON_EXECUTABLE=python bash examples/selective_build/test_selective_build.sh "${BUILD_TOOL}" - - test-llava-runner-linux: - name: test-llava-runner-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - fail-fast: false - with: - runner: linux.24xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake" - - # install pybind - bash install_requirements.sh --pybind xnnpack - - # install Llava requirements - bash examples/models/llama2/install_requirements.sh - bash examples/models/llava/install_requirements.sh - - # run python unittest - python -m unittest examples.models.llava.test.test_llava - - # run e2e (export, tokenizer and runner) - PYTHON_EXECUTABLE=python bash .ci/scripts/test_llava.sh - - test-quantized-aot-lib-linux: - name: test-quantized-aot-lib-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - include: - - build-tool: cmake - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - BUILD_TOOL=${{ matrix.build-tool }} - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - PYTHON_EXECUTABLE=python bash examples/xnnpack/quantization/test_quantize.sh "${BUILD_TOOL}" mv2 - - test-pybind-build-linux: - name: test-pybind-build-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - include: - - build-tool: cmake - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: 
${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - # build module for executorch.extension.pybindings.portable_lib - BUILD_TOOL=${{ matrix.build-tool }} - PYTHON_EXECUTABLE=python \ - EXECUTORCH_BUILD_XNNPACK=ON \ - EXECUTORCH_BUILD_PYBIND=ON \ - bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - - # see if we can import the module successfully - python -c "from executorch.extension.pybindings import portable_lib; print('success!')" - - test-binary-size-linux-gcc: - name: test-binary-size-linux-gcc - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-gcc9 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - # build module for executorch.extension.pybindings.portable_lib - bash test/build_size_test.sh - strip cmake-out/test/size_test - output=$(ls -la cmake-out/test/size_test) - arr=($output) - size=${arr[4]} - # threshold=48120 on devserver with gcc11.4 - # todo(lfq): update once binary size is below 50kb. - threshold="51504" - if [[ "$size" -le "$threshold" ]]; then - echo "Success $size <= $threshold" - else - echo "Fail $size > $threshold" - exit 1 - fi - - test-binary-size-linux: - name: test-binary-size-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - # build module for executorch.extension.pybindings.portable_lib - bash test/build_size_test.sh - strip cmake-out/test/size_test - output=$(ls -la cmake-out/test/size_test) - arr=($output) - size=${arr[4]} - # threshold=48120 on devserver with gcc11.4 - # todo(lfq): update once binary size is below 50kb. 
- threshold="51784" - if [[ "$size" -le "$threshold" ]]; then - echo "Success $size <= $threshold" - else - echo "Fail $size > $threshold" - exit 1 - fi - - unittest: - uses: ./.github/workflows/_unittest.yml - with: - docker-image: executorch-ubuntu-22.04-clang12 - - unittest-arm: - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - include: - - build-tool: buck2 - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-arm-sdk - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - set -eux - - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - BUILD_TOOL=${{ matrix.build-tool }} + # xcode-select --install # Setup MacOS dependencies as there is no Docker support on MacOS atm - PYTHON_EXECUTABLE=python \ - EXECUTORCH_BUILD_PYBIND=ON \ - EXECUTORCH_BUILD_ARM_BAREMETAL=ON \ - .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - - source .ci/scripts/utils.sh - # Install Arm dependencies - install_arm - - # Run pytest with coverage - pytest -c /dev/null -v -n auto --cov=./ --cov-report=xml backends/arm/test - - - test-llama-runner-qnn-linux: - name: test-llama-runner-qnn-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - dtype: [fp32] - build-tool: [cmake] - mode: [qnn] - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12-android - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 900 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - DTYPE=${{ matrix.dtype }} - BUILD_TOOL=${{ matrix.build-tool }} - MODE=${{ matrix.mode }} - - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh - PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh - - # Setup executorch - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh buck2 - # Install requirements for export_llama - PYTHON_EXECUTABLE=python bash examples/models/llama2/install_requirements.sh - # Test llama2 - PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M "${BUILD_TOOL}" "${DTYPE}" "${MODE}" - - test-phi-3-mini-runner-linux: - name: test-phi-3-mini-runner-linux - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - fail-fast: false - with: - runner: linux.24xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake" - - # install pybind - bash install_requirements.sh --pybind xnnpack - - # install phi-3-mini requirements - bash examples/models/phi-3-mini/install_requirements.sh - - # run e2e (export, tokenizer and runner) - PYTHON_EXECUTABLE=python bash .ci/scripts/test_phi_3_mini.sh + PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" + PYTHON_EXECUTABLE=python ${CONDA_RUN} bash 
backends/apple/coreml/scripts/install_requirements.sh + echo "Finishing installing coreml." + + # Build and test coreml model + MODELS=(mv3 ic4 resnet50 edsr mobilebert w2l) + for MODEL_NAME in "${MODELS[@]}"; do + echo "::group::Exporting coreml model: $MODEL_NAME" + PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" + echo "::endgroup::" + done diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml index d7130561fa6..7157b2fb767 100644 --- a/.github/workflows/trunk.yml +++ b/.github/workflows/trunk.yml @@ -1,436 +1,218 @@ -name: trunk +name: Run a macOS job on: - push: - branches: - - main - - release/* - tags: - - ciflow/trunk/* - pull_request: - paths: - - .ci/docker/ci_commit_pins/pytorch.txt - - .ci/scripts/** - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }} - cancel-in-progress: true + workflow_call: + inputs: + script: + description: 'Script to utilize' + default: "python setup.py bdist_wheel" + type: string + timeout: + description: 'Timeout for the job (in minutes)' + default: 30 + type: number + runner: + description: 'Runner type to utilize' + default: "macos-12" + type: string + upload-artifact: + description: | + Name to give artifacts uploaded from ${RUNNER_ARTIFACT_DIR}, all the wheel files + under dist/ and any files under artifacts-to-be-uploaded/ will be uploaded + default: "" + type: string + upload-artifact-to-s3: + description: | + Upload the artifact to S3 instead of GitHub. This is used for large artifacts like + exported model + required: false + default: false + type: boolean + download-artifact: + description: 'Name to download artifacts to ${RUNNER_ARTIFACT_DIR}' + default: "" + type: string + repository: + description: 'Repository to checkout, defaults to ""' + default: "" + type: string + fetch-depth: + description: 'Number of commits to fetch, defaults to 1 similar to actions/checkout' + default: 1 + type: number + submodules: + description: + Same as actions/checkout, set to `true` to checkout submodules or `recursive` to + recursively checkout everything + default: "" + type: string + ref: + description: 'Reference to checkout, defaults to "nightly"' + default: "" + type: string + test-infra-repository: + description: "Test infra repository to use" + default: "pytorch/test-infra" + type: string + test-infra-ref: + description: "Test infra reference to use" + default: "" + type: string + job-name: + description: "Name for the job, which is displayed in the GitHub UI" + default: "macos-job" + type: string + continue-on-error: + description: "Prevents a job from failing when a step fails. Set to true to allow a job to pass when exec script step fails." 
+ default: false + type: boolean + binary-matrix: + description: "If we are calling this workflow with binary build matrix entry, will initialize matrix entries and env vars" + required: false + default: '' + type: string + secrets-env: + description: "List of secrets to be exported to environment variables" + type: string + default: '' + python-version: + description: Set the python version used in the job + required: false + type: string + default: "3.9" jobs: - gather-models: - runs-on: ubuntu-22.04 - outputs: - models: ${{ steps.gather-models.outputs.models }} + job: + name: ${{ inputs.job-name }} + env: + REPOSITORY: ${{ inputs.repository || github.repository }} + SCRIPT: ${{ inputs.script }} + runs-on: ${{ inputs.runner }} + timeout-minutes: ${{ inputs.timeout }} steps: - - uses: actions/checkout@v3 - with: - submodules: 'false' - - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - name: Extract the list of models to test - id: gather-models + - name: Clean workspace run: | - set -eux - - PYTHONPATH="${PWD}" python .ci/scripts/gather_test_models.py --target-os macos --event "${GITHUB_EVENT_NAME}" - - test-models-macos: - name: test-models-macos - uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - needs: gather-models - strategy: - matrix: ${{ fromJSON(needs.gather-models.outputs.models) }} - fail-fast: false - with: - runner: ${{ matrix.runner }} - python-version: '3.11' - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: ${{ matrix.timeout }} - script: | - MODEL_NAME=${{ matrix.model }} - BUILD_TOOL=${{ matrix.build-tool }} - BACKEND=${{ matrix.backend }} - DEMO_BACKEND_DELEGATION=${{ matrix.demo_backend_delegation }} - - bash .ci/scripts/setup-conda.sh - # Setup MacOS dependencies as there is no Docker support on MacOS atm - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" - # Build and test xecutorch - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" "${DEMO_BACKEND_DELEGATION}" - - test-custom-ops-macos: - name: test-custom-ops-macos - uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - strategy: - matrix: - include: - - build-tool: cmake - fail-fast: false - with: - runner: macos-m1-stable - python-version: '3.11' - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - script: | - BUILD_TOOL=${{ matrix.build-tool }} - - bash .ci/scripts/setup-conda.sh - # Setup MacOS dependencies as there is no Docker support on MacOS atm - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" - # Build and test custom ops - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/portable/custom_ops/test_custom_ops.sh "${BUILD_TOOL}" - - test-selective-build-macos: - name: test-selective-build-macos - uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - strategy: - matrix: - include: - - build-tool: cmake - fail-fast: false - with: - runner: macos-m1-stable - python-version: '3.11' - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - script: | - BUILD_TOOL=${{ matrix.build-tool }} - - bash .ci/scripts/setup-conda.sh - # Setup MacOS dependencies as there is no Docker support on MacOS atm - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" - # Build and test 
selective build - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/selective_build/test_selective_build.sh "${BUILD_TOOL}" - - test-demo-backend-delegation: - name: test-demo-backend-delegation - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - include: - - build-tool: buck2 - - build-tool: cmake - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - BUILD_TOOL=${{ matrix.build-tool }} - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}" - # Test selective build - PYTHON_EXECUTABLE=python bash examples/portable/scripts/test_demo_backend_delegation.sh "${BUILD_TOOL}" - - test-arm-backend-delegation: - name: test-arm-backend-delegation - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-arm-sdk - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - source .ci/scripts/utils.sh - install_executorch - - install_arm - - # Increase number of files user can monitor to bypass buck failures. - # Hopefully this is high enough for this setup. - sudo sysctl fs.inotify.max_user_watches=1048576 # 1024 * 1024 - - # Test ethos-u delegate examples with run.sh - PYTHON_EXECUTABLE=python bash examples/arm/run.sh examples/arm/ethos-u-scratch/ - - test-arm-reference-delegation: - name: test-arm-reference-delegation - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-arm-sdk - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - - source .ci/scripts/utils.sh - install_executorch - - install_arm - - # Run arm unit tests - pytest -c /dev/null -v -n auto --cov=./ --cov-report=xml backends/arm/test - - test-coreml-delegate: - name: test-coreml-delegate - uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - with: - runner: macos-13-xlarge - python-version: '3.11' - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - BUILD_TOOL=cmake - - bash .ci/scripts/setup-conda.sh - # Setup MacOS dependencies as there is no Docker support on MacOS atm - GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" - # Build and test coreml delegate - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/build_all.sh - - test-pybind-build-macos: - name: test-pybind-build-macos - uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - strategy: - matrix: - include: - - build-tool: cmake - fail-fast: false - with: - runner: macos-m1-stable - python-version: '3.11' - submodules: 
'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 180 - script: | - bash .ci/scripts/setup-conda.sh - - # build module for executorch.extension.pybindings.portable_lib - BUILD_TOOL=${{ matrix.build-tool }} - EXECUTORCH_BUILD_PYBIND=ON PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" - - # see if we can import the module successfully - ${CONDA_RUN} python -c "from executorch.extension.pybindings import portable_lib; print('success!')" - - test-llama-runner-macos: - name: test-llama-runner-mac - uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - strategy: - matrix: - dtype: [fp32] - mode: [portable, xnnpack+kv+custom, mps, coreml] - include: - - dtype: bf16 - mode: portable - fail-fast: false - with: - runner: macos-m1-stable - python-version: '3.11' - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 900 - script: | - - DTYPE=${{ matrix.dtype }} - MODE=${{ matrix.mode }} - - bash .ci/scripts/setup-conda.sh - - # Setup executorch - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh cmake - - if [[ "${MODE}" == "mps" ]]; then - # Install mps delegate - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/mps/install_requirements.sh - echo "Finishing installing mps." - elif [[ "${MODE}" == "coreml" ]]; then - # Install coreml delegate - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/install_requirements.sh - echo "Finishing installing coreml." - fi - - # Install requirements for export_llama - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/models/llama2/install_requirements.sh - # Test llama2 - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh stories110M cmake "${DTYPE}" "${MODE}" - - # # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner. 
- # test-llava-runner-macos: - # name: test-llava-runner-macos - # uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - # strategy: - # fail-fast: false - # with: - # runner: macos-14-xlarge - # python-version: '3.11' - # submodules: 'true' - # ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - # timeout: 900 - # script: | - # BUILD_TOOL=cmake - - # bash .ci/scripts/setup-conda.sh - # # Setup MacOS dependencies as there is no Docker support on MacOS atm - # GITHUB_RUNNER=1 PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" - - # # install Llava requirements - # ${CONDA_RUN} bash examples/models/llama2/install_requirements.sh - # ${CONDA_RUN} bash examples/models/llava/install_requirements.sh - - # # run python unittest - # ${CONDA_RUN} python -m unittest examples.models.llava.test.test_llava - - # # run e2e (export, tokenizer and runner) - # PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llava.sh Release - - test-qnn-model: - name: test-qnn-model - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - strategy: - matrix: - dtype: [fp32] - model: [dl3, mv3, mv2, ic4, ic3, vit] - fail-fast: false - with: - runner: linux.2xlarge - docker-image: executorch-ubuntu-22.04-clang12-android - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 900 - script: | - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh cmake - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh - PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh - PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh ${{ matrix.model }} "cmake" "qnn" - - test-coreml-model: - name: test-coreml-model - uses: pytorch/test-infra/.github/workflows/macos_job.yml@main - strategy: - fail-fast: false - with: - runner: macos-m1-stable - python-version: '3.11' - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - BUILD_TOOL=cmake - BACKEND=coreml - - bash .ci/scripts/setup-conda.sh - - # Setup MacOS dependencies as there is no Docker support on MacOS atm - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}" - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash backends/apple/coreml/scripts/install_requirements.sh - echo "Finishing installing coreml." 
- - # Build and test coreml model - MODELS=(mv3 ic4 resnet50 edsr mobilebert w2l) - for MODEL_NAME in "${MODELS[@]}"; do - echo "::group::Exporting coreml model: $MODEL_NAME" - PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}" + echo "::group::Cleanup debug output" + rm -rfv "${GITHUB_WORKSPACE}" + mkdir -p "${GITHUB_WORKSPACE}" echo "::endgroup::" - done - - test-huggingface-transformers: - name: test-huggingface-transformers - uses: pytorch/test-infra/.github/workflows/linux_job.yml@main - secrets: inherit - strategy: - matrix: - hf_model_repo: [google/gemma-2b] - fail-fast: false - with: - secrets-env: EXECUTORCH_HF_TOKEN - runner: linux.12xlarge - docker-image: executorch-ubuntu-22.04-clang12 - submodules: 'true' - ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} - timeout: 90 - script: | - echo "::group::Set up ExecuTorch" - # The generic Linux job chooses to use base env, not the one setup by the image - CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") - conda activate "${CONDA_ENV}" - PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh cmake - echo "Installing libexecutorch.a, libextension_module.so, libportable_ops_lib.a" - rm -rf cmake-out - cmake \ - -DCMAKE_INSTALL_PREFIX=cmake-out \ - -DCMAKE_BUILD_TYPE=Release \ - -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ - -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ - -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ - -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \ - -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ - -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ - -DEXECUTORCH_BUILD_XNNPACK=ON \ - -DPYTHON_EXECUTABLE=python \ - -Bcmake-out . - cmake --build cmake-out -j9 --target install --config Release + - name: Checkout repository (${{ inputs.test-infra-repository }}@${{ inputs.test-infra-ref }}) + uses: actions/checkout@v3 + with: + # Support the use case where we need to checkout someone's fork + repository: ${{ inputs.test-infra-repository }} + ref: ${{ inputs.test-infra-ref }} + path: test-infra - echo "Build llama runner" - dir="examples/models/llama2" - cmake \ - -DCMAKE_INSTALL_PREFIX=cmake-out \ - -DCMAKE_BUILD_TYPE=Release \ - -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \ - -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ - -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ - -DEXECUTORCH_BUILD_XNNPACK=ON \ - -DPYTHON_EXECUTABLE=python \ - -Bcmake-out/${dir} \ - ${dir} - cmake --build cmake-out/${dir} -j9 --config Release - echo "::endgroup::" + - name: Setup miniconda + uses: ./test-infra/.github/actions/setup-miniconda + with: + python-version: ${{ inputs.python-version }} - echo "::group::Set up HuggingFace Dependencies" - if [ -z "$SECRET_EXECUTORCH_HF_TOKEN" ]; then - echo "::error::SECRET_EXECUTORCH_HF_TOKEN is empty. For security reason secrets won't be accessible on forked PRs. Please make sure you submit a non-forked PR." 
- exit 1 - fi - pip install -U "huggingface_hub[cli]" - huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN - pip install accelerate sentencepiece - # TODO(guangyang): Switch to use released transformers library after all required patches are included - pip install "git+https://github.com/huggingface/transformers.git@6cc4dfe3f1e8d421c6d6351388e06e9b123cbfe1" - pip list - echo "::endgroup::" + - name: Checkout repository (${{ inputs.repository || github.repository }}@${{ inputs.ref }}) + uses: actions/checkout@v3 + with: + # Support the use case where we need to checkout someone's fork + repository: ${{ inputs.repository || github.repository }} + ref: ${{ inputs.ref || github.ref }} + path: ${{ inputs.repository || github.repository }} + fetch-depth: ${{ inputs.fetch-depth }} + submodules: ${{ inputs.submodules }} + + - name: Setup useful environment variables + working-directory: ${{ inputs.repository || github.repository }} + run: | + RUNNER_ARTIFACT_DIR="${RUNNER_TEMP}/artifacts" + mkdir -p "${RUNNER_ARTIFACT_DIR}" + echo "RUNNER_ARTIFACT_DIR=${RUNNER_ARTIFACT_DIR}" >> "${GITHUB_ENV}" - echo "::group::Export to ExecuTorch" - TOKENIZER_FILE=tokenizer.model - TOKENIZER_BIN_FILE=tokenizer.bin - ET_MODEL_NAME=et_model - # Fetch the file using a Python one-liner - DOWNLOADED_TOKENIZER_FILE_PATH=$(python -c " - from huggingface_hub import hf_hub_download - # Download the file from the Hugging Face Hub - downloaded_path = hf_hub_download( - repo_id='${{ matrix.hf_model_repo }}', - filename='${TOKENIZER_FILE}' - ) - print(downloaded_path) - ") - if [ -f "$DOWNLOADED_TOKENIZER_FILE_PATH" ]; then - echo "${TOKENIZER_FILE} downloaded successfully at: $DOWNLOADED_TOKENIZER_FILE_PATH" - python -m extension.llm.tokenizer.tokenizer -t $DOWNLOADED_TOKENIZER_FILE_PATH -o ./${TOKENIZER_BIN_FILE} - ls ./tokenizer.bin - else - echo "Failed to download ${TOKENIZER_FILE} from ${{ matrix.hf_model_repo }}." 
- exit 1 - fi + RUNNER_TEST_RESULTS_DIR="${RUNNER_TEMP}/test-results" + mkdir -p "${RUNNER_TEST_RESULTS_DIR}" + echo "RUNNER_TEST_RESULTS_DIR=${RUNNER_TEST_RESULTS_DIR}" >> "${GITHUB_ENV}" - python -m extension.export_util.export_hf_model -hfm=${{ matrix.hf_model_repo }} -o ${ET_MODEL_NAME} + - name: Download artifacts (if any) + uses: actions/download-artifact@v3 + if: ${{ inputs.download-artifact != '' }} + with: + name: ${{ inputs.download-artifact }} + path: ${{ runner.temp }}/artifacts/ - cmake-out/examples/models/llama2/llama_main --model_path=${ET_MODEL_NAME}.pte --tokenizer_path=${TOKENIZER_BIN_FILE} --prompt="My name is" - echo "::endgroup::" + - name: Export matrix variables (if any) + uses: ./test-infra/.github/actions/export-matrix-variables + if: ${{ inputs.binary-matrix != '' }} + with: + binary-matrix: ${{ inputs.binary-matrix }} + target-os: "macos" + + - name: Run script + shell: bash -l {0} + continue-on-error: ${{ inputs.continue-on-error }} + working-directory: ${{ inputs.repository || github.repository }} + env: + ALL_SECRETS: ${{ toJSON(secrets) }} + run: | + { + echo "#!/usr/bin/env bash"; + echo "set -eou pipefail"; + # Source conda so it's available to the script environment + echo 'eval "$(conda shell.bash hook)"'; + echo "${SCRIPT}"; + } > "${RUNNER_TEMP}/exec_script" + while read line; do + eval "export ${line}" + done < "${RUNNER_TEMP}/github_env_${GITHUB_RUN_ID}" + python3 "${{ github.workspace }}/test-infra/.github/scripts/run_with_env_secrets.py" "${{ inputs.secrets-env }}" + + - name: Surface failing tests + if: always() + uses: pmeier/pytest-results-action@v0.3.0 + with: + path: ${{ env.RUNNER_TEST_RESULTS_DIR }} + fail-on-empty: false + + - name: Check if there are potential artifacts and move them to the correct artifact location + shell: bash -l {0} + working-directory: ${{ inputs.repository || github.repository }} + id: check-artifacts + if: ${{ always() && inputs.upload-artifact != '' }} + env: + UPLOAD_ARTIFACT_NAME: ${{ inputs.upload-artifact }} + run: | + # If the default execution path is followed then we should get a wheel in the dist/ folder + # attempt to just grab whatever is in there and scoop it all up + if find "dist/" -name "*.whl" >/dev/null 2>/dev/null; then + mv -v dist/*.whl "${RUNNER_ARTIFACT_DIR}/" + fi + if [[ -d "artifacts-to-be-uploaded" ]]; then + mv -v artifacts-to-be-uploaded/* "${RUNNER_ARTIFACT_DIR}/" + fi + # Set to fail upload step if there are no files for upload and expected files for upload + echo 'if-no-files-found=error' >> "${GITHUB_OUTPUT}" + + # NB: Keep this for compatibility with existing jobs and also keep in mind that only + # our AWS runners have access to S3 + - name: Upload artifacts to GitHub (if any) + uses: actions/upload-artifact@v3 + if: ${{ always() && inputs.upload-artifact != '' && !inputs.upload-artifact-to-s3 }} + with: + name: ${{ inputs.upload-artifact }} + path: ${{ runner.temp }}/artifacts/ + if-no-files-found: ${{ steps.check-artifacts.outputs.if-no-files-found }} + + # NB: This only works with our AWS runners + - name: Upload artifacts to S3 (if any) + uses: seemethere/upload-artifact-s3@v5 + if: ${{ always() && inputs.upload-artifact != '' && inputs.upload-artifact-to-s3 }} + with: + retention-days: 14 + s3-bucket: gha-artifacts + s3-prefix: | + ${{ env.REPOSITORY }}/${{ github.run_id }}/artifacts + if-no-files-found: ${{ steps.check-artifacts.outputs.if-no-files-found }} + path: ${{ runner.temp }}/artifacts/ + + - name: Clean up disk space + if: always() + continue-on-error: true + uses: 
./test-infra/.github/actions/check-disk-space