@@ -82,58 +82,58 @@ jobs:
82 82   # Build and test ExecuTorch
83 83   PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"
84 84
85- test-models-linux :
86- name : test-models-linux
87- uses : pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
88- permissions :
89- id-token : write
90- contents : read
91- strategy :
92- matrix :
93- model : [linear, add, add_mul, ic3, mv2, resnet18, resnet50, mobilebert, emformer_transcribe]
94- backend : [portable, xnnpack-quantization-delegation]
95- runner : [linux.2xlarge]
96- include :
97- - model : ic4
98- backend : portable
99- runner : linux.4xlarge.memory
100- - model : ic4
101- backend : xnnpack-quantization-delegation
102- runner : linux.4xlarge.memory
103- - model : emformer_join
104- backend : portable
105- runner : linux.4xlarge.memory
106- - model : emformer_join
107- backend : xnnpack-quantization-delegation
108- runner : linux.4xlarge.memory
109- - model : phi_4_mini
110- backend : portable
111- runner : linux.4xlarge.memory
112- - model : llama3_2_vision_encoder
113- backend : portable
114- runner : linux.4xlarge.memory
115- - model : w2l
116- backend : portable
117- runner : linux.4xlarge.memory
118- fail-fast : false
119- with :
120- runner : ${{ matrix.runner }}
121- docker-image : ci-image:executorch-ubuntu-22.04-clang12
122- submodules : ' recursive'
123- ref : ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
124- timeout : 90
125- script : |
126- # The generic Linux job chooses to use base env, not the one setup by the image
127- CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
128- conda activate "${CONDA_ENV}"
85+ # test-models-linux:
86+ # name: test-models-linux
87+ # uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
88+ # permissions:
89+ # id-token: write
90+ # contents: read
91+ # strategy:
92+ # matrix:
93+ # model: [linear, add, add_mul, ic3, mv2, resnet18, resnet50, mobilebert, emformer_transcribe]
94+ # backend: [portable, xnnpack-quantization-delegation]
95+ # runner: [linux.2xlarge]
96+ # include:
97+ # - model: ic4
98+ # backend: portable
99+ # runner: linux.4xlarge.memory
100+ # - model: ic4
101+ # backend: xnnpack-quantization-delegation
102+ # runner: linux.4xlarge.memory
103+ # - model: emformer_join
104+ # backend: portable
105+ # runner: linux.4xlarge.memory
106+ # - model: emformer_join
107+ # backend: xnnpack-quantization-delegation
108+ # runner: linux.4xlarge.memory
109+ # - model: phi_4_mini
110+ # backend: portable
111+ # runner: linux.4xlarge.memory
112+ # - model: llama3_2_vision_encoder
113+ # backend: portable
114+ # runner: linux.4xlarge.memory
115+ # - model: w2l
116+ # backend: portable
117+ # runner: linux.4xlarge.memory
118+ # fail-fast: false
119+ # with:
120+ # runner: ${{ matrix.runner }}
121+ # docker-image: ci-image:executorch-ubuntu-22.04-clang12
122+ # submodules: 'recursive'
123+ # ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
124+ # timeout: 90
125+ # script: |
126+ # # The generic Linux job chooses to use base env, not the one setup by the image
127+ # CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
128+ # conda activate "${CONDA_ENV}"
129 129
130- MODEL_NAME=${{ matrix.model }}
131- BUILD_TOOL=cmake
132- BACKEND=${{ matrix.backend }}
130+ # MODEL_NAME=${{ matrix.model }}
131+ # BUILD_TOOL=cmake
132+ # BACKEND=${{ matrix.backend }}
133 133
134- PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"
135- # Build and test ExecuTorch
136- PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"
134+ # PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"
135+ # # Build and test ExecuTorch
136+ # PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"
137 137
138 138   test-llama-runner-linux:
139 139   # Test Both linux x86 and linux aarch64
0 commit comments