
Commit a6f6921

test
1 parent 1ef52fa commit a6f6921

File tree

1 file changed: +204 −163 lines

.github/workflows/metal.yml

Lines changed: 204 additions & 163 deletions
@@ -14,174 +14,215 @@ concurrency:
 jobs:
   test-metal-builds:
     name: test-executorch-metal-build
-    runs-on: macos-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v3
-        with:
-          submodules: recursive
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-
-      - name: Setup Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.10'
-
-      - name: Test ExecuTorch Metal build
-        run: |
-          set -eux
-
-          # Test ExecuTorch Metal build
-          PYTHON_EXECUTABLE=python3 CMAKE_ARGS="-DEXECUTORCH_BUILD_METAL=ON" ./install_executorch.sh
+    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
+    with:
+      runner: macos-m2-stable
+      python-version: '3.11'
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        set -eux
+
+        # Print machine info
+        uname -a
+        if [ $(uname -s) == Darwin ]; then
+          sw_vers
+          # Print RAM in GB
+          RAM_BYTES=$(sysctl -n hw.memsize)
+          RAM_GB=$(echo "scale=2; $RAM_BYTES/1024/1024/1024" | bc)
+          echo "Available RAM (GB): $RAM_GB"
+          sysctl machdep.cpu.brand_string
+          sysctl machdep.cpu.core_count
+          # Print number of GPU cores (Apple Silicon)
+          if command -v system_profiler &> /dev/null; then
+            GPU_CORES=$(system_profiler SPDisplaysDataType | awk '/Total Number of Cores/ {print $5; exit}')
+            if [ -z "$GPU_CORES" ]; then
+              # Fallback: try to parse "Core Count" from Apple GPU section
+              GPU_CORES=$(system_profiler SPDisplaysDataType | awk '/Core Count/ {print $3; exit}')
+            fi
+            echo "GPU Cores: ${GPU_CORES:-Unknown}"
+          else
+            echo "system_profiler not available, cannot determine GPU cores."
+          fi
+        fi
+
+        # Test ExecuTorch Metal build
+        PYTHON_EXECUTABLE=python CMAKE_ARGS="-DEXECUTORCH_BUILD_METAL=ON" ${CONDA_RUN} --no-capture-output ./install_executorch.sh
 
   export-voxtral-metal-artifact:
     name: export-voxtral-metal-artifact
-    runs-on: macos-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v3
-        with:
-          submodules: recursive
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-
-      - name: Setup Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.10'
-
-      - name: Export Voxtral with Metal Backend
-        env:
-          EXECUTORCH_HF_TOKEN: ${{ secrets.EXECUTORCH_HF_TOKEN }}
-        run: |
-          set -eux
-
-          echo "::group::Setup ExecuTorch"
-          PYTHON_EXECUTABLE=python3 ./install_executorch.sh
-          echo "::endgroup::"
-
-          echo "::group::Setup Huggingface"
-          pip3 install -U "huggingface_hub[cli]" accelerate
-          huggingface-cli login --token $EXECUTORCH_HF_TOKEN
-          OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
-          pip3 install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
-          pip3 install mistral-common librosa
-          pip3 list
-          echo "::endgroup::"
-
-          echo "::group::Export Voxtral"
-          optimum-cli export executorch \
-            --model "mistralai/Voxtral-Mini-3B-2507" \
-            --task "multimodal-text-to-text" \
-            --recipe "metal" \
-            --dtype bfloat16 \
-            --max_seq_len 1024 \
-            --output_dir ./
-          python3 -m executorch.extension.audio.mel_spectrogram \
-            --feature_size 128 \
-            --stack_output \
-            --max_audio_len 300 \
-            --output_file voxtral_preprocessor.pte
-
-          test -f model.pte
-          test -f aoti_metal_blob.ptd
-          test -f voxtral_preprocessor.pte
-          echo "::endgroup::"
-
-          echo "::group::Store Voxtral Artifacts"
-          mkdir -p artifacts
-          cp model.pte artifacts/
-          cp aoti_metal_blob.ptd artifacts/
-          cp voxtral_preprocessor.pte artifacts/
-          ls -al artifacts
-          echo "::endgroup::"
-
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: voxtral-metal-export
-          path: artifacts/
+    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
+    secrets: inherit
+    with:
+      runner: macos-m2-stable
+      python-version: '3.11'
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      secrets-env: EXECUTORCH_HF_TOKEN
+      upload-artifact: voxtral-metal-export
+      script: |
+        set -eux
+
+        # Print machine info
+        uname -a
+        if [ $(uname -s) == Darwin ]; then
+          sw_vers
+          # Print RAM in GB
+          RAM_BYTES=$(sysctl -n hw.memsize)
+          RAM_GB=$(echo "scale=2; $RAM_BYTES/1024/1024/1024" | bc)
+          echo "Available RAM (GB): $RAM_GB"
+          sysctl machdep.cpu.brand_string
+          sysctl machdep.cpu.core_count
+          # Print number of GPU cores (Apple Silicon)
+          if command -v system_profiler &> /dev/null; then
+            GPU_CORES=$(system_profiler SPDisplaysDataType | awk '/Total Number of Cores/ {print $5; exit}')
+            if [ -z "$GPU_CORES" ]; then
+              # Fallback: try to parse "Core Count" from Apple GPU section
+              GPU_CORES=$(system_profiler SPDisplaysDataType | awk '/Core Count/ {print $3; exit}')
+            fi
+            echo "GPU Cores: ${GPU_CORES:-Unknown}"
+          else
+            echo "system_profiler not available, cannot determine GPU cores."
+          fi
+        fi
+
+        echo "::group::Setup Huggingface"
+        ${CONDA_RUN} pip install -U "huggingface_hub[cli]" accelerate
+        ${CONDA_RUN} huggingface-cli login --token $SECRET_EXECUTORCH_HF_TOKEN
+
+        echo "::group::Setup Optimum-ExecuTorch"
+        OPTIMUM_ET_VERSION=$(cat .ci/docker/ci_commit_pins/optimum-executorch.txt)
+        echo "Using optimum-executorch version: ${OPTIMUM_ET_VERSION}"
+        ${CONDA_RUN} pip install git+https://github.com/huggingface/optimum-executorch.git@${OPTIMUM_ET_VERSION}
+        ${CONDA_RUN} pip install mistral-common librosa
+        ${CONDA_RUN} pip list
+        echo "::endgroup::"
+
+        echo "::group::Setup ExecuTorch"
+        PYTHON_EXECUTABLE=python ${CONDA_RUN} ./install_executorch.sh
+        echo "::endgroup::"
+
+        echo "::group::Export Voxtral"
+        ${CONDA_RUN} optimum-cli export executorch \
+          --model "mistralai/Voxtral-Mini-3B-2507" \
+          --task "multimodal-text-to-text" \
+          --recipe "metal" \
+          --dtype bfloat16 \
+          --max_seq_len 1024 \
+          --output_dir ./
+        ${CONDA_RUN} python -m executorch.extension.audio.mel_spectrogram \
+          --feature_size 128 \
+          --stack_output \
+          --max_audio_len 300 \
+          --output_file voxtral_preprocessor.pte
+
+        test -f model.pte
+        test -f aoti_metal_blob.ptd
+        test -f voxtral_preprocessor.pte
+        echo "::endgroup::"
+
+        echo "::group::Store Voxtral Artifacts"
+        mkdir -p "${RUNNER_ARTIFACT_DIR}"
+        cp model.pte "${RUNNER_ARTIFACT_DIR}/"
+        cp aoti_metal_blob.ptd "${RUNNER_ARTIFACT_DIR}/"
+        cp voxtral_preprocessor.pte "${RUNNER_ARTIFACT_DIR}/"
+        ls -al "${RUNNER_ARTIFACT_DIR}"
+        echo "::endgroup::"
 
   test-voxtral-metal-e2e:
     name: test-voxtral-metal-e2e
     needs: export-voxtral-metal-artifact
-    runs-on: macos-latest
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v3
-        with:
-          submodules: recursive
-          ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-
-      - name: Setup Python
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.10'
-
-      - name: Download artifacts
-        uses: actions/download-artifact@v4
-        with:
-          name: voxtral-metal-export
-          path: artifacts/
-
-      - name: Test Voxtral Metal E2E
-        run: |
-          set -eux
-
-          echo "::group::Setup ExecuTorch Requirements"
-          CMAKE_ARGS="-DEXECUTORCH_BUILD_METAL=ON" ./install_requirements.sh
-          pip3 list
-          echo "::endgroup::"
-
-          echo "::group::Prepare Voxtral Artifacts"
-          cp artifacts/model.pte .
-          cp artifacts/aoti_metal_blob.ptd .
-          cp artifacts/voxtral_preprocessor.pte .
-          TOKENIZER_URL="https://huggingface.co/mistralai/Voxtral-Mini-3B-2507/resolve/main/tekken.json"
-          curl -L $TOKENIZER_URL -o tekken.json
-          ls -al model.pte aoti_metal_blob.ptd voxtral_preprocessor.pte tekken.json
-          echo "::endgroup::"
-
-          echo "::group::Create Test Audio File"
-          say -o call_samantha_hall.aiff "Call Samantha Hall"
-          afconvert -f WAVE -d LEI16 call_samantha_hall.aiff call_samantha_hall.wav
-          echo "::endgroup::"
-
-          echo "::group::Build Voxtral Runner"
-          cmake --preset llm \
-            -DEXECUTORCH_BUILD_METAL=ON \
-            -DCMAKE_INSTALL_PREFIX=cmake-out \
-            -DCMAKE_BUILD_TYPE=Release \
-            -Bcmake-out -S.
-          cmake --build cmake-out -j$(( $(sysctl -n hw.ncpu) - 1 )) --target install --config Release
-
-          cmake -DEXECUTORCH_BUILD_METAL=ON \
-            -DCMAKE_BUILD_TYPE=Release \
-            -Sexamples/models/voxtral \
-            -Bcmake-out/examples/models/voxtral/
-          cmake --build cmake-out/examples/models/voxtral --target voxtral_runner --config Release
-          echo "::endgroup::"
-
-          echo "::group::Run Voxtral Runner"
-          set +e
-          OUTPUT=$(cmake-out/examples/models/voxtral/voxtral_runner \
-            --model_path model.pte \
-            --data_path aoti_metal_blob.ptd \
-            --tokenizer_path tekken.json \
-            --audio_path call_samantha_hall.wav \
-            --processor_path voxtral_preprocessor.pte \
-            --temperature 0 2>&1)
-          EXIT_CODE=$?
-          set -e
-
-          echo "$OUTPUT"
-
-          if ! echo "$OUTPUT" | grep -iq "Samantha"; then
-            echo "Expected output 'Samantha' not found in output"
-            exit 1
-          fi
-
-          if [ $EXIT_CODE -ne 0 ]; then
-            echo "Unexpected exit code: $EXIT_CODE"
-            exit $EXIT_CODE
+    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
+    with:
+      runner: macos-m2-stable
+      python-version: '3.11'
+      submodules: 'recursive'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      download-artifact: voxtral-metal-export
+      script: |
+        set -eux
+
+        # Print machine info
+        uname -a
+        if [ $(uname -s) == Darwin ]; then
+          sw_vers
+          # Print RAM in GB
+          RAM_BYTES=$(sysctl -n hw.memsize)
+          RAM_GB=$(echo "scale=2; $RAM_BYTES/1024/1024/1024" | bc)
+          echo "Available RAM (GB): $RAM_GB"
+          sysctl machdep.cpu.brand_string
+          sysctl machdep.cpu.core_count
+          # Print number of GPU cores (Apple Silicon)
+          if command -v system_profiler &> /dev/null; then
+            GPU_CORES=$(system_profiler SPDisplaysDataType | awk '/Total Number of Cores/ {print $5; exit}')
+            if [ -z "$GPU_CORES" ]; then
+              # Fallback: try to parse "Core Count" from Apple GPU section
+              GPU_CORES=$(system_profiler SPDisplaysDataType | awk '/Core Count/ {print $3; exit}')
+            fi
+            echo "GPU Cores: ${GPU_CORES:-Unknown}"
+          else
+            echo "system_profiler not available, cannot determine GPU cores."
           fi
-          echo "::endgroup::"
+-          echo "::endgroup::"
+        fi
+
+        echo "::group::Setup ExecuTorch Requirements"
+        CMAKE_ARGS="-DEXECUTORCH_BUILD_METAL=ON" ${CONDA_RUN} --no-capture-output ./install_requirements.sh
+        pip list
+        echo "::endgroup::"
+
+        echo "::group::Prepare Voxtral Artifacts"
+        cp "${RUNNER_ARTIFACT_DIR}/model.pte" .
+        cp "${RUNNER_ARTIFACT_DIR}/aoti_metal_blob.ptd" .
+        cp "${RUNNER_ARTIFACT_DIR}/voxtral_preprocessor.pte" .
+        TOKENIZER_URL="https://huggingface.co/mistralai/Voxtral-Mini-3B-2507/resolve/main/tekken.json"
+        curl -L $TOKENIZER_URL -o tekken.json
+        ls -al model.pte aoti_metal_blob.ptd voxtral_preprocessor.pte tekken.json
+        echo "::endgroup::"
+
+        echo "::group::Create Test Audio File"
+        say -o call_samantha_hall.aiff "Call Samantha Hall"
+        afconvert -f WAVE -d LEI16 call_samantha_hall.aiff call_samantha_hall.wav
+        echo "::endgroup::"
+
+        echo "::group::Build Voxtral Runner"
+        cmake --preset llm \
+          -DEXECUTORCH_BUILD_METAL=ON \
+          -DCMAKE_INSTALL_PREFIX=cmake-out \
+          -DCMAKE_BUILD_TYPE=Release \
+          -Bcmake-out -S.
+        cmake --build cmake-out -j$(( $(sysctl -n hw.ncpu) - 1 )) --target install --config Release
+
+        cmake -DEXECUTORCH_BUILD_METAL=ON \
+          -DCMAKE_BUILD_TYPE=Release \
+          -Sexamples/models/voxtral \
+          -Bcmake-out/examples/models/voxtral/
+        cmake --build cmake-out/examples/models/voxtral --target voxtral_runner --config Release
+        echo "::endgroup::"
+
+        echo "::group::Run Voxtral Runner"
+        set +e
+        OUTPUT=$(cmake-out/examples/models/voxtral/voxtral_runner \
+          --model_path model.pte \
+          --data_path aoti_metal_blob.ptd \
+          --tokenizer_path tekken.json \
+          --audio_path call_samantha_hall.wav \
+          --processor_path voxtral_preprocessor.pte \
+          --temperature 0 2>&1)
+        EXIT_CODE=$?
+        set -e
+
+        echo "$OUTPUT"
+
+        if ! echo "$OUTPUT" | grep -iq "Samantha"; then
+          echo "Expected output 'Samantha' not found in output"
+          exit 1
+        fi
+
+        if [ $EXIT_CODE -ne 0 ]; then
+          echo "Unexpected exit code: $EXIT_CODE"
+          exit $EXIT_CODE
+        fi
+        echo "::endgroup::"
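For context, the diff above replaces each job's inline runs-on/steps definition with a call to the reusable macos_job.yml workflow from pytorch/test-infra: the job declares uses: plus with:/secrets: inputs, and the shared workflow checks out the repo, sets up Python, and runs the supplied script. A minimal sketch of that calling pattern follows; the job name, timeout value, and echo script are hypothetical, and only the input names that appear in the diff are taken from the source.

# Hypothetical caller sketch -- illustrates the reusable-workflow call style used above.
jobs:
  example-metal-job:                # hypothetical job name
    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
    secrets: inherit                # forward repository secrets to the called workflow
    with:
      runner: macos-m2-stable       # same runner label the diff uses
      python-version: '3.11'
      submodules: 'recursive'
      timeout: 30                   # minutes; hypothetical value (the diff uses 90)
      script: |
        set -eux
        # Commands that were previously separate steps move into this single inline script.
        echo "hello from the reusable macOS job"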
