Skip to content

Commit 7f5efa1

Browse files
committed
Update base for Update on "Use unlifted export pass to tag delegated constants"
Use the unlifted pass to tag constants for delegates. Implications: - Tagging must happen on the unlifted ep.module(), before going into to_edge_transform_and_lower/to_edge. Why? - The unlifted graph contains constants in getattr nodes, which is a convenient way to isolate constants. - After going into to_edge_transform_and_lower/to_edge, transforms happen on the graph_module, which is lifted. - The lifted graph requires the ep graph signature to differentiate constants via the `is_param` function. However, in to_edge.transform, we do not have access to the ep. Baking the ep as an argument via partial function doesn't work, as the ep from earlier may be outdated. This means we are comparing an older ep to a newer graph_module, which may not have corresponding graph signatures etc. Differential Revision: [D79736684](https://our.internmc.facebook.com/intern/diff/D79736684/) [ghstack-poisoned]
2 parents b64b1af + 6e72e27 commit 7f5efa1

File tree

254 files changed

+8065
-2457
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

254 files changed

+8065
-2457
lines changed

.ci/scripts/test_huggingface_optimum_model.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -262,14 +262,20 @@ def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
262262

263263
assert torch.allclose(
264264
eager_output.logits, et_output, atol=1e-02, rtol=1e-02
265-
), "CoreML output does not match eager"
265+
), "Model output does not match eager"
266266

267267

268268
if __name__ == "__main__":
269269
parser = argparse.ArgumentParser()
270270
parser.add_argument("--model", type=str, required=True)
271271
parser.add_argument("--recipe", type=str, required=True)
272272
parser.add_argument("--quantize", action="store_true", help="Enable quantization")
273+
parser.add_argument(
274+
"--model_dir",
275+
type=str,
276+
required=False,
277+
help="When provided, write the pte file to this directory. Otherwise, a temporary directory is created for the test.",
278+
)
273279
args = parser.parse_args()
274280

275281
model_to_model_id_and_test_function = {
@@ -294,11 +300,11 @@ def test_vit(model_id, model_dir, recipe, *, quantize=False, run_only=False):
294300
f"Unknown model name: {args.model}. Available models: {model_to_model_id_and_test_function.keys()}"
295301
)
296302

303+
model_id, test_fn = model_to_model_id_and_test_function[args.model]
297304
with tempfile.TemporaryDirectory() as tmp_dir:
298-
model_id, test_fn = model_to_model_id_and_test_function[args.model]
299305
test_fn(
300306
model_id=model_id,
301-
model_dir=tmp_dir,
307+
model_dir=tmp_dir if args.model_dir is None else args.model_dir,
302308
recipe=args.recipe,
303309
quantize=args.quantize,
304310
)

.ci/scripts/test_llama_lora.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -80,12 +80,12 @@ cmake-out/examples/models/llama/llama_main --model_path=${MODEL_NAME}.pte --prom
8080
NOW=$(date +"%H:%M:%S")
8181
echo "Finished at ${NOW}"
8282

83-
RESULT=$(cat lora.txt)
83+
RESULT=$(cat result.txt)
8484
if [[ "${RESULT}" == "${EXPECTED_PREFIX}"* ]]; then
8585
echo "Expected result prefix: ${EXPECTED_PREFIX}"
8686
echo "Actual result: ${RESULT}"
87+
# Do not clean up files if test passes, as they're re-used in the next test.
8788
echo "Success"
88-
cleanup_files
8989
else
9090
echo "Expected result prefix: ${EXPECTED_PREFIX}"
9191
echo "Actual result: ${RESULT}"
@@ -108,7 +108,7 @@ $PYTHON_EXECUTABLE -m extension.llm.export.export_llm \
108108
backend.xnnpack.enabled=true \
109109
backend.xnnpack.extended_ops=true \
110110
export.output_name="${MODEL_SEPARATE}.pte" \
111-
serialization.foundation_weights_file="${MODEL_SEPARATE}.ptd"
111+
export.foundation_weights_file="${MODEL_SEPARATE}.ptd"
112112

113113
# Run llama runner.
114114
NOW=$(date +"%H:%M:%S")

.ci/scripts/test_model.sh

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -199,6 +199,8 @@ test_model_with_qnn() {
199199
EXPORT_SCRIPT=albert
200200
elif [[ "${MODEL_NAME}" == "bert" ]]; then
201201
EXPORT_SCRIPT=bert
202+
elif [[ "${MODEL_NAME}" == "conv_former" ]]; then
203+
EXPORT_SCRIPT=conv_former
202204
elif [[ "${MODEL_NAME}" == "cvt" ]]; then
203205
EXPORT_SCRIPT=cvt
204206
elif [[ "${MODEL_NAME}" == "distilbert" ]]; then
@@ -238,7 +240,7 @@ test_model_with_qnn() {
238240
"cvt"|"dit"|"focalnet"|"mobilevit_v2"|"pvt"|"swin")
239241
SCRIPT_FOLDER=oss_scripts
240242
;;
241-
"albert"|"bert"|"distilbert"|"roberta"|"efficientnet"|"mobilevit_v1")
243+
"albert"|"bert"|"conv_former"|"distilbert"|"roberta"|"efficientnet"|"mobilevit_v1")
242244
pip install evaluate
243245
SCRIPT_FOLDER=oss_scripts
244246
# 16bit models will encounter op validation fail on some operations,

.ci/scripts/test_qnn_static_llama.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -33,12 +33,12 @@ echo "Creating tokenizer.bin"
3333
$PYTHON_EXECUTABLE -m pytorch_tokenizers.tools.llama2c.convert -t tokenizer.model -o tokenizer.bin
3434

3535
set +e
36-
# Compile only as weight sharing is not applicable on x86
37-
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-android/ --executorch_root . --artifact_dir . --llama_artifacts . --compile_only
36+
# Compile only as weight sharing is not applicable on x86.
37+
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-android/ --executorch_root . --artifact_dir ./stories_110m_pte_size --llama_artifacts . --compile_only
3838
exit_code1=$?
3939

4040
# Checks accuracy with weight sharing disabled since x86 does not support weight sharing.
41-
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-x86/ --executorch_root . --artifact_dir . --llama_artifacts . --enable_x86_64
41+
$PYTHON_EXECUTABLE backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleLLMScript.test_llama_stories_110m --model SM8650 --build_folder build-x86/ --executorch_root . --artifact_dir ./stories_110m_accuracy --llama_artifacts . --enable_x86_64
4242
exit_code2=$?
4343

4444
# Check BC
Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
name: Add Open External Contributor PRs and Issues to PyTorch Org Project 136
2+
3+
on:
4+
schedule:
5+
- cron: '0 * * * *'
6+
workflow_dispatch:
7+
8+
jobs:
9+
add_to_project:
10+
runs-on: ubuntu-latest
11+
steps:
12+
- name: Add open issues and open, non-draft PRs to org project (excluding certain authors)
13+
uses: actions/github-script@v7
14+
with:
15+
github-token: ${{ secrets.PYTORCH_PROJECT_PAT }}
16+
script: |
17+
const projectId = "PVT_kwDOAUB9vs4A_PUL"; // PyTorch org project 136
18+
const owner = 'pytorch';
19+
const repo = 'executorch';
20+
21+
// List of authors to exclude
22+
const excludedAuthors = new Set([
23+
"nil-is-all", "cbilgin", "KimishPatel", "psiddh", "digantdesai", "SS-JIA", "ahmtox", "mcr229", "shoumikhin",
24+
"manuelcandales", "metascroy", "cccclai", "rohansjoshi", "kirklandsign", "abhinaykukkadapu", "JacobSzwejbka",
25+
"Conarnar", "lucylq", "larryliu0820", "BujSet", "Gasoonjia", "Juntian777", "guangy10", "jackzhxng",
26+
"GregoryComer", "leafs1", "swolchok", "mergennachin", "tarun292", "byjlw", "jathu", "Jack-Khuu", "georgehong",
27+
"zhenyan-zhang-meta", "silverguo", "dbort", "jorgep31415", "huydhn", "mcremon-meta", "trivedivivek", "angelayi",
28+
"helunwencser", "hsharma35", "zhxchen17", "iseeyuan", "svekars", "nathanaelsee", "dulinriley", "jerryzh168",
29+
"cmodi-meta", "bigfootjon", "sxu", "ydwu4", "Riandy", "tugsbayasgalan", "bsoyluoglu", "yangw-dev", "YIWENX14",
30+
"namanahuja", "yushangdi", "limintang", "pianpwk", "viveknayakatmeta", "andreanicastro", "JakeStevens",
31+
"gmagogsfm", "zonglinpeng", "eigen-k", "derekxu", "salilsdesai", "skrtskrtfb", "pssrawat", "r-barnes", "pytorchbot",
32+
"pytorchmergebot", "pytorchupdatebot", "facebook-github-bot", "Erik-Lundell", "zingo", "AdrianLundell",
33+
"oscarandersson8218", "per", "Sebastian-Larsson", "SaoirseARM", "robell", "mansnils", "martinlsm", "freddan80",
34+
"YufengShi-dudu", "tom-arm", "perheld", "Jerry-Ge", "gggekov", "fumchin", "wwwind", "haowhsu-quic", "shewu-quic",
35+
"winskuo-quic", "chunit-quic", "DannyYuyang-quic", "chuntl", "cymbalrush", "DenisVieriu97", "billmguo",
36+
"StrycekSimon", "jirioc", "robert-kalmar", "skywall", "neuropilot-captain"
37+
]);
38+
39+
async function addItem(contentId, type, number) {
40+
try {
41+
await github.graphql(`
42+
mutation {
43+
addProjectV2ItemById(input: {projectId: "${projectId}", contentId: "${contentId}"}) {
44+
item { id }
45+
}
46+
}
47+
`);
48+
console.log(`Added ${type} #${number} to project`);
49+
} catch (error) {
50+
if (error.message && error.message.includes("A project item already exists for this content")) {
51+
// Ignore if already exists
52+
console.log(`${type} #${number} already in project`);
53+
} else {
54+
console.log(`Error adding ${type} #${number}: ${error.message}`);
55+
}
56+
}
57+
}
58+
59+
try {
60+
// Add open issues (not PRs) and exclude by author
61+
const issues = await github.paginate(
62+
github.rest.issues.listForRepo,
63+
{
64+
owner,
65+
repo,
66+
state: 'open',
67+
filter: 'all'
68+
}
69+
);
70+
for (const issue of issues) {
71+
if (!issue.pull_request && !excludedAuthors.has(issue.user.login)) {
72+
await addItem(issue.node_id, 'issue', issue.number);
73+
}
74+
}
75+
76+
// Add open, non-draft PRs (regardless of review state), exclude by author
77+
const prs = await github.paginate(
78+
github.rest.pulls.list,
79+
{
80+
owner,
81+
repo,
82+
state: 'open',
83+
draft: false,
84+
}
85+
);
86+
for (const pr of prs) {
87+
if (!excludedAuthors.has(pr.user.login)) {
88+
await addItem(pr.node_id, 'pr', pr.number);
89+
}
90+
}
91+
} catch (error) {
92+
core.setFailed(`Workflow failed: ${error.message}`);
93+
}

.github/workflows/pull.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -315,7 +315,7 @@ jobs:
315315
bash examples/models/moshi/mimi/install_requirements.sh
316316
317317
# reinstall executorch
318-
bash ./install_executorch.sh
318+
bash ./install_executorch.sh --minimal
319319
320320
# run python unittest
321321
python -m unittest examples.models.moshi.mimi.test_mimi

.github/workflows/trunk.yml

Lines changed: 62 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ jobs:
6060
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
6161
strategy:
6262
matrix:
63-
model: [add]
63+
model: [add, softmax, mv2]
6464
fail-fast: false
6565
with:
6666
runner: linux.2xlarge
@@ -72,31 +72,85 @@ jobs:
7272
MODEL_NAME=${{ matrix.model }}
7373
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
7474
conda activate "${CONDA_ENV}"
75+
if [[ ${{ matrix.model}} == "add" ]]; then
76+
SIM_LIMIT_SEC=60
77+
elif [[ ${{ matrix.model}} == "softmax" ]]; then
78+
SIM_LIMIT_SEC=60
79+
elif [[ ${{ matrix.model}} == "mv2" ]]; then
80+
SIM_LIMIT_SEC=5000
81+
else
82+
echo "Failed unsupported model selection ${{ matrix.model }}"
83+
exit 1
84+
fi
7585
7686
source .ci/scripts/utils.sh
7787
source .ci/scripts/zephyr-utils.sh
7888
mkdir -p zephyr_scratch/
7989
cd zephyr_scratch
8090
export ZEPHYR_PROJ_ROOT=$(realpath $(pwd))
91+
export ARM_FVP_TUTORIALS_ROOT=$ZEPHYR_PROJ_ROOT/zephyr/samples/modules/executorch/arm-fvp-tutorials
8192
93+
# TODO @Bujji: Should see if this can be moved into the docker image itself
8294
download_arm_zephyr_sdk
8395
./zephyr-sdk-0.16.0/setup.sh -c -t arm-zephyr-eabi
84-
8596
cd $ZEPHYR_PROJ_ROOT
8697
setup_zephyr_et_module
8798
99+
# Run setup scripts for Arm FVP and Arm AOT Compilation
88100
cd $ZEPHYR_PROJ_ROOT/modules/lib/executorch
89101
install_executorch "--use-pt-pinned-commit"
90102
.ci/scripts/setup-arm-baremetal-tools.sh --target-toolchain zephyr
91103
source examples/arm/ethos-u-scratch/setup_path.sh
92104
source $ZEPHYR_PROJ_ROOT/zephyr/zephyr-env.sh
93-
cd $ZEPHYR_PROJ_ROOT/zephyr/samples/modules/executorch/arm/hello_world
94-
west build -p always -b mps3/corstone300/fvp
95-
FVP_Corstone_SSE-300_Ethos-U55 -a build/zephyr/zephyr.elf -C mps3_board.visualisation.disable-visualisation=1 -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='sim.out' -C cpu0.CFGITCMSZ=15 -C cpu0.CFGDTCMSZ=15 --simlimit 120
96105
97-
grep -qF "Output[0][0]: (float) 2.000000" sim.out
106+
# Get the model as PTE
107+
python -m examples.arm.aot_arm_compiler \
108+
--model_name="${MODEL_NAME}" \
109+
--output="${MODEL_NAME}.pte"
110+
111+
# Generate the C-style header
112+
cd $ARM_FVP_TUTORIALS_ROOT
113+
python build_model.py \
114+
--executorch-root $ZEPHYR_PROJ_ROOT/modules/lib/executorch \
115+
--pte-file $ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte \
116+
--output-path $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/src/
117+
118+
cd $ARM_FVP_TUTORIALS_ROOT/models/${MODEL_NAME}/
119+
120+
# Build the zephyr elf
121+
west build -p always -b mps3/corstone300/fvp -- \
122+
-DET_PTE_FILE_PATH_FOR_SELECTIVE_BUILD=$ZEPHYR_PROJ_ROOT/modules/lib/executorch/${MODEL_NAME}.pte
123+
124+
# Run the simulation
125+
FVP_Corstone_SSE-300_Ethos-U55 -a build/zephyr/zephyr.elf \
126+
-C mps3_board.visualisation.disable-visualisation=1 \
127+
-C mps3_board.telnetterminal0.start_telnet=0 \
128+
-C mps3_board.uart0.out_file='sim.out' \
129+
-C cpu0.CFGITCMSZ=15 \
130+
-C cpu0.CFGDTCMSZ=15 \
131+
--simlimit ${SIM_LIMIT_SEC}
132+
133+
# Disable exit on error
134+
set +e
135+
# Report failure if any of the output verification checks fail
136+
grep -qF "ERROR" sim.out
137+
exit_status=$? #store 0 if found (failure), 1 if not (success)
138+
if [[ "$exit_status" -eq "0" ]]; then
139+
cat sim.out
140+
set -e
141+
exit 1
142+
fi
143+
144+
# Report failure if the simulation does not complete successfully
145+
grep -qF "SUCCESS: Program complete, exiting." sim.out
98146
exit_status=$? #store 0 if found (success), 1 if not (failure)
99-
exit $exit_status
147+
if [[ "$exit_status" -eq "1" ]]; then
148+
cat sim.out
149+
set -e
150+
exit 1
151+
fi
152+
# Re-enable exit on error
153+
set -e
100154
101155
test-models-linux-aarch64:
102156
name: test-models-linux-aarch64
@@ -568,7 +622,7 @@ jobs:
568622
strategy:
569623
matrix:
570624
dtype: [fp32]
571-
model: [dl3, mv3, mv2, ic4, ic3, vit, mb, w2l]
625+
model: [dl3, mv3, mv2, ic4, ic3, vit, mb, w2l, conv_former]
572626
fail-fast: false
573627
with:
574628
runner: linux.2xlarge

backends/apple/coreml/TARGETS

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,26 @@ runtime.python_library(
6060
],
6161
)
6262

63+
runtime.python_library(
64+
name = "recipes",
65+
srcs = glob([
66+
"recipes/*.py",
67+
]),
68+
visibility = [
69+
"@EXECUTORCH_CLIENTS",
70+
],
71+
deps = [
72+
"fbsource//third-party/pypi/coremltools:coremltools",
73+
":backend",
74+
"//caffe2:torch",
75+
"//executorch/exir:lib",
76+
"//executorch/exir/backend:compile_spec_schema",
77+
"//executorch/exir/backend:partitioner",
78+
"//executorch/exir/backend:utils",
79+
"//executorch/export:lib",
80+
],
81+
)
82+
6383
runtime.cxx_python_extension(
6484
name = "executorchcoreml",
6585
srcs = [
@@ -103,6 +123,7 @@ runtime.python_test(
103123
"fbsource//third-party/pypi/pytest:pytest",
104124
":partitioner",
105125
":quantizer",
126+
":recipes",
106127
"//caffe2:torch",
107128
"//pytorch/vision:torchvision",
108129
],

0 commit comments

Comments
 (0)