
Commit 7a3b883

Add Static Stories Llama CI

1 parent e00eaea
5 files changed: +75 -5 lines changed

.ci/scripts/setup-stories-llama.sh

Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+set -ex
+
+# Download and prepare stories llama model artifacts
+prepare_model_artifacts() {
+  echo "Preparing stories model artifacts"
+  wget -O stories110M.pt "https://huggingface.co/karpathy/tinyllamas/resolve/main/stories110M.pt"
+  wget -O tokenizer.model "https://raw.githubusercontent.com/karpathy/llama2.c/master/tokenizer.model"
+  echo '{"dim": 768, "multiple_of": 32, "n_heads": 12, "n_layers": 12, "norm_eps": 1e-05, "vocab_size": 32000}' > params.json
+}
+
+prepare_model_artifacts
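
For reference, the script leaves stories110M.pt, tokenizer.model, and params.json in the CI working directory. A minimal sanity check for those artifacts could look like the sketch below (a hypothetical helper, not part of this commit; the file names and params.json values come from the script above):

import json
import os

def check_stories_artifacts(workdir: str = ".") -> None:
    # Hypothetical sanity check for the files produced by setup-stories-llama.sh.
    for name in ("stories110M.pt", "tokenizer.model", "params.json"):
        path = os.path.join(workdir, name)
        # os.path.getsize raises FileNotFoundError if a download failed outright.
        assert os.path.getsize(path) > 0, f"empty artifact: {path}"
    with open(os.path.join(workdir, "params.json")) as f:
        params = json.load(f)
    # Values written verbatim by the script above.
    assert params["vocab_size"] == 32000 and params["n_layers"] == 12

check_stories_artifacts()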

.github/workflows/pull.yml

Lines changed: 25 additions & 0 deletions
@@ -398,6 +398,31 @@ jobs:
         # Test llama2
         PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -mode "${MODE}" -dtype "${DTYPE}" -pt2e_quantize "${PT2E_QUANTIZE}"
 
+  test-static-llama-runner-qnn-linux:
+    name: test-static-llama-runner-qnn-linux
+    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
+    strategy:
+      fail-fast: false
+    with:
+      runner: linux.2xlarge
+      docker-image: executorch-ubuntu-22.04-qnn-sdk
+      submodules: 'true'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 900
+      script: |
+        # The generic Linux job chooses to use the base env, not the one set up by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-qnn-deps.sh
+        PYTHON_EXECUTABLE=python bash .ci/scripts/build-qnn-sdk.sh
+
+        # Retrieve 110M Stories Llama Artifacts
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-stories-llama.sh
+
+        # Test static llama stories110m
+        PYTHON_EXECUTABLE=python backends/qualcomm/tests/test_qnn_delegate.py -k TestExampleScript.test_stories_single_llama --model SM8650 --build_folder build-android/ --executorch_root . --artifact_dir . --compile_only
+
   test-qnn-models-linux:
     name: test-qnn-models-linux
     uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
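
The CONDA_ENV line selects the last environment path from conda's JSON listing, presumably the environment provided by the Docker image rather than the base env. A rough Python equivalent of that jq expression, for illustration only (not part of the commit):

import json
import subprocess

# Rough equivalent of: conda env list --json | jq -r ".envs | .[-1]"
out = subprocess.check_output(["conda", "env", "list", "--json"])
envs = json.loads(out)["envs"]  # list of environment paths
print(envs[-1])                 # last entry: assumed to be the image-provided env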

backends/qualcomm/tests/test_qnn_delegate.py

Lines changed: 17 additions & 4 deletions
@@ -3285,8 +3285,6 @@ def test_stories_single_llama(self):
             self.artifact_dir,
             "--build_folder",
             self.build_folder,
-            "--device",
-            self.device,
             "--model",
             self.model,
             "--checkpoint",
@@ -3309,7 +3307,17 @@ def test_stories_single_llama(self):
             "0",
             "--llama_model",
             "stories110m",
+            "--model_mode",
+            "hybrid",
+            "--prefill_seq_len",
+            "32",
+            "--kv_seq_len",
+            "128",
         ]
+        if self.compile_only:
+            cmds.extend(["--compile_only"])
+        else:
+            cmds.extend(["--device", self.device])
         if self.host:
             cmds.extend(["--host", self.host])
 
@@ -3322,8 +3330,11 @@ def test_stories_single_llama(self):
         if "Error" in msg:
             self.fail(msg["Error"])
         else:
-            model_out = msg["result"][0]
-            self.assertTrue(model_out.startswith(golden_start_with))
+            if not self.compile_only:
+                model_out = msg["result"][0]
+                self.assertTrue(model_out.startswith(golden_start_with))
+            pte_size = msg["pte_size"]
+            self.assertLessEqual(pte_size, 130000000)
 
     @unittest.skip("dynamic shape inputs appear in recent torch.export.export")
     def test_mobilebert(self):
@@ -3552,6 +3563,8 @@ def setup_environment():
     TestQNN.shared_buffer = args.shared_buffer
     TestQNN.enable_x86_64 = args.enable_x86_64
     TestQNN.dump_intermediate_outputs = args.dump_intermediate_outputs
+    TestQNN.compile_only = args.compile_only
+
     return sys.argv[:1] + ns_args
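
Note that setup_environment() reads args.compile_only, but the argparse registration for the flag sits outside this hunk. It presumably looks something like the sketch below (hypothetical; only the attribute wiring above is confirmed by the diff):

import argparse

# Hypothetical registration of --compile_only; the diff only shows
# args.compile_only being copied onto TestQNN.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--compile_only",
    action="store_true",  # False by default, matching the class default in utils.py
    help="Compile the model to a .pte without running it on device",
)
args = parser.parse_args(["--compile_only"])
assert args.compile_only is True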

backends/qualcomm/tests/utils.py

Lines changed: 1 addition & 0 deletions
@@ -131,6 +131,7 @@ class TestQNN(unittest.TestCase):
     use_16a4w: str = "16a4w"
     shared_buffer: bool = False
     enable_x86_64: bool = False
+    compile_only: bool = False
 
     def _assert_outputs_equal(self, model_output, ref_output):
         self.assertTrue(len(ref_output) == len(model_output))

examples/qualcomm/oss_scripts/llama/llama.py

Lines changed: 14 additions & 1 deletion
@@ -775,13 +775,14 @@ def post_process():
             outputs.append(f.read())
 
     adb.pull(output_path=args.artifact, callback=post_process)
-
     if args.ip and args.port != -1:
+        pte_size = os.path.getsize(pte_path)
         with Client((args.ip, args.port)) as conn:
             conn.send(
                 json.dumps(
                     {
                         "result": outputs,
+                        "pte_size": pte_size,
                     }
                 )
             )
@@ -962,6 +963,18 @@ def main():
         )
     else:
         logging.warning("Quant attributes of the logit is None.")
+
+    if args.ip and args.port != -1:
+        pte_path = f"{args.artifact}/{pte_filename}.pte"
+        pte_size = os.path.getsize(pte_path)
+        with Client((args.ip, args.port)) as conn:
+            conn.send(
+                json.dumps(
+                    {
+                        "pte_size": pte_size,
+                    }
+                )
+            )
     exit(f"Finish compile_only and save to {args.artifact}")
 
     try:
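
Both paths report back to the host over multiprocessing.connection.Client, so compile-only runs now carry the .pte size that the unit test asserts on. The receiving side presumably pairs it with a Listener and parses the JSON payload, roughly as in this sketch (hypothetical receiver; the address is a placeholder, and the key names mirror the diff):

import json
from multiprocessing.connection import Listener

# Hypothetical host-side receiver for the payload sent via Client above.
with Listener(("0.0.0.0", 8080)) as listener:
    with listener.accept() as conn:
        msg = json.loads(conn.recv())

# compile_only runs report only "pte_size"; device runs also include "result".
print(msg.get("pte_size"), msg.get("result"))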
