#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
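
# Note: run this script from the root of the executorch repo; it uses
# repo-relative paths (extension/..., examples/..., cmake-out).
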
set -exu
# shellcheck source=/dev/null
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
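# `retry` and $PYTHON_EXECUTABLE below are expected to come from utils.sh or
# the CI environment (assumption based on how this script uses them).
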
cmake_install_executorch_libraries() {
  echo "Installing libexecutorch.a, libextension_module.so, libportable_ops_lib.a"
  rm -rf cmake-out
  retry cmake --preset llm \
    -DCMAKE_INSTALL_PREFIX=cmake-out \
    -DCMAKE_BUILD_TYPE=Release
  cmake --build cmake-out -j9 --target install --config Release
}

cmake_build_llama_runner() {
  echo "Building llama runner"
  pushd extension/llm/tokenizers
  echo "Updating tokenizers submodule"
  git submodule update --init
  popd
  dir="examples/models/llama"
  retry cmake \
    -DBUILD_TESTING=OFF \
    -DCMAKE_INSTALL_PREFIX=cmake-out \
    -DCMAKE_BUILD_TYPE=Release \
    -B"cmake-out/${dir}" \
    "${dir}"
  cmake --build "cmake-out/${dir}" -j9 --config Release
}

cleanup_files() {
  echo "Deleting downloaded and generated files"
  rm -rf "${DOWNLOADED_PATH}/"
  rm -f result.txt
}

# Download model artifacts from HF Hub.
# Hosting in personal repo for now.
HF_MODEL_REPO="lucylq/llama3_1B_lora"
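# download_hf_hub.sh prints the local download directory on stdout; capture it
# for the export and runtime steps below.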
DOWNLOADED_PATH=$(
  bash "$(dirname "${BASH_SOURCE[0]}")/download_hf_hub.sh" \
    --model_id "${HF_MODEL_REPO}" \
    --files "adapter_config.json" "adapter_model.pt" "consolidated.00.pth" "params.json" "tokenizer.model"
)
EXPORTED_MODEL_NAME="llama_3_2_1B_lora.pte"
# Export model.
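# Flag meanings (inferred from the names): base.adapter_* attach the LoRA
# adapter to the base checkpoint, model.use_kv_cache/use_sdpa_with_kv_cache
# enable the KV-cache decode path, and backend.xnnpack.* delegate supported
# ops to the XNNPACK backend.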
"${PYTHON_EXECUTABLE}" -m extension.llm.export.export_llm \
  base.checkpoint="${DOWNLOADED_PATH}/consolidated.00.pth" \
  base.params="${DOWNLOADED_PATH}/params.json" \
  base.adapter_checkpoint="${DOWNLOADED_PATH}/adapter_model.pt" \
  base.adapter_config="${DOWNLOADED_PATH}/adapter_config.json" \
  base.tokenizer_path="${DOWNLOADED_PATH}/tokenizer.model" \
  model.use_kv_cache=true \
  model.use_sdpa_with_kv_cache=true \
  model.dtype_override="fp32" \
  backend.xnnpack.enabled=true \
  backend.xnnpack.extended_ops=true \
  export.output_name="${EXPORTED_MODEL_NAME}"

# Build llama runner.
cmake_install_executorch_libraries
cmake_build_llama_runner

PROMPT="What happens if you eat watermelon seeds?"
# Run llama runner.
RUNTIME_ARGS=(
  --model_path="${EXPORTED_MODEL_NAME}"
  --tokenizer_path="${DOWNLOADED_PATH}/tokenizer.model"
  --temperature=0
  --seq_len=20
  --warmup=1
)
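# --temperature=0 selects greedy (deterministic) decoding, which is what makes
# the exact prefix comparison at the end of this script reliable.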

NOW=$(date +"%H:%M:%S")
echo "Starting to run llama runner at ${NOW}"
cmake-out/examples/models/llama/llama_main --prompt="${PROMPT}" "${RUNTIME_ARGS[@]}" > result.txt
NOW=$(date +"%H:%M:%S")
echo "Finished at ${NOW}"

RESULT=$(cat result.txt)
EXPECTED_PREFIX="What happens if you eat watermelon seeds? Watermelon seeds are a good source of vitamin C,"
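# seq_len=20 caps the total sequence length, so only this short prefix of the
# reference answer can be checked (assumes seq_len counts prompt + generated
# tokens).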

echo "Expected result prefix: ${EXPECTED_PREFIX}"
echo "Actual result: ${RESULT}"
if [[ "${RESULT}" == "${EXPECTED_PREFIX}"* ]]; then
  echo "Success"
  cleanup_files
else
  echo "Failure; results not the same"
  cleanup_files
  exit 1
fi