From 31264576dd2fb2351618695f0b49eff71b5a6726 Mon Sep 17 00:00:00 2001
From: Chen Lai
Date: Thu, 14 Nov 2024 14:48:14 -0800
Subject: [PATCH] Use getopts to pass flags and arguments for test_llama.sh
 (#6863)

Summary:
To make the test_llama.sh script more scalable, such that we can cover other args from the export_llama script like `pt2e` etc

Differential Revision: D65959919
---
 .ci/scripts/test_llama.sh   | 42 +++++++++++++++++++++++++++++++------
 .github/workflows/pull.yml  |  4 ++--
 .github/workflows/trunk.yml |  2 +-
 3 files changed, 39 insertions(+), 9 deletions(-)

diff --git a/.ci/scripts/test_llama.sh b/.ci/scripts/test_llama.sh
index ed2a9c2558b..ff1c01442f8 100644
--- a/.ci/scripts/test_llama.sh
+++ b/.ci/scripts/test_llama.sh
@@ -9,11 +9,41 @@ set -exu
 # shellcheck source=/dev/null
 source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
 
-MODEL_NAME=$1 # stories110M
-BUILD_TOOL=$2 # buck2 or cmake
-DTYPE=$3 # fp16, bf16, or fp32
-MODE=${4:-"xnnpack+custom"} # portable or xnnpack+custom or xnnpack+custom+qe
-UPLOAD_DIR=${5:-}
+while [[ $# -gt 0 ]]; do
+  case "$1" in
+    -model)
+      MODEL_NAME="$2"
+      shift 2
+      ;;
+    -build_tool)
+      BUILD_TOOL="$2"
+      shift 2
+      ;;
+    -dtype)
+      DTYPE="$2"
+      shift 2
+      ;;
+    -mode)
+      MODE="$2"
+      shift 2
+      ;;
+    -upload)
+      UPLOAD_DIR="$2"
+      shift 2
+      ;;
+    *)
+      echo "Unknown option: $1"
+      usage
+      ;;
+  esac
+done
+
+# Default mode to xnnpack+custom if not set
+MODE=${MODE:-"xnnpack+custom"}
+
+# Default UPLOAD_DIR to empty string if not set
+UPLOAD_DIR="${UPLOAD_DIR:-}"
+
 if [[ $# -lt 4 ]]; then # Assuming 4 mandatory args
     echo "Expecting atleast 4 positional arguments"
     echo "Usage: [...]"
@@ -150,7 +180,7 @@ cleanup_files() {
 }
 
 prepare_artifacts_upload() {
-  if [ -n "$UPLOAD_DIR" ]; then
+  if [ -n "${UPLOAD_DIR}" ]; then
     echo "Preparing for uploading generated artifacs"
     zip -j model.zip "${EXPORTED_MODEL_NAME}" tokenizer.bin
     mkdir -p "${UPLOAD_DIR}"
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index 1f5da06a920..6fc8ca91852 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -117,7 +117,7 @@ jobs:
         # Install requirements for export_llama
         PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
         # Test llama2
-        PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M "${BUILD_TOOL}" "${DTYPE}" "${MODE}" "${ARTIFACTS_DIR_NAME}"
+        PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -dtype "${DTYPE}" -mode "${MODE}" -upload "${ARTIFACTS_DIR_NAME}"
 
   test-llama-runner-linux-android:
     name: test-llama-runner-linux-android
@@ -393,7 +393,7 @@ jobs:
         # Install requirements for export_llama
         PYTHON_EXECUTABLE=python bash examples/models/llama/install_requirements.sh
         # Test llama2
-        PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh stories110M "${BUILD_TOOL}" "${DTYPE}" "${MODE}"
+        PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama.sh -model stories110M -build_tool "${BUILD_TOOL}" -dtype "${DTYPE}" -mode "${MODE}"
 
   test-phi-3-mini-runner-linux:
     name: test-phi-3-mini-runner-linux
diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
index 7e8769aa77b..74e9342758c 100644
--- a/.github/workflows/trunk.yml
+++ b/.github/workflows/trunk.yml
@@ -261,7 +261,7 @@ jobs:
         # Install requirements for export_llama
         PYTHON_EXECUTABLE=python ${CONDA_RUN} bash examples/models/llama/install_requirements.sh
         # Test llama2
-        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh stories110M cmake "${DTYPE}" "${MODE}"
+        PYTHON_EXECUTABLE=python ${CONDA_RUN} bash .ci/scripts/test_llama.sh -model stories110M -build_tool cmake -dtype "${DTYPE}" -mode "${MODE}"
 
# # TODO(jackzhxng): Runner consistently runs out of memory before test finishes. Try to find a more powerful runner.
# test-llava-runner-macos: