
Commit 2a1f564

Update
[ghstack-poisoned]
2 parents: bffb95f + 8230848

File tree

3 files changed: +36 −28 lines


.ci/scripts/build_llama_android.sh

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ install_executorch_and_backend_lib() {
   echo "Installing executorch and xnnpack backend"
   clean_executorch_install_folders
   mkdir cmake-android-out
-  ANDROID_NDK=/opt/ndk
+  ANDROID_NDK=${ANDROID_NDK:-/opt/ndk}
   BUCK2=buck2
   ANDROID_ABI=arm64-v8a
   cmake --preset llm \
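
The functional change here is the `${ANDROID_NDK:-/opt/ndk}` parameter expansion: the script now falls back to /opt/ndk only when ANDROID_NDK is unset or empty, instead of unconditionally overwriting a caller-supplied path. A minimal sketch of that expansion (the exported path below is illustrative, not from the script):

    #!/usr/bin/env bash
    # With the variable unset, the :- expansion supplies the fallback.
    unset ANDROID_NDK
    echo "${ANDROID_NDK:-/opt/ndk}"    # -> /opt/ndk

    # With the variable set by the caller, the caller's value wins.
    export ANDROID_NDK=/home/ci/android-ndk-r27   # hypothetical path
    echo "${ANDROID_NDK:-/opt/ndk}"    # -> /home/ci/android-ndk-r27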

.github/workflows/pull.yml

Lines changed: 27 additions & 26 deletions
@@ -632,32 +632,33 @@ jobs:
       # run eval_llama wikitext task
       PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_wikitext.sh

-  test-eval_llama-mmlu-linux:
-    name: test-eval_llama-mmlu-linux
-    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-    permissions:
-      id-token: write
-      contents: read
-    strategy:
-      fail-fast: false
-    with:
-      runner: linux.24xlarge
-      docker-image: ci-image:executorch-ubuntu-22.04-clang12
-      submodules: 'recursive'
-      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
-      timeout: 90
-      script: |
-        # The generic Linux job chooses to use base env, not the one setup by the image
-        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
-        conda activate "${CONDA_ENV}"
-
-        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
-
-        # install llama requirements
-        bash examples/models/llama/install_requirements.sh
-
-        # run eval_llama mmlu task
-        PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh
+  # TODO(larryliu0820): Fix this issue before reenabling it: https://gist.github.com/larryliu0820/7377ecd0d79dbc06076cec8d9f2b85d2
+  # test-eval_llama-mmlu-linux:
+  #   name: test-eval_llama-mmlu-linux
+  #   uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+  #   permissions:
+  #     id-token: write
+  #     contents: read
+  #   strategy:
+  #     fail-fast: false
+  #   with:
+  #     runner: linux.24xlarge
+  #     docker-image: ci-image:executorch-ubuntu-22.04-clang12
+  #     submodules: 'recursive'
+  #     ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+  #     timeout: 90
+  #     script: |
+  #       # The generic Linux job chooses to use base env, not the one setup by the image
+  #       CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+  #       conda activate "${CONDA_ENV}"
+
+  #       PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"
+
+  #       # install llama requirements
+  #       bash examples/models/llama/install_requirements.sh
+
+  #       # run eval_llama mmlu task
+  #       PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh

   test-llama_runner_eager-linux:
     name: test-llama_runner_eager-linux
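
While the job stays commented out, its steps can still be reproduced by hand to debug the linked failure; a rough local sketch lifted directly from the job's own script block (assumes a Linux checkout with submodules and an activated conda environment, mirroring what the CI runner sets up):

    # Environment/build setup step the CI job ran first.
    PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "cmake"

    # Install llama requirements.
    bash examples/models/llama/install_requirements.sh

    # Run the eval_llama mmlu task that was failing in CI.
    PYTHON_EXECUTABLE=python bash .ci/scripts/test_eval_llama_mmlu.sh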

CMakeLists.txt

Lines changed: 8 additions & 1 deletion
@@ -161,6 +161,10 @@ endif()

 if(EXECUTORCH_BUILD_TESTS)
   include(CTest)
+else()
+  # It looks like some of our third-party deps will try to turn this on if it's
+  # not explicitly set, leading to confusing behavior.
+  set(BUILD_TESTING OFF)
 endif()

 add_subdirectory(third-party)
@@ -737,7 +741,10 @@ if(EXECUTORCH_BUILD_EXECUTOR_RUNNER)
   endif()

   set(CMAKE_EXECUTABLE_SUFFIX ".html")
-  target_link_options(executor_runner PUBLIC -sALLOW_MEMORY_GROWTH --embed-file "${WASM_MODEL_DIR}@/")
+  target_link_options(
+    executor_runner PUBLIC -sALLOW_MEMORY_GROWTH --embed-file
+    "${WASM_MODEL_DIR}@/"
+  )
 endif()
 endif()
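
The first hunk matters because include(CTest) defaults the standard BUILD_TESTING option to ON, and some third-party subprojects consult that variable themselves; pinning it OFF when EXECUTORCH_BUILD_TESTS is disabled keeps those dependencies from quietly enabling their test targets. The second hunk only rewraps the Wasm link options; behavior is unchanged. A hedged configure-time sketch (the build-directory name is illustrative):

    # Tests on: include(CTest) runs and BUILD_TESTING defaults to ON.
    cmake -S . -B cmake-out -DEXECUTORCH_BUILD_TESTS=ON

    # Tests off: BUILD_TESTING is now explicitly forced OFF, so third-party
    # deps can no longer turn it back on implicitly.
    cmake -S . -B cmake-out -DEXECUTORCH_BUILD_TESTS=OFF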
