diff --git a/.ci/scripts/build_llama_android.sh b/.ci/scripts/build_llama_android.sh index 2460f5483d9..ed0fa5d16bb 100644 --- a/.ci/scripts/build_llama_android.sh +++ b/.ci/scripts/build_llama_android.sh @@ -22,18 +22,12 @@ install_executorch_and_backend_lib() { ANDROID_NDK=/opt/ndk BUCK2=buck2 ANDROID_ABI=arm64-v8a - cmake -DBUCK2="${BUCK2}" \ + cmake --preset llm \ + -DBUCK2="${BUCK2}" \ -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \ -DANDROID_ABI="${ANDROID_ABI}" \ -DCMAKE_INSTALL_PREFIX=cmake-android-out \ -DCMAKE_BUILD_TYPE=Release \ - -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ - -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ - -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ - -DEXECUTORCH_BUILD_XNNPACK=ON \ - -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ - -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ - -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \ -DXNNPACK_ENABLE_ARM_BF16=OFF \ -Bcmake-android-out . @@ -51,11 +45,7 @@ build_llama_runner() { -DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK"/build/cmake/android.toolchain.cmake \ -DANDROID_ABI="${ANDROID_ABI}" \ -DCMAKE_INSTALL_PREFIX=cmake-android-out \ - -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=python \ - -DEXECUTORCH_BUILD_XNNPACK=ON \ - -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ - -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ - -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \ + -DCMAKE_BUILD_TYPE=Release \ -Bcmake-android-out/examples/models/llama examples/models/llama cmake --build cmake-android-out/examples/models/llama -j4 --config Release diff --git a/.ci/scripts/test_llama.sh b/.ci/scripts/test_llama.sh index 8e8ba898c48..4ed5ec308c5 100644 --- a/.ci/scripts/test_llama.sh +++ b/.ci/scripts/test_llama.sh @@ -152,21 +152,11 @@ which "${PYTHON_EXECUTABLE}" cmake_install_executorch_libraries() { echo "Installing libexecutorch.a, libextension_module.so, libportable_ops_lib.a" rm -rf cmake-out - retry cmake \ + retry cmake --preset llm \ -DCMAKE_INSTALL_PREFIX=cmake-out \ -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \ - 
-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ - -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ - -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ - -DEXECUTORCH_BUILD_KERNELS_CUSTOM="$CUSTOM" \ - -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ - -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ - -DEXECUTORCH_BUILD_XNNPACK="$XNNPACK" \ - -DEXECUTORCH_BUILD_MPS="$MPS" \ - -DEXECUTORCH_BUILD_COREML="$COREML" \ -DEXECUTORCH_BUILD_QNN="$QNN" \ -DQNN_SDK_ROOT="$QNN_SDK_ROOT" \ - -DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \ -Bcmake-out . cmake --build cmake-out -j9 --target install --config "$CMAKE_BUILD_TYPE" } @@ -181,10 +171,6 @@ cmake_build_llama_runner() { retry cmake \ -DCMAKE_INSTALL_PREFIX=cmake-out \ -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \ - -DEXECUTORCH_BUILD_KERNELS_CUSTOM="$CUSTOM" \ - -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ - -DEXECUTORCH_BUILD_XNNPACK="$XNNPACK" \ - -DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \ -Bcmake-out/${dir} \ ${dir} cmake --build cmake-out/${dir} -j9 --config "$CMAKE_BUILD_TYPE" diff --git a/.github/workflows/build-presets.yml b/.github/workflows/build-presets.yml index 168abc4a241..59e9438dd07 100644 --- a/.github/workflows/build-presets.yml +++ b/.github/workflows/build-presets.yml @@ -20,7 +20,7 @@ jobs: strategy: fail-fast: false matrix: - preset: [macos-arm64, pybind] + preset: [macos-arm64, pybind, llm] with: job-name: build ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} @@ -39,7 +39,7 @@ jobs: strategy: fail-fast: false matrix: - preset: [pybind] + preset: [pybind, llm] runner: [linux.2xlarge, linux.arm64.2xlarge] docker-image: [executorch-ubuntu-22.04-clang12, executorch-ubuntu-22.04-gcc11-aarch64] # Excluding specific runner + docker image combinations that don't make sense: diff --git a/CMakePresets.json b/CMakePresets.json index 38630690958..ff30558d268 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -36,6 +36,26 @@ "string": "${hostSystemName}", "list": ["Darwin", "Linux", "Windows"] } + }, 
+ { + "name": "llm", + "displayName": "Build LLM libraries", + "inherits": [ + "common" + ], + "cacheVariables": { + "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/llm.cmake", + "CMAKE_OSX_DEPLOYMENT_TARGET": "10.15" + }, + "condition": { + "type": "inList", + "string": "${hostSystemName}", + "list": [ + "Darwin", + "Linux", + "Windows" + ] + } + } ] } diff --git a/examples/models/llama/CMakeLists.txt b/examples/models/llama/CMakeLists.txt index 4ee6f435f9e..62e589ba900 100644 --- a/examples/models/llama/CMakeLists.txt +++ b/examples/models/llama/CMakeLists.txt @@ -88,7 +88,7 @@ add_subdirectory(runner) set(link_libraries executorch gflags) set(_srcs main.cpp) -if(EXECUTORCH_BUILD_KERNELS_OPTIMIZED) +if(TARGET optimized_native_cpu_ops_lib) list( APPEND link_libraries @@ -108,7 +108,7 @@ endif() target_link_options_shared_lib(quantized_ops_lib) list(APPEND link_libraries quantized_kernels quantized_ops_lib) -if(EXECUTORCH_BUILD_KERNELS_CUSTOM) +if(TARGET custom_ops) target_link_options_shared_lib(custom_ops) list(APPEND link_libraries custom_ops) endif() diff --git a/tools/cmake/preset/llm.cmake b/tools/cmake/preset/llm.cmake new file mode 100644 index 00000000000..da1364eb2ad --- /dev/null +++ b/tools/cmake/preset/llm.cmake @@ -0,0 +1,32 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Enable logging even when in release mode. We are building for desktop, where +# saving a few kB is less important than showing useful error information to +# users. +set_overridable_option(EXECUTORCH_ENABLE_LOGGING ON)
+# keep sorted +set_overridable_option(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER ON) +set_overridable_option(EXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR ON) +set_overridable_option(EXECUTORCH_BUILD_EXTENSION_MODULE ON) +set_overridable_option(EXECUTORCH_BUILD_EXTENSION_TENSOR ON) +set_overridable_option(EXECUTORCH_BUILD_KERNELS_CUSTOM ON) +set_overridable_option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED ON) +set_overridable_option(EXECUTORCH_BUILD_KERNELS_QUANTIZED ON) +set_overridable_option(EXECUTORCH_BUILD_XNNPACK ON) + +if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + set_overridable_option(EXECUTORCH_BUILD_COREML ON) + set_overridable_option(EXECUTORCH_BUILD_MPS ON) +elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux") + # Linux-specific code here +elseif(CMAKE_SYSTEM_NAME STREQUAL "Windows") + # Windows or other OS-specific code here +elseif(CMAKE_SYSTEM_NAME STREQUAL "Android") + # Android-specific code here +else() + message(FATAL_ERROR "Unsupported CMAKE_SYSTEM_NAME for LLM: ${CMAKE_SYSTEM_NAME}") +endif()