diff --git a/.github/scripts/build_jetson_6.1.sh b/.github/scripts/build_jetson_6.1.sh new file mode 100755 index 0000000000..a4fb365630 --- /dev/null +++ b/.github/scripts/build_jetson_6.1.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# how to run the jetson build on jetpack6.1: +# ./.github/scripts/build_jetson_6.1.sh + +set -euxo pipefail + +# get jetpack version: eg: Version: 6.1+b123 ---> 6.1 +jetpack_version=$(apt show nvidia-jetpack 2>/dev/null | grep Version: | cut -d ' ' -f 2 | cut -d '+' -f 1) +python_version=$(python --version) +cuda_version=$(nvcc --version | grep Cuda | grep release | cut -d ',' -f 2 | sed -e 's/ release //g') +echo "Current jetpack_version: ${jetpack_version} cuda_version: ${cuda_version} python_version: ${python_version} " + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:/usr/lib/aarch64-linux-gnu:/usr/include/aarch64-linux-gnu:/usr/local/cuda-${cuda_version}/lib64 + +# make sure nvidia-jetpack dev package is installed: +# go to /usr/include/aarch64-linux-gnu/ if you can see NvInfer.h(tensorrt related header files) which means dev package is installed +# if not installed, install via the below cmd: +# sudo apt update +# sudo apt install nvidia-jetpack + +# make sure cuda is installed: +# nvcc --version or go to /usr/local/cuda/bin to see whether it is installed +# the install nvidia-jetpack dev package step will automatically install the cuda toolkit +# if not installed, install via the below cmd: +# sudo apt update +# sudo apt install cuda-toolkit-12-6 + +# make sure bazel is installed via the below cmd: +# wget -v https://github.com/bazelbuild/bazelisk/releases/download/v1.20.0/bazelisk-linux-arm64 +# sudo mv bazelisk-linux-arm64 /usr/bin/bazel +# chmod +x /usr/bin/bazel + +# make sure pip is installed: +# sudo apt install python3-pip + +# make sure setuptools is installed with the version < 71.*.* +# version 71.*.* will give the following error during build +# TypeError: canonicalize_version() got an unexpected keyword argument 
'strip_trailing_zero' +# python -m pip install setuptools==70.2.0 + +# make sure torch is installed via the below cmd: +# wget https://developer.download.nvidia.cn/compute/redist/jp/v61/pytorch/torch-2.5.0a0+872d972e41.nv24.08.17622132-cp310-cp310-linux_aarch64.whl +# python -m pip install torch-2.5.0a0+872d972e41.nv24.08.17622132-cp310-cp310-linux_aarch64.whl + +# make sure libcusparseLt.so exists if not download and copy via the below cmd: +# wget https://developer.download.nvidia.com/compute/cusparselt/redist/libcusparse_lt/linux-sbsa/libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz +# tar xf libcusparse_lt-linux-sbsa-0.5.2.1-archive.tar.xz +# sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/include/* /usr/local/cuda/include/ +# sudo cp -a libcusparse_lt-linux-sbsa-0.5.2.1-archive/lib/* /usr/local/cuda/lib64/ + +export TORCH_INSTALL_PATH=$(python -c "import torch, os; print(os.path.dirname(torch.__file__))") +export SITE_PACKAGE_PATH=${TORCH_INSTALL_PATH::-6} +export CUDA_HOME=/usr/local/cuda-${cuda_version}/ + +# replace the Module file with jetpack one +cat toolchains/jp_workspaces/MODULE.bazel.tmpl | envsubst > MODULE.bazel + +# build on jetpack +python setup.py --use-cxx11-abi install --user + diff --git a/setup.py b/setup.py index de532d9071..0b8f47fb6f 100644 --- a/setup.py +++ b/setup.py @@ -156,12 +156,14 @@ def load_dep_info(): JETPACK_VERSION = "4.6" elif version == "5.0": JETPACK_VERSION = "5.0" + elif version == "6.1": + JETPACK_VERSION = "6.1" if not JETPACK_VERSION: warnings.warn( - "Assuming jetpack version to be 5.0, if not use the --jetpack-version option" + "Assuming jetpack version to be 6.1, if not use the --jetpack-version option" ) - JETPACK_VERSION = "5.0" + JETPACK_VERSION = "6.1" if not CXX11_ABI: warnings.warn( @@ -213,12 +215,15 @@ def build_libtorchtrt_pre_cxx11_abi( elif JETPACK_VERSION == "5.0": cmd.append("--platforms=//toolchains:jetpack_5.0") print("Jetpack version: 5.0") + elif JETPACK_VERSION == "6.1": + 
cmd.append("--platforms=//toolchains:jetpack_6.1") + print("Jetpack version: 6.1") if CI_BUILD: cmd.append("--platforms=//toolchains:ci_rhel_x86_64_linux") print("CI based build") - print("building libtorchtrt") + print(f"building libtorchtrt {cmd=}") status_code = subprocess.run(cmd).returncode if status_code != 0: diff --git a/toolchains/BUILD b/toolchains/BUILD index aa6486d075..a3cd275373 100644 --- a/toolchains/BUILD +++ b/toolchains/BUILD @@ -35,6 +35,15 @@ platform( ], ) +platform( + name = "jetpack_6.1", + constraint_values = [ + "@platforms//os:linux", + "@platforms//cpu:aarch64", + "@//toolchains/jetpack:6.1", + ], +) + platform( name = "ci_rhel_x86_64_linux", constraint_values = [ diff --git a/toolchains/jetpack/BUILD b/toolchains/jetpack/BUILD index fa5ddd2c87..ed37a864b6 100644 --- a/toolchains/jetpack/BUILD +++ b/toolchains/jetpack/BUILD @@ -11,3 +11,8 @@ constraint_value( name = "4.6", constraint_setting = ":jetpack", ) + +constraint_value( + name = "6.1", + constraint_setting = ":jetpack", +) \ No newline at end of file diff --git a/toolchains/jp_workspaces/MODULE.bazel.tmpl b/toolchains/jp_workspaces/MODULE.bazel.tmpl new file mode 100644 index 0000000000..dd694b91d2 --- /dev/null +++ b/toolchains/jp_workspaces/MODULE.bazel.tmpl @@ -0,0 +1,61 @@ +module( + name = "torch_tensorrt", + repo_name = "org_pytorch_tensorrt", + version = "${BUILD_VERSION}" +) + +bazel_dep(name = "googletest", version = "1.14.0") +bazel_dep(name = "platforms", version = "0.0.10") +bazel_dep(name = "rules_cc", version = "0.0.9") +bazel_dep(name = "rules_python", version = "0.34.0") + +python = use_extension("@rules_python//python/extensions:python.bzl", "python") +python.toolchain( + ignore_root_user_error = True, + python_version = "3.11", +) + +bazel_dep(name = "rules_pkg", version = "1.0.1") +git_override( + module_name = "rules_pkg", + commit = "17c57f4", + remote = "https://github.com/narendasan/rules_pkg", +) + +local_repository = 
use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "local_repository") + +# External dependency for torch_tensorrt if you already have precompiled binaries. +local_repository( + name = "torch_tensorrt", + path = "${SITE_PACKAGE_PATH}/torch_tensorrt", +) + + +new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository") + +# CUDA should be installed on the system locally +new_local_repository( + name = "cuda", + build_file = "@//third_party/cuda:BUILD", + path = "${CUDA_HOME}", +) + +new_local_repository( + name = "libtorch", + path = "${TORCH_INSTALL_PATH}", + build_file = "third_party/libtorch/BUILD", +) + +new_local_repository( + name = "libtorch_pre_cxx11_abi", + path = "${TORCH_INSTALL_PATH}", + build_file = "third_party/libtorch/BUILD" +) + +new_local_repository( + name = "tensorrt", + path = "/usr/", + build_file = "@//third_party/tensorrt/local:BUILD" +) + + \ No newline at end of file