
Commit aada9fc

Remove XLA_CUDA and other CUDA build flags. (#9582)
This PR removes the uses and mentions of the `XLA_CUDA` and `TF_CUDA_COMPUTE_CAPABILITIES` flags, which are related to the now-deprecated CUDA build.

**Key Changes:**

- (_.bazelrc_) Removed the CUDA bazel configuration
- (_build_util.py_) Removed the translation of the `XLA_CUDA` environment variable into the `--config=cuda` bazel argument
- Removed uses of `XLA_CUDA` and `TF_CUDA_COMPUTE_CAPABILITIES` throughout the codebase
- Removed some logic for compiling PyTorch/XLA with CUDA support
1 parent 49ac22a commit aada9fc
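For context, the `build_util.py` hunk further down drops the `XLA_CUDA` branch from the env-flag-to-bazel-flag translation. Below is a minimal sketch of that mapping as it looked before this commit; the function and flag names come from the diff, but the exact truthy-value handling of `check_env_flag` is an assumption, not copied from the repository.

```python
# Minimal sketch, not the actual build_util.py: how environment flags were
# translated into bazel arguments before this commit. The XLA_CUDA branch is
# the one this PR deletes; BAZEL_VERBOSE and XLA_CPU_USE_ACL remain.
import os
from typing import Iterable, List


def check_env_flag(name: str, default: str = '') -> bool:
  # Assumed truthy handling; the real helper may differ in detail.
  return os.getenv(name, default).upper() in ['ON', '1', 'YES', 'TRUE', 'Y']


def bazel_options_from_env() -> Iterable[str]:
  bazel_flags: List[str] = []
  if check_env_flag('BAZEL_VERBOSE'):
    bazel_flags.append('-s')
  if check_env_flag('XLA_CUDA'):          # removed by this PR
    bazel_flags.append('--config=cuda')   # removed by this PR
  if check_env_flag('XLA_CPU_USE_ACL'):
    bazel_flags.append('--config=acl')
  return bazel_flags
```

After this PR, setting `XLA_CUDA=1` has no effect, and the `--config=cuda` configuration it pointed at is also removed from `.bazelrc`.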


12 files changed: +6 -73 lines changed


.bazelrc

Lines changed: 0 additions & 13 deletions
```diff
@@ -79,18 +79,6 @@ build:native_arch_posix --host_copt=-march=native
 
 build:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1
 
-build:cuda --repo_env TF_NEED_CUDA=1
-# "sm" means we emit only cubin, which is forward compatible within a GPU generation.
-# "compute" means we emit both cubin and PTX, which is larger but also forward compatible to future GPU generations.
-build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
-build:cuda --@local_config_cuda//:enable_cuda
-build:cuda --define=xla_python_enable_gpu=true
-build:cuda --cxxopt=-DXLA_CUDA=1
-
-# Coverage with cuda/gcc/nvcc requires manually setting coverage flags.
-coverage:cuda --per_file_copt=third_party/.*,torch_xla/.*@--coverage
-coverage:cuda --linkopt=-lgcov
-
 build:acl --define==build_with_acl=true
 
 build:nonccl --define=no_nccl_support=true
@@ -105,7 +93,6 @@ build:tpu --define=with_tpu_support=true
 
 # Run tests serially with TPU and GPU (only 1 device is available).
 test:tpu --local_test_jobs=1
-test:cuda --local_test_jobs=1
 
 #########################################################################
 # RBE config options below.
```

.circleci/build.sh

Lines changed: 0 additions & 1 deletion
```diff
@@ -50,7 +50,6 @@ source $XLA_DIR/xla_env
 export GCLOUD_SERVICE_KEY_FILE="$XLA_DIR/default_credentials.json"
 export SILO_NAME='cache-silo-ci-dev-3.8_cuda_12.1' # cache bucket for CI
 export BUILD_CPP_TESTS='1'
-export TF_CUDA_COMPUTE_CAPABILITIES="sm_50,sm_70,sm_75,compute_80,$TF_CUDA_COMPUTE_CAPABILITIES"
 build_torch_xla $XLA_DIR
 
 popd
```

.github/upstream/Dockerfile

Lines changed: 0 additions & 5 deletions
```diff
@@ -15,11 +15,6 @@ ARG tpuvm=""
 # Disable CUDA for PyTorch
 ENV USE_CUDA "0"
 
-# Enable CUDA for XLA
-ENV XLA_CUDA "${cuda}"
-ENV TF_CUDA_COMPUTE_CAPABILITIES "${cuda_compute}"
-ENV TF_CUDA_PATHS "/usr/local/cuda,/usr/include,/usr"
-
 # CUDA build guidance
 ENV NVIDIA_VISIBLE_DEVICES all
 ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
```

benchmarks/nightly.sh

Lines changed: 1 addition & 1 deletion
```diff
@@ -99,7 +99,7 @@ if [[ ${IS_FRESH_RUN?} ]]; then
   # Query local compute capability. If that fails, assign a sane default.
   LOCAL_CAP=compute_$(nvidia-smi --query-gpu=compute_cap --format=csv | \
     tail -1 | sed 's/\.//g' | grep -E '^[0-9]{2}$' || echo '80')
-  XLA_CUDA=1 TF_CUDA_COMPUTE_CAPABILITIES=${LOCAL_CAP:?} python setup.py develop
+  python setup.py develop
   cd ../..
 
   # Set up torchbench deps.
```

build_util.py

Lines changed: 0 additions & 2 deletions
```diff
@@ -43,8 +43,6 @@ def bazel_options_from_env() -> Iterable[str]:
   # Build configuration.
   if check_env_flag('BAZEL_VERBOSE'):
     bazel_flags.append('-s')
-  if check_env_flag('XLA_CUDA'):
-    bazel_flags.append('--config=cuda')
   if check_env_flag('XLA_CPU_USE_ACL'):
     bazel_flags.append('--config=acl')
 
```

configuration.yaml

Lines changed: 1 addition & 6 deletions
```diff
@@ -4,7 +4,7 @@ variables:
   PJRT_DEVICE:
     description:
       - Indicates which device is being used with PJRT. It can be either CPU,
-        TPU, or CUDA
+        or TPU
     type: string
   PJRT_SELECT_DEFAULT_DEVICE:
     description:
@@ -36,11 +36,6 @@ variables:
       - Verbosity level for GRPC, e.g. INFO, ERROR, etc.
     type: string
     default_value: "ERROR"
-  XLA_CUDA:
-    description:
-      - Build the xla client with CUDA enabled.
-    type: bool
-    default_value: false
   GIT_VERSIONED_XLA_BUILD:
     description:
       - Creates a versioned build. In particular, appends a git sha to the
```
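The configuration.yaml hunk above narrows the documented values of `PJRT_DEVICE` to CPU and TPU. A rough usage sketch follows; it assumes a working `torch_xla` install and uses the long-standing `xla_device()` accessor.

```python
# Rough usage sketch (assumes torch_xla is installed): after this change the
# documented PJRT_DEVICE values are CPU and TPU only.
import os

os.environ["PJRT_DEVICE"] = "CPU"  # or "TPU"

import torch_xla.core.xla_model as xm

device = xm.xla_device()  # XLA device backed by the PJRT runtime
print(device)             # e.g. xla:0
```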

docker/Dockerfile

Lines changed: 0 additions & 4 deletions
```diff
@@ -29,10 +29,6 @@ RUN git clone https://github.com/pytorch/pytorch
 ENV USE_CUDA "0"
 ENV USE_MPI "0"
 
-# Enable CUDA for XLA
-ENV XLA_CUDA "${cuda}"
-ENV TF_CUDA_COMPUTE_CAPABILITIES "${cuda_compute}"
-
 # Whether to build for TPUVM mode
 ENV TPUVM_MODE "${tpuvm}"
 ENV BUNDLE_LIBTPU "${tpuvm}"
```

infra/ansible/config/env.yaml

Lines changed: 0 additions & 8 deletions
```diff
@@ -13,10 +13,6 @@ release_env:
     ACCELERATOR: tpu
     TPUVM_MODE: 1
 
-  cuda:
-    TF_CUDA_COMPUTE_CAPABILITIES: "{{ cuda_compute_capabilities }}"
-    XLA_CUDA: 1
-
 # Variables that will be passed to shell environment only for building PyTorch and XLA libs.
 build_env:
   common:
@@ -41,10 +37,6 @@ build_env:
 
   aarch64:
 
-  cuda:
-    TF_CUDA_COMPUTE_CAPABILITIES: "{{ cuda_compute_capabilities }}"
-    XLA_CUDA: 1
-
   tpu:
     ACCELERATOR: tpu
     TPUVM_MODE: 1
```

scripts/build_torch_wheels.sh

Lines changed: 0 additions & 23 deletions
```diff
@@ -56,28 +56,6 @@ function install_cudnn {
   rm -f "$CUDNN_FILE"
 }
 
-function maybe_install_cuda {
-  if [ "$XLA_CUDA" == "1" ]; then
-    if [ ! -d "/usr/local/cuda" ]; then
-      local CUDA_VER="10.2"
-      local CUDA_SUBVER="89_440.33.01"
-      local CUDA_FILE="cuda_${CUDA_VER}.${CUDA_SUBVER}_linux.run"
-      wget "http://developer.download.nvidia.com/compute/cuda/${CUDA_VER}/Prod/local_installers/${CUDA_FILE}"
-      sudo sh "${CUDA_FILE}" --silent --toolkit
-      rm -f "${CUDA_FILE}"
-    fi
-    if [ ! -f "/usr/local/cuda/include/cudnn.h" ] && [ ! -f "/usr/include/cudnn.h" ]; then
-      install_cudnn
-    fi
-    export TF_CUDA_PATHS="/usr/local/cuda,/usr/include,/usr"
-    maybe_append 'export TF_CUDA_PATHS="/usr/local/cuda,/usr/include,/usr"' ~/.bashrc
-    if [ "$TF_CUDA_COMPUTE_CAPABILITIES" == "" ]; then
-      export TF_CUDA_COMPUTE_CAPABILITIES="7.0"
-    fi
-    maybe_append "export TF_CUDA_COMPUTE_CAPABILITIES=\"$TF_CUDA_COMPUTE_CAPABILITIES\"" ~/.bashrc
-  fi
-}
-
 function maybe_install_sources {
   if [[ $(uname -m) == "aarch64" && ! -d "$HOME/ComputeLibrary" ]]; then
     # install arm compute library
@@ -148,7 +126,6 @@ function install_gcc() {
 
 function install_req_packages() {
   sudo apt-get -y install python3-pip git curl libopenblas-dev vim apt-transport-https ca-certificates wget procps
-  maybe_install_cuda
   install_bazel
   install_ninja
 }
```

setup.py

Lines changed: 0 additions & 3 deletions
```diff
@@ -19,9 +19,6 @@
 # BAZEL_VERBOSE=0
 # turn on verbose messages during the bazel build of the xla/xrt client
 #
-# XLA_CUDA=0
-# build the xla/xrt client with CUDA enabled
-#
 # XLA_CPU_USE_ACL=0
 # whether to use ACL
 #
```
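With `XLA_CUDA` dropped from the setup.py header, the remaining documented build flags include `BAZEL_VERBOSE` and `XLA_CPU_USE_ACL`. A hypothetical way to drive a build with those flags from a script is sketched below; the subprocess invocation is an illustration, not the project's documented workflow.

```python
# Illustration only: exporting the build flags documented in the setup.py
# header above, then invoking the build. The subprocess call is a hypothetical
# wrapper, not the project's documented workflow.
import os
import subprocess
import sys

env = dict(os.environ)
env["BAZEL_VERBOSE"] = "1"      # verbose messages during the bazel build
env["XLA_CPU_USE_ACL"] = "1"    # build with the ACL bazel config
# XLA_CUDA is gone after this PR; setting it no longer has any effect.

subprocess.check_call([sys.executable, "setup.py", "develop"], env=env)
```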
