
Commit 3aaae84

Merge branch 'master' into toLocal_wspec
2 parents b784f0c + e9a1c5f commit 3aaae84


63 files changed: +852 -973 lines changed

.bazelrc

Lines changed: 0 additions & 13 deletions
@@ -79,18 +79,6 @@ build:native_arch_posix --host_copt=-march=native
 
 build:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1
 
-build:cuda --repo_env TF_NEED_CUDA=1
-# "sm" means we emit only cubin, which is forward compatible within a GPU generation.
-# "compute" means we emit both cubin and PTX, which is larger but also forward compatible to future GPU generations.
-build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
-build:cuda --@local_config_cuda//:enable_cuda
-build:cuda --define=xla_python_enable_gpu=true
-build:cuda --cxxopt=-DXLA_CUDA=1
-
-# Coverage with cuda/gcc/nvcc requires manually setting coverage flags.
-coverage:cuda --per_file_copt=third_party/.*,torch_xla/.*@--coverage
-coverage:cuda --linkopt=-lgcov
-
 build:acl --define==build_with_acl=true
 
 build:nonccl --define=no_nccl_support=true
@@ -105,7 +93,6 @@ build:tpu --define=with_tpu_support=true
 
 # Run tests serially with TPU and GPU (only 1 device is available).
 test:tpu --local_test_jobs=1
-test:cuda --local_test_jobs=1
 
 #########################################################################
 # RBE config options below.
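For context, a `build:<name>` or `test:<name>` group in `.bazelrc` only takes effect when selected with `--config=<name>`; with the CUDA groups removed, a TPU run would look roughly like this (a sketch; the target label is illustrative, not part of this commit):

```sh
# Selects the remaining "tpu" config group; tests run serially because of
# `test:tpu --local_test_jobs=1`. The target label below is illustrative.
bazel test --config=tpu //test/cpp:test_tensor
```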

.circleci/build.sh

Lines changed: 0 additions & 1 deletion
@@ -50,7 +50,6 @@ source $XLA_DIR/xla_env
 export GCLOUD_SERVICE_KEY_FILE="$XLA_DIR/default_credentials.json"
 export SILO_NAME='cache-silo-ci-dev-3.8_cuda_12.1' # cache bucket for CI
 export BUILD_CPP_TESTS='1'
-export TF_CUDA_COMPUTE_CAPABILITIES="sm_50,sm_70,sm_75,compute_80,$TF_CUDA_COMPUTE_CAPABILITIES"
 build_torch_xla $XLA_DIR
 
 popd

.circleci/common.sh

Lines changed: 2 additions & 1 deletion
@@ -112,7 +112,8 @@ function build_torch_xla() {
   # Need to uncomment the line below.
   # Currently it fails upstream XLA CI.
   # pip install plugins/cuda -v
-  pip install 'torch_xla[pallas]'
+  pip install --pre torch_xla[pallas] --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
+
   popd
 }
 
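The replacement command pulls pre-release wheels from the JAX artifact registry; a quick sanity check that the `pallas` extra resolved (a minimal sketch, assuming the install above succeeded):

```sh
# torch_xla's pallas path imports jax under the hood, so both packages
# should now be importable from the pre-release index.
python -c "import torch_xla, jax; print(torch_xla.__version__, jax.__version__)"
```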

.github/scripts/run_tests.sh

Lines changed: 1 addition & 0 deletions
@@ -55,6 +55,7 @@ function run_torch_xla_cpp_tests() {
       "test_tensor"
       # disable test_xla_backend_intf since it is flaky on upstream
       #"test_xla_backend_intf"
+      "test_xla_generator"
       "test_xla_sharding"
       "test_runtime"
       "test_status_dont_show_cpp_stacktraces"

.github/upstream/Dockerfile

Lines changed: 0 additions & 5 deletions
@@ -15,11 +15,6 @@ ARG tpuvm=""
 # Disable CUDA for PyTorch
 ENV USE_CUDA "0"
 
-# Enable CUDA for XLA
-ENV XLA_CUDA "${cuda}"
-ENV TF_CUDA_COMPUTE_CAPABILITIES "${cuda_compute}"
-ENV TF_CUDA_PATHS "/usr/local/cuda,/usr/include,/usr"
-
 # CUDA build guidance
 ENV NVIDIA_VISIBLE_DEVICES all
 ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
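With the CUDA ENV block gone, `tpuvm` is the only build argument left in this hunk that shapes the image; a hedged sketch of building it (the image tag is illustrative):

```sh
# Builds the upstream CI image without any CUDA environment variables;
# only the tpuvm ARG from the Dockerfile is passed. Tag is illustrative.
docker build --build-arg tpuvm=1 -t torch-xla-upstream -f .github/upstream/Dockerfile .
```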

.github/workflows/_test.yml

Lines changed: 1 addition & 1 deletion
@@ -140,7 +140,7 @@ jobs:
           set -x
 
           pip install expecttest unittest-xml-reporting
-          pip install 'torch_xla[pallas]'
+          pip install --pre 'torch_xla[pallas]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
 
           if [[ ! -z "$RUN_BENCHMARK_TESTS" ]]; then
             pip install -r pytorch/xla/benchmarks/requirements.txt

.github/workflows/_tpu_ci.yml

Lines changed: 2 additions & 2 deletions
@@ -52,8 +52,8 @@ jobs:
           pip install fsspec
           pip install rich
           # jax and libtpu is needed for pallas tests.
-          pip install 'torch_xla[pallas]'
-          pip install 'torch_xla[tpu]' -f https://storage.googleapis.com/libtpu-wheels/index.html -f https://storage.googleapis.com/libtpu-releases/index.html
+          pip install --pre 'torch_xla[pallas]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
+          pip install --pre 'torch_xla[tpu]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
           pip install --upgrade protobuf
       - name: Run Tests (${{ matrix.test_script }})
         if: inputs.has_code_changes == 'true'
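Once both wheels are installed, the TPU runtime should be visible before the test step runs; a minimal check, assuming the job executes on a TPU host:

```sh
# xla_device() initializes the PJRT TPU client; a failure here would point
# at libtpu not resolving from the release links above.
PJRT_DEVICE=TPU python -c "import torch_xla.core.xla_model as xm; print(xm.xla_device())"
```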

BUILD

Lines changed: 4 additions & 19 deletions
@@ -46,22 +46,6 @@ cc_binary(
     ]),
 )
 
-cc_binary(
-    name = "_XLAC_cuda_functions.so",
-    copts = [
-        "-fopenmp",
-        "-fPIC",
-    ],
-    linkopts = [
-        "-Wl,-soname,_XLAC_cuda_functions.so",
-    ],
-    linkshared = 1,
-    visibility = ["//visibility:public"],
-    deps = [
-        "//torch_xla/csrc:aten_cuda_functions",
-    ],
-)
-
 test_suite(
     name = "cpp_tests",
     # testonly = True,
@@ -72,15 +56,16 @@ test_suite(
         "//test/cpp:test_aten_xla_tensor_4",
         "//test/cpp:test_aten_xla_tensor_5",
         "//test/cpp:test_aten_xla_tensor_6",
+        "//test/cpp:test_debug_macros",
         "//test/cpp:test_ir",
         "//test/cpp:test_lazy",
         "//test/cpp:test_replication",
-        "//test/cpp:test_tensor",
-        "//test/cpp:test_xla_sharding",
         "//test/cpp:test_runtime",
         "//test/cpp:test_status_dont_show_cpp_stacktraces",
         "//test/cpp:test_status_show_cpp_stacktraces",
-        "//test/cpp:test_debug_macros",
+        "//test/cpp:test_tensor",
+        "//test/cpp:test_xla_generator",
+        "//test/cpp:test_xla_sharding",
         "//torch_xla/csrc/runtime:pjrt_computation_client_test",
         # "//torch_xla/csrc/runtime:ifrt_computation_client_test",
     ],
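The re-sorted `cpp_tests` suite, which now includes `test_xla_generator`, would be exercised as a whole with something like this (assuming the suite keeps its root-level label):

```sh
# Runs every target listed in the test_suite above from the root BUILD file.
bazel test //:cpp_tests
```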

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -162,7 +162,7 @@ commands on your Linux machine directly, outside of the container.
    -f https://storage.googleapis.com/libtpu-releases/index.html
 
    # Optional: if you're using custom kernels, install pallas dependencies
-   pip install torch_xla[pallas]
+   pip install --pre torch_xla[pallas] --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
    ```
 
 1. If you are running on a TPU VM, ensure `torch` and `torch_xla` were built and

README.md

Lines changed: 2 additions & 2 deletions
@@ -32,10 +32,10 @@ Note: Builds are available for Python 3.8 to 3.11; please use one of the support
 # - for conda
 # conda create -n py311 python=3.11
 
-pip install torch==2.7.0 'torch_xla[tpu]==2.7.0'
+pip install torch==2.8.0 'torch_xla[tpu]==2.8.0'
 
 # Optional: if you're using custom kernels, install pallas dependencies
-pip install 'torch_xla[pallas]'
+pip install --pre torch_xla[pallas] --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
 ```
 **As of 07/16/2025 and starting from Pytorch/XLA 2.8 release, PyTorch/XLA will
 provide nightly and release wheels for Python 3.11 to 3.13**
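Since `torch` and `torch_xla` are pinned in lockstep above, a quick post-install check (sketch only):

```sh
# Both packages should report the same 2.8.0 release after the pinned install.
python -c "import torch, torch_xla; print(torch.__version__, torch_xla.__version__)"
```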
