1 parent e6f3056 commit daa23ea
src/lightning/fabric/accelerators/xla.py
@@ -107,7 +107,7 @@ def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None:
 
 def _using_pjrt() -> bool:
     # `using_pjrt` is removed in torch_xla 2.5
-    if _XLA_GREATER_EQUAL_2_5:
+    if True:
         from torch_xla import runtime as xr
 
         return xr.device_type() is not None
@@ -117,6 +117,10 @@ def _using_pjrt() -> bool:
 
         return xr.using_pjrt()
 
+    from torch_xla.experimental import pjrt
+
+    return pjrt.using_pjrt()
+
 
 def _parse_tpu_devices(devices: Union[int, str, list[int]]) -> Union[int, list[int]]:
     """Parses the TPU devices given in the format as accepted by the