
Commit ef5f9ad

aws-cphHoomaaan authored and committed
Removing lazy import
1 parent 6c2de4d commit ef5f9ad

File tree

2 files changed: +0 −8 lines changed


test/spmd/test_xla_dtensor_spec_conversion.py

Lines changed: 0 additions & 7 deletions
@@ -67,24 +67,17 @@ def test_mesh_conversion(self):
 
   def test_spec_caching(self):
     """Test that _spec property caches results
-
-    Addresses PR comment: "These sorts of tests that rely on the wall clock often lead to
-    annoying flakes in my experience. I think it's sufficient to just test that
-    self._cached_spec has a permanent value after the first call."
     """
     device_count = xr.global_runtime_device_count()
     mesh = DeviceMesh("xla", list(range(device_count)))
     tensor = torch.randn(100, 100)
     xla_tensor = distribute_tensor(tensor, mesh, [Shard(0)])
 
-    # First access should create and cache the spec
     spec1 = xla_tensor._spec
 
-    # Verify the spec is cached
     assert xla_tensor._cached_spec is not None
     assert xla_tensor._cached_spec is spec1
 
-    # Second access should return the cached spec
     spec2 = xla_tensor._spec
     assert spec1 is spec2
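
The deleted docstring spells out the testing strategy: assert that _cached_spec holds a permanent value after the first _spec access, rather than timing anything against the wall clock. Below is a minimal, self-contained sketch of that caching pattern and the identity-based assertions; CachedSpecExample is a hypothetical stand-in, not torch_xla's actual XLAShardedTensor.

    # Minimal sketch of the caching pattern under test. CachedSpecExample is
    # a hypothetical stand-in for XLAShardedTensor, not torch_xla's class.
    class CachedSpecExample:

      def __init__(self):
        self._cached_spec = None  # holds a permanent value after first access

      @property
      def _spec(self):
        # Build the spec once, then reuse the cached object on later accesses.
        if self._cached_spec is None:
          self._cached_spec = object()  # stands in for building a DTensorSpec
        return self._cached_spec


    obj = CachedSpecExample()
    spec1 = obj._spec
    spec2 = obj._spec
    # Identity assertions are deterministic; wall-clock comparisons can flake.
    assert obj._cached_spec is spec1
    assert spec1 is spec2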

torch_xla/distributed/spmd/xla_sharded_tensor.py

Lines changed: 0 additions & 1 deletion
@@ -200,7 +200,6 @@ def _spec(self):
 
     # use existing mesh_shape
     if self.mesh_shape is not None:
-      import torch_xla.runtime as xr
       device_count = xr.global_runtime_device_count()
       device_list = list(range(device_count))
       mesh = DeviceMesh("xla",
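
This deletion only works if xr is already bound at module scope: with the function-local import gone, the xr.global_runtime_device_count() call on the next line must resolve against a top-level import torch_xla.runtime as xr, presumably present elsewhere in xla_sharded_tensor.py. A minimal sketch of the before/after pattern, using a hypothetical helper rather than the actual _spec body:

    import torch_xla.runtime as xr  # module-level import replaces the lazy one


    def xla_device_list():
      # Hypothetical helper illustrating the change. Before this commit, the
      # import statement above lived here inside the function body, re-running
      # the import machinery (a sys.modules lookup) on every call and hiding
      # the dependency from readers and static tooling.
      return list(range(xr.global_runtime_device_count()))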
