This repository was archived by the owner on Sep 4, 2025. It is now read-only.
File tree Expand file tree Collapse file tree 2 files changed +3
-2
lines changed
docs/source/getting_started Expand file tree Collapse file tree 2 files changed +3
-2
lines changed Original file line number Diff line number Diff line change @@ -59,7 +59,7 @@ First, install the dependencies:
59
59
$ export DATE="20240828"
60
60
$ export TORCH_VERSION="2.5.0"
61
61
$ pip install https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch-${TORCH_VERSION}.dev${DATE}-cp310-cp310-linux_x86_64.whl
62
- $ pip3 install https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-${TORCH_VERSION}.dev${DATE}-cp310-cp310-linux_x86_64.whl
62
+ $ pip install https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-${TORCH_VERSION}.dev${DATE}-cp310-cp310-linux_x86_64.whl
63
63
64
64
$ # Install JAX and Pallas.
65
65
$ pip install torch_xla[tpu] -f https://storage.googleapis.com/libtpu-releases/index.html
Original file line number Diff line number Diff line change @@ -102,8 +102,9 @@ def init_device(self) -> None:
102
102
# NOTE(woosuk): Set per-rank cache path since different ranks
103
103
# can have slightly different XLA graphs.
104
104
world_size = self.parallel_config.world_size
105
+ rank = xr.global_ordinal()
105
106
per_rank_path = os.path.join(envs.VLLM_XLA_CACHE_PATH,
106
- f"tp{world_size}_rank{self.rank}")
107
+ f"tp{world_size}_rank{rank}")
107
108
xr.initialize_cache(per_rank_path, readonly=False)
108
109
109
110
def load_model(self):
You can’t perform that action at this time.
0 commit comments