This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit ab61f0f
update install
1 parent: c0530d0

3 files changed: 28 additions, 12 deletions


install/install_requirements.sh (27 additions, 10 deletions)
@@ -53,16 +53,16 @@ echo "Using pip executable: $PIP_EXECUTABLE"
 # package versions.
 if [[ -x "$(command -v xpu-smi)" ]];
 then
-  PYTORCH_NIGHTLY_VERSION=dev20241217
+  PYTORCH_NIGHTLY_VERSION=dev20250110
 else
   PYTORCH_NIGHTLY_VERSION=dev20241218
 fi

 # Nightly version for torchvision
-VISION_NIGHTLY_VERSION=dev20241218
+VISION_NIGHTLY_VERSION=dev20250111

 # Nightly version for torchtune
-TUNE_NIGHTLY_VERSION=dev20241218
+TUNE_NIGHTLY_VERSION=dev20250105

 # Uninstall triton, as nightly will depend on pytorch-triton, which is one and the same
 (
@@ -90,9 +90,9 @@ fi
 if [[ -x "$(command -v xpu-smi)" ]];
 then
   REQUIREMENTS_TO_INSTALL=(
-    torch=="2.6.0.${PYTORCH_NIGHTLY_VERSION}"
+    torch=="2.7.0.${PYTORCH_NIGHTLY_VERSION}"
     torchvision=="0.22.0.${VISION_NIGHTLY_VERSION}"
-    torchtune=="0.4.0"
+    # torchtune=="0.4.0"
   )
 else
   REQUIREMENTS_TO_INSTALL=(
@@ -122,10 +122,28 @@ fi

 # For torchao need to install from github since nightly build doesn't have macos build.
 # TODO: Remove this and install nightly build, once it supports macos
-(
-  set -x
-  $PIP_EXECUTABLE install git+https://github.com/pytorch/ao.git@2f97b0955953fa1a46594a27f0df2bc48d93e79d
-)
+if [[ -x "$(command -v xpu-smi)" ]];
+then
+  # install torchao nightly for xpu
+  (
+    set -x
+    $PIP_EXECUTABLE install --extra-index-url "${TORCH_NIGHTLY_URL}" torchao=="0.8.0.dev20250110"
+  )
+else
+  (
+    set -x
+    $PIP_EXECUTABLE install git+https://github.com/pytorch/ao.git@2f97b0955953fa1a46594a27f0df2bc48d93e79d
+  )
+fi
+
+# install torchtune from source for xpu
+if [[ -x "$(command -v xpu-smi)" ]];
+then
+  (
+    set -x
+    $PIP_EXECUTABLE install git+https://github.com/pytorch/torchtune
+  )
+fi

 if [[ -x "$(command -v nvidia-smi)" ]]; then
 (
@@ -134,7 +152,6 @@ if [[ -x "$(command -v nvidia-smi)" ]]; then
 )
 fi

-
 (
   set -x
   $PIP_EXECUTABLE install evaluate=="0.4.3" lm-eval=="0.4.2" psutil=="6.0.0"
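A quick post-install sanity check, not part of this commit but useful after the script finishes, can confirm that the pinned nightlies resolved and show which accelerator backend the installed build exposes (torch.xpu only exists on builds with XPU support, hence the hasattr guard):

# Suggested post-install sanity check (not from the commit).
import torch
import torchvision

print("torch:", torch.__version__)              # e.g. a 2.7.0.dev20250110 build on the XPU pin
print("torchvision:", torchvision.__version__)  # e.g. a 0.22.0.dev20250111 build
print("cuda available:", torch.cuda.is_available())
print("xpu available:", hasattr(torch, "xpu") and torch.xpu.is_available())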

torchchat/generate.py (0 additions, 1 deletion)
@@ -1168,7 +1168,6 @@ def callback(x, *, done_generating=False):
 else:
     torch.profiler._utils._init_for_cuda_graphs()
     prof = torch.profiler.profile()
-    print("prof is: ", prof)
 t0 = time.perf_counter()
 num_tokens_generated = 0
 with prof:
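For context, the surrounding code builds a profiler object up front and then runs generation inside "with prof:"; the deleted line only printed the profiler object. Below is a minimal, generic sketch of that torch.profiler pattern; the matmul workload and trace filename are illustrative assumptions, not torchchat code.

import time
import torch

prof = torch.profiler.profile()         # default activities: CPU (+CUDA when available)
t0 = time.perf_counter()
with prof:
    x = torch.randn(1024, 1024)
    y = x @ x                           # stand-in for the token-generation work
print(f"elapsed: {time.perf_counter() - t0:.3f}s")
prof.export_chrome_trace("trace.json")  # inspect in chrome://tracing or Perfetto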

torchchat/utils/quantize.py (1 addition, 1 deletion)
@@ -111,7 +111,7 @@ def quantize_model(
     raise RuntimeError(f"unknown quantizer {quantizer} specified")
 else:
     # Use tensor subclass API for int4 weight only.
-    if device == "cuda" and quantizer == "linear:int4":
+    if (device == "cuda" or device == "xpu") and quantizer == "linear:int4":
         quantize_(model, int4_weight_only(q_kwargs["groupsize"]))
         if not support_tensor_subclass:
             unwrap_tensor_subclass(model)
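A standalone sketch of the tensor-subclass int4 weight-only path this change enables on XPU, assuming a recent torchao build and an available CUDA or XPU device; the toy model and groupsize of 128 are illustrative, not taken from the commit:

import torch
from torchao.quantization import quantize_, int4_weight_only

# Pick XPU when available, otherwise CUDA (the two devices the changed branch accepts).
device = "xpu" if hasattr(torch, "xpu") and torch.xpu.is_available() else "cuda"
model = torch.nn.Sequential(torch.nn.Linear(1024, 1024)).to(device=device, dtype=torch.bfloat16)

# Same call shape as quantize_model above: int4 weight-only with per-group quantization.
quantize_(model, int4_weight_only(128))  # 128 stands in for q_kwargs["groupsize"]
print(type(model[0].weight))             # weight is now a quantized tensor subclass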
