Skip to content
This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit 7053a60

Browse files
authored
Merge branch 'pytorch:main' into patch-42
2 parents cedb645 + 30372b6 commit 7053a60

File tree

5 files changed

+16
-29
lines changed

5 files changed

+16
-29
lines changed

install/install_requirements.sh

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -51,13 +51,13 @@ echo "Using pip executable: $PIP_EXECUTABLE"
5151
# NOTE: If a newly-fetched version of the executorch repo changes the value of
5252
# PYTORCH_NIGHTLY_VERSION, you should re-run this script to install the necessary
5353
# package versions.
54-
PYTORCH_NIGHTLY_VERSION=dev20250119
54+
PYTORCH_NIGHTLY_VERSION=dev20250124
5555

5656
# Nightly version for torchvision
57-
VISION_NIGHTLY_VERSION=dev20250119
57+
VISION_NIGHTLY_VERSION=dev20250124
5858

5959
# Nightly version for torchtune
60-
TUNE_NIGHTLY_VERSION=dev20250119
60+
TUNE_NIGHTLY_VERSION=dev20250124
6161

6262
# The pip repository that hosts nightly torch packages. cpu by default.
6363
# If cuda is available, based on presence of nvidia-smi, install the pytorch nightly

install/requirements.txt

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -16,8 +16,7 @@ jinja2
1616
# Miscellaneous
1717
snakeviz
1818
sentencepiece
19-
# numpy version range required by GGUF util
20-
numpy >= 1.17, < 2.0
19+
numpy >= 1.17
2120
blobfile
2221
tomli >= 1.1.0 ; python_version < "3.11"
2322
openai

torchchat/cli/builder.py

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -589,9 +589,8 @@ def do_nothing(max_batch_size, max_seq_length):
589589
# attributes will NOT be seen on by AOTI-compiled forward
590590
# function, e.g. calling model.setup_cache will NOT touch
591591
# AOTI compiled and maintained model buffers such as kv_cache.
592-
from torch._inductor.package import load_package
593592

594-
aoti_compiled_model = load_package(
593+
aoti_compiled_model = torch._inductor.aoti_load_package(
595594
str(builder_args.aoti_package_path.absolute())
596595
)
597596

torchchat/export.py

Lines changed: 8 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -75,17 +75,20 @@ def export_for_server(
7575
if not package:
7676
options = {"aot_inductor.output_path": output_path}
7777

78-
path = torch._export.aot_compile(
78+
ep = torch.export.export(
7979
model,
8080
example_inputs,
8181
dynamic_shapes=dynamic_shapes,
82-
options=options,
8382
)
8483

8584
if package:
86-
from torch._inductor.package import package_aoti
87-
88-
path = package_aoti(output_path, path)
85+
path = torch._inductor.aoti_compile_and_package(
86+
ep, package_path=output_path, inductor_configs=options
87+
)
88+
else:
89+
path = torch._inductor.aot_compile(
90+
ep.module(), example_inputs, options=options
91+
)
8992

9093
print(f"The generated packaged model can be found at: {path}")
9194
return path

torchchat/model.py

Lines changed: 3 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -657,7 +657,7 @@ def __init__(self, config: TransformerArgs) -> None:
657657
self.layers[str(layer_id)] = TransformerBlock(config)
658658

659659
if config.stage_idx == config.n_stages - 1:
660-
self.norm = RMSNorm(config.dim, eps=config.norm_eps)
660+
self.norm = nn.RMSNorm(config.dim, eps=config.norm_eps)
661661
self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
662662
if config.tie_word_embeddings:
663663
self.output.weight = self.tok_embeddings.weight
@@ -751,8 +751,8 @@ def __init__(self, config: TransformerArgs) -> None:
751751
super().__init__()
752752
self.attention = Attention(config)
753753
self.feed_forward = FeedForward(config)
754-
self.ffn_norm = RMSNorm(config.dim, config.norm_eps)
755-
self.attention_norm = RMSNorm(config.dim, config.norm_eps)
754+
self.ffn_norm = nn.RMSNorm(config.dim, config.norm_eps)
755+
self.attention_norm = nn.RMSNorm(config.dim, config.norm_eps)
756756
# None for llama architecture, set for granite architectures
757757
self.residual_multiplier = (
758758
config.residual_multiplier
@@ -928,20 +928,6 @@ def forward(self, x: Tensor) -> Tensor:
928928
return self.w2(F.silu(self.w1(x)) * self.w3(x))
929929

930930

931-
class RMSNorm(nn.Module):
932-
def __init__(self, dim: int, eps: float = 1e-5):
933-
super().__init__()
934-
self.eps = eps
935-
self.weight = nn.Parameter(torch.ones(dim))
936-
937-
def _norm(self, x):
938-
return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
939-
940-
def forward(self, x: Tensor) -> Tensor:
941-
output = self._norm(x.float()).type_as(x)
942-
return output * self.weight
943-
944-
945931
def apply_scaling(freqs: torch.Tensor, rope_scaling: Dict[str, Any]):
946932
# Check for the presence of the required keys
947933
required_keys = {

0 commit comments

Comments (0)