Skip to content

Commit 3bf9203

Browse files
Update PyTorch pin
Signed-off-by: Whitney Tsang <[email protected]>
1 parent 37b03ae commit 3bf9203

File tree

7 files changed: +127 −172 lines changed

.github/pins/pytorch.txt

Lines changed: 1 addition & 1 deletion

@@ -1 +1 @@
-3f1636ebef9b45e8a3cb0eb20d327ee6acb74be0
+54cc63b467f24242cf0d6538d3e1df39e553daf1

scripts/patch-pytorch.sh

Lines changed: 0 additions & 1 deletion

@@ -38,5 +38,4 @@ echo "Applying PyTorch patches in $REPO_ROOT"
 apply_patch ./patch/flex_attn_143553.patch
 apply_patch ./patch/pytorch_fp64.patch
 apply_patch ./patch/pytorch_global_scratch.patch
-apply_patch ./patch/test_compile_subprocess.patch
 apply_patch ./patch/flex_decoding.patch

scripts/patch/flex_attn_143553.patch

Lines changed: 117 additions & 147 deletions
Large diffs are not rendered by default.

scripts/patch/flex_decoding.patch

Lines changed: 2 additions & 2 deletions

@@ -1,8 +1,8 @@
 diff --git a/torch/_inductor/kernel/flex/flex_decoding.py b/torch/_inductor/kernel/flex/flex_decoding.py
-index 83c6b59cec96..e89981286ed8 100644
+index d3e6f14f44..cd2a52ac2c 100644
 --- a/torch/_inductor/kernel/flex/flex_decoding.py
 +++ b/torch/_inductor/kernel/flex/flex_decoding.py
-@@ -459,15 +459,12 @@ def create_flex_decoding_kernel(*args, **kwargs):
+@@ -241,15 +241,12 @@ def create_flex_decoding_kernel(*args, **kwargs):
     # m
     # if V.graph.sizevars.evaluate_expr(sympy.Lt(query.get_size()[-2], 0))
     # else # Always use a BLOCK_M > 16 before Triton fix https://github.com/triton-lang/triton/pull/4061 is in pin

scripts/patch/pytorch_fp64.patch

Lines changed: 4 additions & 4 deletions

@@ -1,8 +1,8 @@
 diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
-index 1088634ce911e8..4e70bf5bfce434 100644
+index 469ece2958..54ba0d7b3c 100644
 --- a/benchmarks/dynamo/common.py
 +++ b/benchmarks/dynamo/common.py
-@@ -1598,6 +1598,12 @@ def cast_to_fp32(model, inputs):
+@@ -1659,6 +1659,12 @@ def cast_to_fp32(model, inputs):
     return cast_to(torch.float32, model, inputs)


@@ -15,7 +15,7 @@ index 1088634ce911e8..4e70bf5bfce434 100644
 class DummyGradScaler:
     def scale(self, loss):
         return loss
-@@ -2097,10 +2103,24 @@ def record_status(accuracy_status, dynamo_start_stats):
+@@ -2162,10 +2168,24 @@ class BenchmarkRunner:
     model_fp64 = None
     inputs_fp64 = None
     try:
@@ -44,7 +44,7 @@ index 1088634ce911e8..4e70bf5bfce434 100644
         self.init_optimizer(name, current_device, model_fp64.parameters())
         fp64_outputs = self.run_n_iterations(
             model_fp64, inputs_fp64, self.model_iter_fn
-@@ -2111,11 +2131,19 @@ def record_status(accuracy_status, dynamo_start_stats):
+@@ -2176,11 +2196,19 @@ class BenchmarkRunner:
         else x,
         fp64_outputs,
     )

scripts/patch/pytorch_global_scratch.patch

Lines changed: 3 additions & 3 deletions

@@ -1,10 +1,10 @@
 diff --git a/torch/_inductor/codegen/xpu/device_op_overrides.py b/torch/_inductor/codegen/xpu/device_op_overrides.py
-index 632cfd29f17..b08db340c12 100644
+index 99502ca2dd..5d538ec20c 100644
 --- a/torch/_inductor/codegen/xpu/device_op_overrides.py
 +++ b/torch/_inductor/codegen/xpu/device_op_overrides.py
 @@ -61,7 +61,7 @@ class XPUDeviceOpOverrides(DeviceOpOverrides):
-    def cpp_global_scratch(
-        self, idx: int, workspace: TritonScratchWorkspace
+    def cpp_scratch(
+        self, idx: int, workspace: TritonScratchWorkspace, prefix: Optional[str] = None
     ) -> Optional[tuple[list[str], str]]:
     - return None
     + return [f"void *global_scratch_{idx} = 0;"], f"global_scratch_{idx}"

scripts/patch/test_compile_subprocess.patch

Lines changed: 0 additions & 14 deletions
This file was deleted.

0 commit comments

Comments
 (0)