Skip to content

Commit 16b3bd6

Browse files
build: manually update PyTorch version and fix CI failure (llvm#3830)
This commit sets the PyTorch and TorchVision versions to the nightly release 2024-10-29. It also fixes the CI failure that appeared after commit llvm@54d9e24 was merged: the CI checks on that PR ran before the previous RollPyTorch update, but the PR itself was merged after the update, so the failure was not caught before merging.

While exporting the fx_graph through fx_importer for the `rrelu` and `rrelu_with_noise` ops in train mode, the `aten.rrelu_with_noise` op is decomposed using the default PyTorch decomposition. However, that decomposition contains an input mutation (specifically here: https://github.com/pytorch/pytorch/blob/9bbe4a67ad137032add6a3b0b74bda66f5ef83d2/torch/_decomp/decompositions.py#L325), resulting in a runtime failure. This issue will likely be fixed by pytorch/pytorch#138503; until then, the failing tests are added to the xfail set.

Also, after the RollPyTorch update, the following tests started passing for the fx_importer and fx_importer_stablehlo configs:
- "ElementwiseRreluTrainModule_basic"
- "ElementwiseRreluTrainStaticModule_basic"
- "ElementwiseRreluWithNoiseTrainModule_basic"
- "ElementwiseRreluWithNoiseTrainStaticModule_basic"

This commit also updates the dtype check for the `aten.linear` op, since the op now expects both input tensors to have the same dtype.

Signed-off-by: Vivek Khandelwal <[email protected]>
1 parent 9ab2a15 commit 16b3bd6

File tree

5 files changed

+14
-12
lines changed

5 files changed

+14
-12
lines changed

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -420,7 +420,6 @@
420420
"DeformConv2D_basic",
421421
"DivFloatModule_basic",
422422
"DivIntModule_basic",
423-
"ElementwiseAddScalar_NumToTensorFloat_Module_basic",
424423
"ElementwiseDequantizePerChannelModule_basic",
425424
"ElementwiseDequantizePerTensorModule_basic",
426425
"ElementwiseQuantizePerTensorModule_basic",
@@ -446,8 +445,6 @@
446445
"NllLossModuleBackward1DSum_basic",
447446
"NllLossModuleBackward1DWeight_basic",
448447
"NllLossModuleBackward1D_basic",
449-
"NumToTensorFloatModule_basic",
450-
"NumToTensorIntModule_basic",
451448
"NumelModule_basic",
452449
"NumelZeroRankModule_basic",
453450
"PowIntFloatModule_basic",
@@ -464,7 +461,6 @@
464461
"QuantizedSingleLayer_basic",
465462
"ReduceMaxAlongDimUnsignedInt_basic",
466463
"ReduceMinAlongDimUnsignedInt_basic",
467-
"RsubInt0d_NumToTensor_Module_basic",
468464
"ScalarImplicitFloatModule_basic",
469465
"SortIntListReverse_basic",
470466
"SortIntList_basic",
@@ -523,6 +519,11 @@
523519
"MeshgridIndexingXY_basic",
524520
"Meshgrid_basic",
525521
"OneHotModule_basic",
522+
# RuntimeError: cannot mutate tensors with frozen storage
523+
"ElementwiseRreluTrainModule_basic",
524+
"ElementwiseRreluTrainStaticModule_basic",
525+
"ElementwiseRreluWithNoiseTrainModule_basic",
526+
"ElementwiseRreluWithNoiseTrainStaticModule_basic",
526527
}
527528

528529
FX_IMPORTER_CRASHING_SET = LINALG_CRASHING_SET | {
@@ -690,7 +691,6 @@
690691
"DiagonalModule_with_offset",
691692
"DivFloatModule_basic",
692693
"DivIntModule_basic",
693-
"ElementwiseAddScalar_NumToTensorFloat_Module_basic",
694694
"ElementwiseDequantizePerChannelModule_basic",
695695
"ElementwiseDequantizePerTensorModule_basic",
696696
"ElementwiseErfIntModule_basic",
@@ -792,8 +792,6 @@
792792
"NormScalarComplexModule_basic",
793793
"NormScalarModule_basic",
794794
"NormalFunctionalModule_basic",
795-
"NumToTensorFloatModule_basic",
796-
"NumToTensorIntModule_basic",
797795
"NumelModule_basic",
798796
"NumelZeroRankModule_basic",
799797
"PowIntFloatModule_basic",
@@ -829,7 +827,6 @@
829827
"ReplicationPad2dModule_left0",
830828
"ReplicationPad2dModule_right0",
831829
"ReplicationPad2dModule_top0",
832-
"RsubInt0d_NumToTensor_Module_basic",
833830
"ScalarImplicitFloatModule_basic",
834831
# REMOVE WHEN ENABLE_GQA IS ADDED
835832
"ScatterReduceFloatMaxModule",
@@ -964,6 +961,11 @@
964961
"UpSampleNearest2dStaticFactor_basic",
965962
"UpSampleNearest2dStaticSize_basic",
966963
"UpSampleNearest2d_basic",
964+
# RuntimeError: cannot mutate tensors with frozen storage
965+
"ElementwiseRreluTrainModule_basic",
966+
"ElementwiseRreluTrainStaticModule_basic",
967+
"ElementwiseRreluWithNoiseTrainModule_basic",
968+
"ElementwiseRreluWithNoiseTrainStaticModule_basic",
967969
}
968970

969971
FX_IMPORTER_STABLEHLO_CRASHING_SET = {

projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5371,7 +5371,7 @@ def aten〇atanh〡dtype(self_rank_dtype: Tuple[int, int]) -> int:
53715371
return torch.float32
53725372
return self_dtype
53735373

5374-
@check_dtype_function(_check_two_tensor_op())
5374+
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=2))
53755375
def aten〇linear〡dtype(input_rank_dtype: Tuple[int, int], weight_rank_dtype: Tuple[int, int], bias_rank_dtype: Optional[Tuple[int, int]] = None) -> int:
53765376
input_rank, input_dtype = input_rank_dtype
53775377
weight_rank, weight_dtype = weight_rank_dtype

pytorch-hash.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
160d421a40e934ac8183e47f9cbc8618a4bd97dd
1+
c787213d413e85c66bdad0d8c9cde1c5ced34b1b

pytorch-requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
-f https://download.pytorch.org/whl/nightly/cpu/torch/
22
--pre
3-
torch==2.6.0.dev20241020
3+
torch==2.6.0.dev20241029

torchvision-requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
-f https://download.pytorch.org/whl/nightly/cpu/torchvision/
22
--pre
3-
torchvision==0.20.0.dev20241020
3+
torchvision==0.20.0.dev20241029

0 commit comments

Comments (0)