Skip to content

Commit 17c1985

Browse files
build: manually update PyTorch version (#3863)
This commit sets the PyTorch and TorchVision versions to the nightly release dated 2024-11-07. It also updates the dtype check for the `aten.fake_quantize_per_tensor_affine` and `aten.fake_quantize_per_tensor_affine_cachemask` ops, since those ops now support bfloat16 input. Signed-off-by: Vivek Khandelwal <[email protected]>
1 parent 8eb34da commit 17c1985

File tree

6 files changed

+8
-34
lines changed

lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp

Lines changed: 3 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -11247,7 +11247,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
1124711247
" return %0#1 : !torch.int\n"
1124811248
" }\n"
1124911249
" func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.float, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int) -> !torch.int {\n"
11250-
" %int15 = torch.constant.int 15\n"
1125111250
" %none = torch.constant.none\n"
1125211251
" %str = torch.constant.str \"AssertionError: \"\n"
1125311252
" %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
@@ -11258,13 +11257,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
1125811257
" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
1125911258
" torch.prim.If.yield\n"
1126011259
" }\n"
11261-
" %2 = torch.aten.ne.int %0#1, %int15 : !torch.int, !torch.int -> !torch.bool\n"
11262-
" torch.prim.If %2 -> () {\n"
11263-
" torch.prim.If.yield\n"
11264-
" } else {\n"
11265-
" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
11266-
" torch.prim.If.yield\n"
11267-
" }\n"
1126811260
" return %0#1 : !torch.int\n"
1126911261
" }\n"
1127011262
" func.func @__torch__.torch_mlir.jit_ir_importer.build_tools.library_generator.is_float_dtype(%arg0: !torch.int) -> !torch.bool {\n"
@@ -11282,7 +11274,6 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
1128211274
" }\n"
1128311275
" func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine_cachemask\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.float, %arg2: !torch.int, %arg3: !torch.int, %arg4: !torch.int) -> !torch.tuple<int, int> {\n"
1128411276
" %int11 = torch.constant.int 11\n"
11285-
" %int15 = torch.constant.int 15\n"
1128611277
" %none = torch.constant.none\n"
1128711278
" %str = torch.constant.str \"AssertionError: \"\n"
1128811279
" %int1 = torch.constant.int 1\n"
@@ -11294,16 +11285,9 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
1129411285
" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
1129511286
" torch.prim.If.yield\n"
1129611287
" }\n"
11297-
" %2 = torch.aten.ne.int %0#1, %int15 : !torch.int, !torch.int -> !torch.bool\n"
11298-
" torch.prim.If %2 -> () {\n"
11299-
" torch.prim.If.yield\n"
11300-
" } else {\n"
11301-
" torch.prim.RaiseException %str, %none : !torch.str, !torch.none\n"
11302-
" torch.prim.If.yield\n"
11303-
" }\n"
11304-
" %3 = torch.prim.TupleIndex %arg0, %int1 : !torch.tuple<int, int>, !torch.int -> !torch.int\n"
11305-
" %4 = torch.prim.TupleConstruct %3, %int11 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
11306-
" return %4 : !torch.tuple<int, int>\n"
11288+
" %2 = torch.prim.TupleIndex %arg0, %int1 : !torch.tuple<int, int>, !torch.int -> !torch.int\n"
11289+
" %3 = torch.prim.TupleConstruct %2, %int11 : !torch.int, !torch.int -> !torch.tuple<int, int>\n"
11290+
" return %3 : !torch.tuple<int, int>\n"
1130711291
" }\n"
1130811292
" func.func @\"__torch_mlir_dtype_fn.aten.fake_quantize_per_tensor_affine.tensor_qparams\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.tuple<int, int>, %arg3: !torch.int, %arg4: !torch.int) -> !torch.int {\n"
1130911293
" %int15 = torch.constant.int 15\n"

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -403,7 +403,6 @@
403403
"QuantizedReluInt32_basic",
404404
"QuantizedReluInt8_basic",
405405
"QuantizedReluUint8_basic",
406-
"AtenSubFloatModule_basic",
407406
"BincountMinlengthModule_basic",
408407
"BincountModule_basic",
409408
"BincountStaticSizeModule_basic",
@@ -431,20 +430,16 @@
431430
"ElementwiseQuantizePerTensorModule_basic",
432431
"ElementwiseQuantizePerTensorUIntModule_basic",
433432
"ElementwiseToDtypeI64ToUI8Module_basic",
434-
"EqIntModule_basic",
435433
"FloatImplicitModule_basic",
436434
"GeFloatIntModule_basic",
437-
"GeFloatModule_basic",
438435
"GeIntModule_basic",
439436
"GtFloatIntModule_basic",
440-
"GtIntModule_basic",
441437
"IntFloatModule_basic",
442438
"IntImplicitModule_basic",
443439
"LenStrModule_basic",
444440
"MulFloatModule_basic",
445441
"NativeGroupNormBackwardModule_basic",
446442
"NeFloatIntModule_basic",
447-
"NeIntModule_basic",
448443
"NllLossModuleBackward1DMeanWeight_basic",
449444
"NllLossModuleBackward1DMean_basic",
450445
"NllLossModuleBackward1DSumWeight_basic",
@@ -472,7 +467,6 @@
472467
"SortIntList_basic",
473468
"SplitDimDynamicModule_basic",
474469
"SplitDimStaticModule_basic",
475-
"SqrtIntConstantModule_basic",
476470
"SqrtIntModule_basic",
477471
"SubFloatModule_basic",
478472
"TensorToBoolZeroRank_basic",
@@ -653,7 +647,6 @@
653647
"AtenMmQuint8_basic",
654648
"AtenRealView128Module_basic",
655649
"AtenRealView64Module_basic",
656-
"AtenSubFloatModule_basic",
657650
"AtenTopKModule_basic",
658651
"AtenTopKSmallestModule_basic",
659652
"Aten_EmbeddingBagExample_basic",
@@ -878,7 +871,6 @@
878871
"SortTensor_basic",
879872
"SplitDimDynamicModule_basic",
880873
"SplitDimStaticModule_basic",
881-
"SqrtIntConstantModule_basic",
882874
"SqrtIntModule_basic",
883875
"SubFloatModule_basic",
884876
"TModuleRank0_basic",

projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2580,19 +2580,17 @@ def prims〇split_dim〡dtype(a_rank_dtype: Tuple[int, int], dim: int, outer_len
25802580
return a_dtype
25812581

25822582
# note: fake_quantize_per_tensor_affine doesn't support "meta" device, use "cpu" instead.
2583-
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.bfloat16, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
2583+
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
25842584
def aten〇fake_quantize_per_tensor_affine〡dtype(self_rank_dtype: Tuple[int, int], scale: float, zero_point: int, quant_min: int, quant_max: int) -> int:
25852585
self_rank, self_dtype = self_rank_dtype
25862586
assert is_float_dtype(self_dtype)
2587-
assert self_dtype != torch.bfloat16
25882587
return self_dtype
25892588

25902589
# note: fake_quantize_per_tensor_affine doesn't support "meta" device, use "cpu" instead.
2591-
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.bfloat16, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
2590+
@check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, tensor_device="cpu", scale=0.1, zero_point=0, quant_min=0, quant_max=255, error_types={torch.complex128, torch.complex64, torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool}))
25922591
def aten〇fake_quantize_per_tensor_affine_cachemask〡dtype(self_rank_dtype: Tuple[int, int], scale: float, zero_point: int, quant_min: int, quant_max: int) -> Tuple[int, int]:
25932592
self_rank, self_dtype = self_rank_dtype
25942593
assert is_float_dtype(self_dtype)
2595-
assert self_dtype != torch.bfloat16
25962594
return (self_rank_dtype[1], torch.bool)
25972595

25982596
# note: fake_quantize_per_tensor_affine.tensor_qparams doesn't support "meta" device, use "cpu" instead.

pytorch-hash.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
c787213d413e85c66bdad0d8c9cde1c5ced34b1b
1+
0d5247caf3ffd618d31cf4cf880c47b7dbd323a7

pytorch-requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
-f https://download.pytorch.org/whl/nightly/cpu/torch/
22
--pre
3-
torch==2.6.0.dev20241029
3+
torch==2.6.0.dev20241107

torchvision-requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
-f https://download.pytorch.org/whl/nightly/cpu/torchvision/
22
--pre
3-
torchvision==0.20.0.dev20241029
3+
torchvision==0.20.0.dev20241107

0 commit comments

Comments (0)