Commit 11e22b6

Merge pull request #482 from Xilinx/matthias.bump_to_8b0bf2e2
[AutoBump] Merge with fixes of 8b0bf2e (Oct 30, requires LLVM bump) (97)
2 parents: d5ca144 + d55d7b9

File tree

5 files changed: +59 −55 lines changed

externals/stablehlo

Submodule stablehlo updated 44 files

lib/Dialect/Torch/IR/TorchOps.cpp

Lines changed: 3 additions & 4 deletions
@@ -4016,10 +4016,9 @@ OpFoldResult AtenSliceTensorOp::fold(FoldAdaptor adaptor) {
   limit = limit < 0 ? limit + inType.getSizes()[dimInt] : limit;
   limit = limit < 0 ? -1 : limit;
   limit = std::min(limit, inType.getSizes()[dimInt]);
-  bool validIterArgs =
-      (stride > 0 && begin < limit) || (stride < 0 && begin > limit);
-  assert(validIterArgs &&
-         "aten.slice.Tensor iteration args are statically invalid.");
+  assert((stride > 0 && begin < limit) ||
+         (stride < 0 && begin > limit) &&
+             "aten.slice.Tensor iteration args are statically invalid.");
 
   int64_t inputRank = inType.getSizes().size();
   llvm::SmallVector<int64_t> inputStrides(inputRank, 1);
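A note on the rewritten assert: `&&` binds tighter than `||`, so the message string attaches only to the second disjunct; because a string literal is always truthy, the checked condition is still `(stride > 0 && begin < limit) || (stride < 0 && begin > limit)`, though compilers may warn about the missing parentheses. For reference, a standalone sketch of the clamping rules the fold applies before that check; the names (`normalizeSliceLimit`, `sizes`, `dim`) are illustrative, not from the patch:

```cpp
// Minimal sketch of the limit normalization in AtenSliceTensorOp::fold,
// extracted from MLIR for illustration. Assumes statically known sizes.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int64_t normalizeSliceLimit(int64_t limit, const std::vector<int64_t> &sizes,
                            int64_t dim) {
  // Negative limits count from the end of the dimension.
  limit = limit < 0 ? limit + sizes[dim] : limit;
  // A limit that is still negative clamps to -1 (end of a backward slice).
  limit = limit < 0 ? -1 : limit;
  // Never read past the end of the dimension.
  return std::min(limit, sizes[dim]);
}

int main() {
  std::vector<int64_t> sizes = {8, 4};
  int64_t begin = 0, stride = 1;
  int64_t limit = normalizeSliceLimit(-2, sizes, /*dim=*/0); // 8 - 2 = 6
  // The folded form of the check: the iteration args must describe a
  // non-empty traversal in the stride's direction.
  assert((stride > 0 && begin < limit) || (stride < 0 && begin > limit));
  return 0;
}
```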

lib/Dialect/TorchConversion/Transforms/BackendTypeConversion.cpp

Lines changed: 43 additions & 43 deletions
@@ -57,16 +57,16 @@ static void setupTorchBoolToI1Conversion(ConversionTarget &target,
   typeConverter.addConversion([](Torch::BoolType type) -> std::optional<Type> {
     return IntegerType::get(type.getContext(), 1);
   });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, IntegerType type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        // Other builtin integer types could be handled by other materializers.
-        if (!(type.getWidth() == 1 && type.isSignless()))
-          return std::nullopt;
-        assert(inputs.size() == 1);
-        assert(isa<Torch::BoolType>(inputs[0].getType()));
-        return builder.create<ToI1Op>(loc, inputs[0]).getResult();
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            IntegerType type, ValueRange inputs,
+                                            Location loc) -> Value {
+    // Other builtin integer types could be handled by other materializers.
+    if (!(type.getWidth() == 1 && type.isSignless()))
+      return Value();
+    assert(inputs.size() == 1);
+    assert(isa<Torch::BoolType>(inputs[0].getType()));
+    return builder.create<ToI1Op>(loc, inputs[0]).getResult();
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::BoolType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
@@ -83,19 +83,19 @@ static void setupTorchIntToI64Conversion(ConversionTarget &target,
   typeConverter.addConversion([](Torch::IntType type) -> std::optional<Type> {
     return IntegerType::get(type.getContext(), 64);
   });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, IntegerType type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        // Other builtin integer types could be handled by other materializers.
-        if (!(type.getWidth() == 64 && type.isSignless()))
-          return std::nullopt;
-        // Other input type to be converted to i64 are handled by other
-        // materializers.
-        if (!isa<Torch::IntType>(inputs[0].getType()))
-          return std::nullopt;
-        assert(inputs.size() == 1);
-        return builder.createOrFold<ToI64Op>(loc, inputs[0]);
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            IntegerType type, ValueRange inputs,
+                                            Location loc) -> Value {
+    // Other builtin integer types could be handled by other materializers.
+    if (!(type.getWidth() == 64 && type.isSignless()))
+      return Value();
+    // Other input type to be converted to i64 are handled by other
+    // materializers.
+    if (!isa<Torch::IntType>(inputs[0].getType()))
+      return Value();
+    assert(inputs.size() == 1);
+    return builder.createOrFold<ToI64Op>(loc, inputs[0]);
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::IntType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
@@ -112,13 +112,13 @@ static void setupTorchFloatToF64Conversion(ConversionTarget &target,
   typeConverter.addConversion([](Torch::FloatType type) -> std::optional<Type> {
     return Float64Type::get(type.getContext());
   });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, Float64Type type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        assert(inputs.size() == 1);
-        assert(isa<Torch::FloatType>(inputs[0].getType()));
-        return builder.create<ToF64Op>(loc, inputs[0]).getResult();
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            Float64Type type, ValueRange inputs,
+                                            Location loc) -> Value {
+    assert(inputs.size() == 1);
+    assert(isa<Torch::FloatType>(inputs[0].getType()));
+    return builder.create<ToF64Op>(loc, inputs[0]).getResult();
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::FloatType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
@@ -137,19 +137,19 @@ static void setupTorchGeneratorToI64Conversion(ConversionTarget &target,
       [](Torch::GeneratorType type) -> std::optional<Type> {
         return IntegerType::get(type.getContext(), 64);
       });
-  typeConverter.addTargetMaterialization(
-      [](OpBuilder &builder, IntegerType type, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        // Other builtin integer types could be handled by other materializers.
-        if (!(type.getWidth() == 64 && type.isSignless()))
-          return std::nullopt;
-        // Other input type to be converted to i64 are handled by other
-        // materializers.
-        if (!isa<Torch::GeneratorType>(inputs[0].getType()))
-          return std::nullopt;
-        assert(inputs.size() == 1);
-        return builder.create<GeneratorToI64Op>(loc, inputs[0]).getResult();
-      });
+  typeConverter.addTargetMaterialization([](OpBuilder &builder,
+                                            IntegerType type, ValueRange inputs,
+                                            Location loc) -> Value {
+    // Other builtin integer types could be handled by other materializers.
+    if (!(type.getWidth() == 64 && type.isSignless()))
+      return Value();
+    // Other input type to be converted to i64 are handled by other
+    // materializers.
+    if (!isa<Torch::GeneratorType>(inputs[0].getType()))
+      return Value();
+    assert(inputs.size() == 1);
+    return builder.create<GeneratorToI64Op>(loc, inputs[0]).getResult();
+  });
   auto sourceMaterialization = [](OpBuilder &builder, Torch::GeneratorType type,
                                   ValueRange inputs, Location loc) -> Value {
     assert(inputs.size() == 1);
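All four hunks make the same mechanical change: the target-materialization callbacks now return `Value` instead of `std::optional<Value>`, with a null (default-constructed) `Value` meaning "this materializer does not handle the type, try the next one" where they previously returned `std::nullopt`. Below is a standalone C++ sketch of that convention; the `Value` struct, `materializeTarget`, and the lambdas are illustrative stand-ins for the MLIR types, not MLIR's actual implementation:

```cpp
// Sketch of the callback convention after the bump: a materializer returns a
// Value-like object whose null state means "fall through to the next
// registered materializer". Mirrors mlir::Value, where a default-constructed
// Value is null; all names here are hypothetical.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Value {
  std::string repr;                              // empty repr == null Value
  explicit operator bool() const { return !repr.empty(); }
};

using Materializer = std::function<Value(int width)>;

// Walk the registered materializers; the first non-null result wins.
Value materializeTarget(const std::vector<Materializer> &ms, int width) {
  for (const auto &m : ms)
    if (Value v = m(width))
      return v;
  return Value();
}

int main() {
  std::vector<Materializer> ms = {
      // Mirrors the bool-to-i1 case: only claims signless width-1 integers.
      [](int w) -> Value {
        if (w != 1)
          return Value(); // was `return std::nullopt;` before the bump
        return Value{"torch_c.to_i1"};
      },
      // Mirrors the int-to-i64 case: only claims signless width-64 integers.
      [](int w) -> Value {
        if (w != 64)
          return Value();
        return Value{"torch_c.to_i64"};
      },
  };
  std::cout << materializeTarget(ms, 64).repr << "\n"; // torch_c.to_i64
  return 0;
}
```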

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 11 additions & 6 deletions
@@ -506,7 +506,10 @@
     "MeshgridIndexingIJ_basic",
     "MeshgridIndexingXY_basic",
     "Meshgrid_basic",
+    "OneHotModule_basic",
     # RuntimeError: cannot mutate tensors with frozen storage
+    "ElementwiseRreluTrainModule_basic",
+    "ElementwiseRreluTrainStaticModule_basic",
     "ElementwiseRreluWithNoiseTrainModule_basic",
     "ElementwiseRreluWithNoiseTrainStaticModule_basic",
     "BernoulliFloatModule_basic",
@@ -522,6 +525,8 @@
     "ChunkListUnpackUnevenDynamic_Module_basic",
     "ChunkListUnpackUneven_Module_basic",
     "ChunkListUnpack_Module_basic",
+    "ElementwiseRreluTrainModule_basic",
+    "ElementwiseRreluTrainStaticModule_basic",
     "ElementwiseRreluWithNoiseTrainModule_basic",
     "ElementwiseRreluWithNoiseTrainStaticModule_basic",
     "SplitTensorGetItem_Module_basic",
@@ -550,6 +555,7 @@
     "MeshgridIndexingIJ_basic",
     "MeshgridIndexingXY_basic",
     "Meshgrid_basic",
+    "OneHotModule_basic",
     "UniformModule_basic",
     "UniformStaticShapeModule_basic",
 }
@@ -739,7 +745,6 @@
     "DiagonalModule_with_offset",
     "DivFloatModule_basic",
     "DivIntModule_basic",
-    "ElementwiseAddScalar_NumToTensorFloat_Module_basic",
     "ElementwiseDequantizePerChannelModule_basic",
     "ElementwiseDequantizePerTensorModule_basic",
     "ElementwiseErfIntModule_basic",
@@ -841,8 +846,6 @@
     "NormScalarComplexModule_basic",
     "NormScalarModule_basic",
     "NormalFunctionalModule_basic",
-    "NumToTensorFloatModule_basic",
-    "NumToTensorIntModule_basic",
     "NumelModule_basic",
     "NumelZeroRankModule_basic",
     "PowIntFloatModule_basic",
@@ -878,7 +881,6 @@
     "ReplicationPad2dModule_left0",
     "ReplicationPad2dModule_right0",
     "ReplicationPad2dModule_top0",
-    "RsubInt0d_NumToTensor_Module_basic",
     "ScalarImplicitFloatModule_basic",
     # REMOVE WHEN ENABLE_GQA IS ADDED
     "ScatterReduceFloatMaxModule",
@@ -1013,6 +1015,11 @@
     "UpSampleNearest2dStaticFactor_basic",
     "UpSampleNearest2dStaticSize_basic",
     "UpSampleNearest2d_basic",
+    # RuntimeError: cannot mutate tensors with frozen storage
+    "ElementwiseRreluTrainModule_basic",
+    "ElementwiseRreluTrainStaticModule_basic",
+    "ElementwiseRreluWithNoiseTrainModule_basic",
+    "ElementwiseRreluWithNoiseTrainStaticModule_basic",
 }
 
 FX_IMPORTER_STABLEHLO_CRASHING_SET = {
@@ -3675,7 +3682,6 @@
     "IndexPutImpl3DFloatAccumulateModule_basic",
     "IndexPutImpl3DFloatNonAccumulateModule_basic",
     "IndexPutImplIndexWithNoneModule_basic",
-    "IndexSelectRank0IdxModule_basic",
     "InterpolateDynamicModule_sizes_bilinear",
     "InterpolateDynamicModule_sizes_nearest",
     "InterpolateStaticModule_scales_bilinear_align_corners",
@@ -4005,7 +4011,6 @@
     "GridSamplerBasic2_basic",
     "GridSamplerBasic3_basic",
     "GridSamplerBasic4_basic",
-    "IndexSelectRank0IdxModule_basic",
     "IouOfModule_basic",
     "MaxPool1dEmptyStrideStaticModule_basic",
     "MaxPool1dStaticCeilModeTrueModule_basic",

pytorch-hash.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-160d421a40e934ac8183e47f9cbc8618a4bd97dd
+c787213d413e85c66bdad0d8c9cde1c5ced34b1b
