Commit d5ca144

Merge pull request #481 from Xilinx/bump_to_9ab2a150
[AutoBump] Merge with fixes of 9ab2a15 (Oct 30) (96)
2 parents: 9529dcc + bb8a5fe

File tree: 5 files changed, +109 −0 lines

include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td

Lines changed: 53 additions & 0 deletions
@@ -14144,6 +14144,59 @@ def Torch_AtenUpsampleNearest2dVecOp : Torch_Op<"aten.upsample_nearest2d.vec", [
   }];
 }
 
+def Torch_AtenUpsampleBilinear2dOp : Torch_Op<"aten.upsample_bilinear2d", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::upsample_bilinear2d : (Tensor, int[], bool, float?, float?) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self,
+    AnyTorchListOfTorchIntType:$output_size,
+    Torch_BoolType:$align_corners,
+    AnyTorchOptionalFloatType:$scales_h,
+    AnyTorchOptionalFloatType:$scales_w
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenUpsampleBilinear2dOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 5, 1);
+    }
+    void AtenUpsampleBilinear2dOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 5, 1);
+    }
+  }];
+}
+
+def Torch_AtenUpsampleBilinear2dVecOp : Torch_Op<"aten.upsample_bilinear2d.vec", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::upsample_bilinear2d.vec : (Tensor, int[]?, bool, float[]?) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$input,
+    AnyTorchOptionalListOfTorchIntType:$output_size,
+    Torch_BoolType:$align_corners,
+    AnyTorchOptionalListOfTorchFloatType:$scale_factors
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenUpsampleBilinear2dVecOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 4, 1);
+    }
+    void AtenUpsampleBilinear2dVecOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 4, 1);
+    }
+  }];
+}
+
 def Torch_AtenScaledDotProductAttentionOp : Torch_Op<"aten.scaled_dot_product_attention", [
     AllowsTypeRefinement,
     HasValueSemantics,
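Note: the two generated ops mirror the existing PyTorch overloads. A minimal eager-mode sketch of the signatures they model (assuming a torch build that exposes these overloads under torch.ops.aten; the tensor values and sizes are illustrative only):

import torch

x = torch.rand(1, 3, 10, 10)

# aten::upsample_bilinear2d : (Tensor, int[], bool, float?, float?) -> (Tensor)
y = torch.ops.aten.upsample_bilinear2d(x, [11, 12], True, None, None)
assert list(y.shape) == [1, 3, 11, 12]

# aten::upsample_bilinear2d.vec : (Tensor, int[]?, bool, float[]?) -> (Tensor)
z = torch.ops.aten.upsample_bilinear2d.vec(x, None, True, [2.0, 2.0])
assert list(z.shape) == [1, 3, 20, 20]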

lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp

Lines changed: 22 additions & 0 deletions
@@ -11064,6 +11064,20 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    }\n"
 "    return %10 : !torch.list<int>\n"
 "  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.upsample_bilinear2d\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.bool, %arg3: !torch.optional<float>, %arg4: !torch.optional<float>) -> !torch.list<int> {\n"
+"    %int0 = torch.constant.int 0\n"
+"    %int1 = torch.constant.int 1\n"
+"    %0 = torch.aten.__getitem__.t %arg0, %int0 : !torch.list<int>, !torch.int -> !torch.int\n"
+"    %1 = torch.aten.__getitem__.t %arg0, %int1 : !torch.list<int>, !torch.int -> !torch.int\n"
+"    %2 = torch.aten.__getitem__.t %arg1, %int0 : !torch.list<int>, !torch.int -> !torch.int\n"
+"    %3 = torch.aten.__getitem__.t %arg1, %int1 : !torch.list<int>, !torch.int -> !torch.int\n"
+"    %4 = torch.prim.ListConstruct %0, %1, %2, %3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>\n"
+"    return %4 : !torch.list<int>\n"
+"  }\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.upsample_bilinear2d.vec\"(%arg0: !torch.list<int>, %arg1: !torch.optional<list<int>>, %arg2: !torch.bool, %arg3: !torch.optional<list<float>>) -> !torch.list<int> {\n"
+"    %0 = call @\"__torch_mlir_shape_fn.aten.upsample_nearest2d.vec\"(%arg0, %arg1, %arg3) : (!torch.list<int>, !torch.optional<list<int>>, !torch.optional<list<float>>) -> !torch.list<int>\n"
+"    return %0 : !torch.list<int>\n"
+"  }\n"
 "  func.func @\"__torch_mlir_dtype_fn.prims.split_dim\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.int, %arg2: !torch.int) -> !torch.int {\n"
 "    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
 "    return %0#1 : !torch.int\n"
@@ -12598,6 +12612,14 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
 "    return %0#1 : !torch.int\n"
 "  }\n"
+"  func.func @\"__torch_mlir_dtype_fn.aten.upsample_bilinear2d\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.list<int>, %arg2: !torch.bool, %arg3: !torch.optional<float>, %arg4: !torch.optional<float>) -> !torch.int {\n"
+"    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
+"    return %0#1 : !torch.int\n"
+"  }\n"
+"  func.func @\"__torch_mlir_dtype_fn.aten.upsample_bilinear2d.vec\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.optional<list<int>>, %arg2: !torch.bool, %arg3: !torch.optional<list<float>>) -> !torch.int {\n"
+"    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
+"    return %0#1 : !torch.int\n"
+"  }\n"
 "  func.func @\"__torch_mlir_dtype_fn.aten.view\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.list<int>) -> !torch.int {\n"
 "    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
 "    return %0#1 : !torch.int\n"

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 6 additions & 0 deletions
@@ -580,6 +580,9 @@
     "AdaptiveAvgPool1dGeneralDynamicNoBatches_basic",
     # Randomly mismatching values
     "ConvolutionModule2DTranspose_basic",
+    # torch export: RuntimeError: cannot mutate tensors with frozen storage
+    "ElementwiseRreluWithNoiseTrainModule_basic",
+    "ElementwiseRreluWithNoiseTrainStaticModule_basic",
 }
 
 FX_IMPORTER_STABLEHLO_XFAIL_SET = {
@@ -1028,6 +1031,9 @@
     # materialization callback produced value of incorrect type failed
     "ReduceMaxAlongDimUnsignedInt_basic",
     "ReduceMinAlongDimUnsignedInt_basic",
+    # torch export: RuntimeError: cannot mutate tensors with frozen storage
+    "ElementwiseRreluWithNoiseTrainModule_basic",
+    "ElementwiseRreluWithNoiseTrainStaticModule_basic",
 }
 
 STABLEHLO_PASS_SET = {

projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py

Lines changed: 24 additions & 0 deletions
@@ -2349,6 +2349,20 @@ def aten〇upsample_nearest2d〇vec〡shape(input: List[int], output_size: Optio
     assert scale_factors is not None
     return [input[0], input[1], int(input[2] * scale_factors[0]), int(input[3] * scale_factors[1])]
 
+@check_shape_function([
+    Invocation(TensorOfShape(1, 3, 10, 10), [11, 12], True)
+])
+def aten〇upsample_bilinear2d〡shape(self: List[int], output_size: List[int], align_corners: bool, scales_h: Optional[float] = None, scales_w: Optional[float] = None) -> List[int]:
+    return [self[0], self[1], output_size[0], output_size[1]]
+
+@check_shape_function([
+    Invocation(TensorOfShape(1, 3, 10, 10), [11, 12], True, None),
+    Invocation(TensorOfShape(1, 3, 10, 9), None, True, [2.0, 2.3]),
+    Invocation(TensorOfShape(1, 3, 5, 6), None, True, [2.5, 1.0])
+])
+def aten〇upsample_bilinear2d〇vec〡shape(input: List[int], output_size: Optional[List[int]], align_corners: bool, scale_factors: Optional[List[float]]) -> List[int]:
+    return aten〇upsample_nearest2d〇vec〡shape(input, output_size, scale_factors)
+
 # ==============================================================================
 # Dtype Functions
 # ==============================================================================
@@ -3593,6 +3607,16 @@ def aten〇upsample_nearest2d〇vec〡dtype(input_rank_dtype: Tuple[int, int], o
     self_rank, self_dtype = input_rank_dtype
     return self_dtype
 
+@check_dtype_function(_check_tensors_with_the_same_dtype(tensor_shapes=[(2, 3, 5, 7)], output_size=[11, 13], align_corners=True))
+def aten〇upsample_bilinear2d〡dtype(self_rank_dtype: Tuple[int, int], output_size: List[int], align_corners: bool, scales_h: Optional[float] = None, scales_w: Optional[float] = None) -> int:
+    self_rank, self_dtype = self_rank_dtype
+    return self_dtype
+
+@check_dtype_function(_check_tensors_with_the_same_dtype(tensor_shapes=[(2, 3, 5, 7)], output_size=[11, 13], align_corners=True, scale_factors=None))
+def aten〇upsample_bilinear2d〇vec〡dtype(input_rank_dtype: Tuple[int, int], output_size: Optional[List[int]], align_corners: bool, scale_factors: Optional[List[float]]) -> int:
+    self_rank, self_dtype = input_rank_dtype
+    return self_dtype
+
 @check_dtype_function(_check_tensors_with_the_same_dtype(num_of_tensors=1, size=[1]))
 def aten〇view〡dtype(self_rank_dtype: Tuple[int, int], size: List[int]) -> int:
     self_rank, self_dtype = self_rank_dtype
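Note: the new .vec shape function delegates to aten〇upsample_nearest2d〇vec〡shape, so either output_size or scale_factors must be supplied. A plain-ASCII sketch of the combined logic it resolves to (hypothetical name, for illustration only):

from typing import List, Optional

# Either an explicit output_size or per-dimension scale_factors determines the output H/W.
def upsample2d_vec_shape(input: List[int],
                         output_size: Optional[List[int]],
                         scale_factors: Optional[List[float]]) -> List[int]:
    if output_size is not None:
        return [input[0], input[1], output_size[0], output_size[1]]
    assert scale_factors is not None
    return [input[0], input[1], int(input[2] * scale_factors[0]), int(input[3] * scale_factors[1])]

assert upsample2d_vec_shape([1, 3, 10, 10], [11, 12], None) == [1, 3, 11, 12]
assert upsample2d_vec_shape([1, 3, 10, 9], None, [2.0, 2.3]) == [1, 3, 20, 20]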

projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py

Lines changed: 4 additions & 0 deletions
@@ -1022,6 +1022,10 @@ def emit_with_mutating_variants(key, **kwargs):
     emit("aten::upsample_nearest1d.vec : (Tensor, int[]?, float[]?) -> (Tensor)")
     emit("aten::upsample_nearest2d : (Tensor, int[], float?, float?) -> (Tensor)")
     emit("aten::upsample_nearest2d.vec : (Tensor, int[]?, float[]?) -> (Tensor)")
+    emit(
+        "aten::upsample_bilinear2d : (Tensor, int[], bool, float?, float?) -> (Tensor)"
+    )
+    emit("aten::upsample_bilinear2d.vec : (Tensor, int[]?, bool, float[]?) -> (Tensor)")
     emit(
         "aten::scaled_dot_product_attention : (Tensor, Tensor, Tensor, Tensor?, float, bool, float?, bool) -> (Tensor)"
     )
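Note: each emit(...) registration string carries the full JIT operator signature; the ODS generator derives the op name and the operand/result counts (the 5, 1 and 4, 1 passed to parseDefaultTorchOp in GeneratedTorchOps.td) from it. A simplified, hypothetical parser showing just that correspondence (not torch_ods_gen's actual implementation):

def count_operands_results(registration: str):
    # "ns::op : (T1, T2, ...) -> (R1, ...)" -> (name, number of operands, number of results)
    name, sig = registration.split(" : ", 1)
    params, results = sig.split(" -> ")
    return name, len(params.strip("()").split(", ")), len(results.strip("()").split(", "))

assert count_operands_results(
    "aten::upsample_bilinear2d : (Tensor, int[], bool, float?, float?) -> (Tensor)"
) == ("aten::upsample_bilinear2d", 5, 1)
assert count_operands_results(
    "aten::upsample_bilinear2d.vec : (Tensor, int[]?, bool, float[]?) -> (Tensor)"
) == ("aten::upsample_bilinear2d.vec", 4, 1)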
