Commit 8158fb6

Merge pull request #476 from Xilinx/bump_to_aca33f17
[AutoBump] Merge with fixes of aca33f1 (Oct 22, needs LLVM Oct 19) (92)
2 parents 2fc48a6 + 0038abc commit 8158fb6

File tree

25 files changed: +1322 additions, -232 deletions

externals/llvm-project

Submodule llvm-project updated 3644 files

externals/stablehlo

Submodule stablehlo updated 44 files

include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td

Lines changed: 141 additions & 0 deletions
@@ -309,6 +309,61 @@ def Torch_AtenRrelu_Op : Torch_Op<"aten.rrelu_", [
   }];
 }
 
+def Torch_AtenRreluWithNoiseOp : Torch_Op<"aten.rrelu_with_noise", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::rrelu_with_noise : (Tensor, Tensor, Scalar, Scalar, bool, Generator?) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self,
+    AnyTorchTensorType:$noise,
+    AnyTorchScalarType:$lower,
+    AnyTorchScalarType:$upper,
+    Torch_BoolType:$training,
+    AnyTorchOptionalGeneratorType:$generator
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenRreluWithNoiseOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 6, 1);
+    }
+    void AtenRreluWithNoiseOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 6, 1);
+    }
+  }];
+}
+
+def Torch_AtenRreluWithNoise_Op : Torch_Op<"aten.rrelu_with_noise_", [
+    IsTrailingUnderscoreInplaceVariant,
+    AllowsTypeRefinement
+  ]> {
+  let summary = "Generated op for `aten::rrelu_with_noise_ : (Tensor, Tensor, Scalar, Scalar, bool, Generator?) -> (Tensor)`";
+  let arguments = (ins
+    Torch_NonValueTensorType:$self,
+    Torch_NonValueTensorType:$noise,
+    AnyTorchScalarType:$lower,
+    AnyTorchScalarType:$upper,
+    Torch_BoolType:$training,
+    AnyTorchOptionalGeneratorType:$generator
+  );
+  let results = (outs
+    AnyTorchOptionalNonValueTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenRreluWithNoise_Op::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 6, 1);
+    }
+    void AtenRreluWithNoise_Op::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 6, 1);
+    }
+  }];
+}
+
 def Torch_AtenCeluOp : Torch_Op<"aten.celu", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -7352,6 +7407,7 @@ def Torch_AtenMaxPool3dWithIndicesOp : Torch_Op<"aten.max_pool3d_with_indices",
       printDefaultTorchOp(printer, *this, 6, 2);
     }
   }];
+  let hasCanonicalizer = 1;
 }
 
 def Torch_AtenMaxPool3dWithIndicesBackwardOp : Torch_Op<"aten.max_pool3d_with_indices_backward", [
@@ -8079,6 +8135,7 @@ def Torch_AtenTransposeIntOp : Torch_Op<"aten.transpose.int", [
       printDefaultTorchOp(printer, *this, 3, 1);
     }
   }];
+  let hasFolder = 1;
 }
 
 def Torch_AtenPixelShuffleOp : Torch_Op<"aten.pixel_shuffle", [
@@ -9671,6 +9728,7 @@ def Torch_AtenFlattenUsingIntsOp : Torch_Op<"aten.flatten.using_ints", [
       printDefaultTorchOp(printer, *this, 3, 1);
     }
   }];
+  let hasFolder = 1;
 }
 
 def Torch_AtenUnflattenIntOp : Torch_Op<"aten.unflatten.int", [
@@ -9695,6 +9753,7 @@ def Torch_AtenUnflattenIntOp : Torch_Op<"aten.unflatten.int", [
       printDefaultTorchOp(printer, *this, 3, 1);
     }
   }];
+  let hasFolder = 1;
   let hasCanonicalizer = 1;
 }
 
@@ -14085,6 +14144,59 @@ def Torch_AtenUpsampleNearest2dVecOp : Torch_Op<"aten.upsample_nearest2d.vec", [
   }];
 }
 
+def Torch_AtenUpsampleBilinear2dOp : Torch_Op<"aten.upsample_bilinear2d", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::upsample_bilinear2d : (Tensor, int[], bool, float?, float?) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$self,
+    AnyTorchListOfTorchIntType:$output_size,
+    Torch_BoolType:$align_corners,
+    AnyTorchOptionalFloatType:$scales_h,
+    AnyTorchOptionalFloatType:$scales_w
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenUpsampleBilinear2dOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 5, 1);
+    }
+    void AtenUpsampleBilinear2dOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 5, 1);
+    }
+  }];
+}
+
+def Torch_AtenUpsampleBilinear2dVecOp : Torch_Op<"aten.upsample_bilinear2d.vec", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::upsample_bilinear2d.vec : (Tensor, int[]?, bool, float[]?) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$input,
+    AnyTorchOptionalListOfTorchIntType:$output_size,
+    Torch_BoolType:$align_corners,
+    AnyTorchOptionalListOfTorchFloatType:$scale_factors
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenUpsampleBilinear2dVecOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 4, 1);
+    }
+    void AtenUpsampleBilinear2dVecOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 4, 1);
+    }
+  }];
+}
+
 def Torch_AtenScaledDotProductAttentionOp : Torch_Op<"aten.scaled_dot_product_attention", [
     AllowsTypeRefinement,
     HasValueSemantics,
@@ -16861,6 +16973,35 @@ def Torch_AtenLeakyReluBackwardOp : Torch_Op<"aten.leaky_relu_backward", [
   }];
 }
 
+def Torch_AtenRreluWithNoiseBackwardOp : Torch_Op<"aten.rrelu_with_noise_backward", [
+    AllowsTypeRefinement,
+    HasValueSemantics,
+    ReadOnly
+  ]> {
+  let summary = "Generated op for `aten::rrelu_with_noise_backward : (Tensor, Tensor, Tensor, Scalar, Scalar, bool, bool) -> (Tensor)`";
+  let arguments = (ins
+    AnyTorchTensorType:$grad_output,
+    AnyTorchTensorType:$self,
+    AnyTorchTensorType:$noise,
+    AnyTorchScalarType:$lower,
+    AnyTorchScalarType:$upper,
+    Torch_BoolType:$training,
+    Torch_BoolType:$self_is_result
+  );
+  let results = (outs
+    AnyTorchOptionalTensorType:$result
+  );
+  let hasCustomAssemblyFormat = 1;
+  let extraClassDefinition = [{
+    ParseResult AtenRreluWithNoiseBackwardOp::parse(OpAsmParser &parser, OperationState &result) {
+      return parseDefaultTorchOp(parser, result, 7, 1);
+    }
+    void AtenRreluWithNoiseBackwardOp::print(OpAsmPrinter &printer) {
+      printDefaultTorchOp(printer, *this, 7, 1);
+    }
+  }];
+}
+
 def Torch_AtenQuantizePerChannelOp : Torch_Op<"aten.quantize_per_channel", [
     AllowsTypeRefinement,
     HasValueSemantics,
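
For orientation only (this is not part of the diff): the elementwise rule behind `rrelu_with_noise_backward` is that the gradient passes through unchanged for positive inputs, and for negative inputs it is scaled by the sampled slope stored in `noise` during training, or by the fixed midpoint `(lower + upper) / 2` otherwise. The C++ below is a hedged, scalar-only sketch of that rule; the helper name is hypothetical and the `self_is_result` flag is ignored for simplicity.

#include <cassert>

// Hedged sketch of the elementwise math behind rrelu_with_noise_backward,
// reduced to scalars. In training mode the randomly sampled slope is stored
// in `noise`; in inference mode the slope is the midpoint of [lower, upper].
// Illustrative only; not code from this commit.
double rreluWithNoiseBackwardScalar(double gradOutput, double self,
                                    double noise, double lower, double upper,
                                    bool training) {
  if (self > 0.0)
    return gradOutput; // identity region: gradient passes through
  double slope = training ? noise : (lower + upper) / 2.0;
  return gradOutput * slope; // negative region: scaled by the slope
}

int main() {
  // Positive input: gradient unchanged.
  assert(rreluWithNoiseBackwardScalar(1.0, 2.0, 0.2, 0.125, 0.333, true) == 1.0);
  // Negative input in training: scaled by the sampled noise slope.
  assert(rreluWithNoiseBackwardScalar(1.0, -2.0, 0.2, 0.125, 0.333, true) == 0.2);
  return 0;
}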

lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp

Lines changed: 29 additions & 3 deletions
@@ -1087,9 +1087,6 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
         if (binder.customOpNameStringAttr(autoPad, "auto_pad", "NOTSET"))
           return rewriter.notifyMatchFailure(binder.op,
                                              "auto_pad bind failure");
-        if (autoPad != "NOTSET")
-          return rewriter.notifyMatchFailure(
-              binder.op, "unsupported conversion: auto_pad != NOTSET");
 
         Torch::ValueTensorType resultTypeOut;
         Value operand;
@@ -1136,13 +1133,42 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
           return rewriter.notifyMatchFailure(binder.op,
                                              "dilations bind failure");
 
+        // set default padding
         if (padding.empty())
           padding.resize(spatial, 0);
         if (strides.empty())
           strides.resize(spatial, 1);
         if (dilations.empty())
           dilations.resize(spatial, 1);
 
+        auto inputTensorType = cast<Torch::ValueTensorType>(operand.getType());
+
+        // Padding for the beginning and ending along each spatial axis, it can
+        // take any value greater than or equal to 0. The value represent the
+        // number of pixels added to the beginning and end part of the
+        // corresponding axis. pads format should be as follow [x1_begin,
+        // x2_begin…x1_end, x2_end,…], where xi_begin the number of pixels added
+        // at the beginning of axis i and xi_end, the number of pixels added at
+        // the end of axis i.
+        if (autoPad != "NOTSET" && autoPad != "VALID") {
+          const bool isSameLower = autoPad == "SAME_LOWER";
+          ArrayRef<int64_t> inputShape = inputTensorType.getSizes();
+          padding.resize_for_overwrite(2 * spatial);
+          for (unsigned dimIdx = 0; dimIdx < spatial; dimIdx++) {
+            const int64_t dilatedKernelSize =
+                dilations[dimIdx] * (kernel[dimIdx] - 1) + 1;
+            int64_t totalPad = ((inputShape[dimIdx + 2] + strides[dimIdx] - 1) /
+                                    strides[dimIdx] -
+                                1) *
+                                   strides[dimIdx] +
+                               dilatedKernelSize - inputShape[dimIdx + 2];
+            totalPad = totalPad >= 0 ? totalPad : 0;
+            padding[dimIdx] =
+                isSameLower ? ((totalPad + 1) / 2) : (totalPad / 2);
+            padding[spatial + dimIdx] = totalPad - padding[dimIdx];
+          }
+        }
+
         // If the padding is symmetric we can push the padding operation to the
         // torch operator.
         if (padding.size() == static_cast<size_t>(2 * spatial)) {
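
To make the auto_pad math above concrete: for SAME_UPPER / SAME_LOWER, the total padding per axis is whatever makes the output size reach ceil(inputSize / stride). The standalone sketch below uses illustrative values and is not code from the commit; it just reproduces the `totalPad` formula for a single spatial axis.

#include <cstdint>
#include <cstdio>

// Standalone reproduction of the totalPad computation from the hunk above,
// for one spatial axis with example values.
int main() {
  const int64_t inputSize = 7;  // spatial extent of the input axis
  const int64_t kernel = 3;
  const int64_t stride = 2;
  const int64_t dilation = 1;
  const bool isSameLower = false; // SAME_UPPER

  const int64_t dilatedKernelSize = dilation * (kernel - 1) + 1;
  // Output size under SAME padding is ceil(inputSize / stride); totalPad is
  // whatever padding makes that output size reachable.
  int64_t totalPad = ((inputSize + stride - 1) / stride - 1) * stride +
                     dilatedKernelSize - inputSize;
  totalPad = totalPad >= 0 ? totalPad : 0;
  const int64_t padBegin = isSameLower ? (totalPad + 1) / 2 : totalPad / 2;
  const int64_t padEnd = totalPad - padBegin;

  // For inputSize=7, kernel=3, stride=2: totalPad=2, padBegin=1, padEnd=1,
  // giving an output extent of ceil(7/2) = 4.
  std::printf("totalPad=%lld begin=%lld end=%lld\n", (long long)totalPad,
              (long long)padBegin, (long long)padEnd);
  return 0;
}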

lib/Conversion/TorchToLinalg/Linear.cpp

Lines changed: 31 additions & 28 deletions
@@ -1125,54 +1125,57 @@ class ConvertAtenConvolutionOp : public OpConversionPattern<AtenConvolutionOp> {
     }
 
     if (numGroups == 1 && inputZp) {
-      // The quantized version uses a different channel ordering so we need to
-      // permute the tensors in order to use the existing path. We should
-      // eventually directly support this channel ordering.
-      llvm::SmallVector<int64_t> inPerms, weightPerms;
-      inPerms.push_back(0); // N stays at the front for input.
-      // Then we expect the spatial dimensions
-      for (size_t i = 0; i < numSpatialDims; ++i) {
-        inPerms.push_back(i + 2);
-        weightPerms.push_back(i + 2);
-      }
-      inPerms.push_back(1);
-      weightPerms.append({1, 0});
-
-      paddedInput = transposeValue(op.getLoc(), paddedInput, inPerms, rewriter);
-      weight = transposeValue(op.getLoc(), weight, weightPerms, rewriter);
-      outputTensor =
-          transposeValue(op.getLoc(), outputTensor, inPerms, rewriter);
-
       switch (numSpatialDims) {
       case 2:
         conv = rewriter
-                   .create<linalg::Conv2DNhwcHwcfQOp>(
+                   .create<linalg::Conv2DNchwFchwQOp>(
                        loc, outputTensor.getType(),
                        ValueRange{paddedInput, weight, inputZp, weightZp},
                        outputTensor, stridesAttr, dilationAttr)
                    .getResult(0);
         break;
-      case 3:
+      case 3: {
+        // The quantized version uses a different channel ordering so we need to
+        // permute the tensors in order to use the existing path. We should
+        // eventually directly support this channel ordering.
+        llvm::SmallVector<int64_t> inPerms, weightPerms;
+        inPerms.push_back(0); // N stays at the front for input.
+        // Then we expect the spatial dimensions
+        for (size_t i = 0; i < numSpatialDims; ++i) {
+          inPerms.push_back(i + 2);
+          weightPerms.push_back(i + 2);
+        }
+        inPerms.push_back(1);
+        weightPerms.append({1, 0});
+
+        paddedInput =
+            transposeValue(op.getLoc(), paddedInput, inPerms, rewriter);
+        weight = transposeValue(op.getLoc(), weight, weightPerms, rewriter);
+        outputTensor =
+            transposeValue(op.getLoc(), outputTensor, inPerms, rewriter);
+
         conv = rewriter
                    .create<linalg::Conv3DNdhwcDhwcfQOp>(
                        loc, outputTensor.getType(),
                        ValueRange{paddedInput, weight, inputZp, weightZp},
                        outputTensor, stridesAttr, dilationAttr)
                    .getResult(0);
+
+        llvm::SmallVector<int64_t> outPerms;
+        outPerms.push_back(0);
+        outPerms.push_back(inPerms.size() - 1);
+        for (size_t i = 0; i < numSpatialDims; ++i) {
+          outPerms.push_back(i + 1);
+        }
+        conv = transposeValue(op.getLoc(), conv, outPerms, rewriter);
+
         break;
+      }
       default:
         return rewriter.notifyMatchFailure(
             op, "unimplemented: only 1D, 2D, and 3D convolution supported");
       };
 
-      llvm::SmallVector<int64_t> outPerms;
-      outPerms.push_back(0);
-      outPerms.push_back(inPerms.size() - 1);
-      for (size_t i = 0; i < numSpatialDims; ++i) {
-        outPerms.push_back(i + 1);
-      }
-      conv = transposeValue(op.getLoc(), conv, outPerms, rewriter);
-
       Type newResultType = getTypeConverter()->convertType(op.getType());
       if (accumulatorDType != resultDTy) {
         Type resultElementType =
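
A side note on the 3-D quantized path above: the permutation vectors it builds are fixed once `numSpatialDims == 3`, and it can help to see them spelled out. The sketch below is illustrative only (not code from the commit); it rebuilds `inPerms`, `weightPerms`, and `outPerms` the same way and prints the resulting index orders, i.e. NCDHW to NDHWC for input/output, FCDHW to DHWCF for the weight, and the inverse shuffle applied to the convolution result.

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative reconstruction of the permutation vectors built in the 3-D
// quantized convolution case above (numSpatialDims == 3). It only shows the
// resulting index orders; it is not code from the commit.
int main() {
  const size_t numSpatialDims = 3;

  std::vector<int64_t> inPerms, weightPerms;
  inPerms.push_back(0); // N stays at the front for input.
  for (size_t i = 0; i < numSpatialDims; ++i) {
    inPerms.push_back(i + 2);
    weightPerms.push_back(i + 2);
  }
  inPerms.push_back(1);     // C moves to the back: NCDHW -> NDHWC
  weightPerms.push_back(1); // weight FCDHW -> DHWCF
  weightPerms.push_back(0);

  std::vector<int64_t> outPerms;
  outPerms.push_back(0);
  outPerms.push_back(inPerms.size() - 1); // channel dim back to position 1
  for (size_t i = 0; i < numSpatialDims; ++i)
    outPerms.push_back(i + 1);

  // Expected: inPerms = [0,2,3,4,1], weightPerms = [2,3,4,1,0],
  //           outPerms = [0,4,1,2,3]
  auto print = [](const char *name, const std::vector<int64_t> &v) {
    std::printf("%s = [", name);
    for (size_t i = 0; i < v.size(); ++i)
      std::printf("%s%lld", i ? "," : "", (long long)v[i]);
    std::printf("]\n");
  };
  print("inPerms", inPerms);
  print("weightPerms", weightPerms);
  print("outPerms", outPerms);
  return 0;
}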
