
Commit a83d5a9

FranklandJack authored and tatwaichong committed
[mlir][tosa] Make TOSA MUL's Shift an Input
The TOSA-v1.0 specification makes the shift attribute of the MUL (Hadamard product) operator an input. Move the `shift` parameter of the MUL operator in the MLIR TOSA dialect from an attribute to an input and update the lit tests appropriately.

Expand the verifier of the `tosa::MulOp` operation to check the constraints defined in the TOSA-v1.0 specification. Specifically, ensure that all input operands (excluding the optional shift) have the same rank. This means that broadcasting tests which previously checked that rank-0 tensors would be broadcast are no longer valid, so they are removed.

Signed-off-by: Jack Frankland <[email protected]>
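Concretely, the op's assembly changes from carrying the shift as an attribute to taking it as an optional rank-0 i8 tensor operand; the before/after pair below is adapted from the lit-test updates in this commit:

// Before: shift carried as an attribute.
%3 = tosa.mul %arg0, %arg0 {shift = 2 : i8} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32>

// After: shift passed as an optional tensor<i8> operand.
%shift = "tosa.const"() <{value = dense<2> : tensor<i8>}> : () -> tensor<i8>
%3 = tosa.mul %arg0, %arg0, %shift : (tensor<1xi32>, tensor<1xi32>, tensor<i8>) -> tensor<1xi32>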
1 parent 0c71fdd commit a83d5a9

File tree: 13 files changed, +209 −74 lines changed


mlir/include/mlir/Dialect/Tosa/IR/TosaOps.td

Lines changed: 1 addition & 1 deletion
@@ -810,7 +810,7 @@ def Tosa_MulOp : Tosa_ElementwiseOp<"mul", [
   let arguments = (ins
     Tosa_Tensor:$input1,
     Tosa_Tensor:$input2,
-    I8Attr:$shift
+    Optional<TosaTensorRankOf<[Tosa_Int8], [0]>>:$shift
   );

   let results = (outs

mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp

Lines changed: 56 additions & 34 deletions
@@ -90,43 +90,59 @@ static Value createLinalgBodyCalculationForElementwiseOp(
   }

   // tosa::MulOp
-  if (isa<tosa::MulOp>(op) && isa<FloatType>(elementTy))
-    return rewriter.create<arith::MulFOp>(loc, resultTypes, args);
-
-  if (isa<tosa::MulOp>(op) && isa<IntegerType>(elementTy)) {
-    Value a = args[0];
-    Value b = args[1];
-    auto shift =
-        cast<IntegerAttr>(op->getAttr("shift")).getValue().getSExtValue();
-    if (shift > 0) {
-      auto shiftConst =
-          rewriter.create<arith::ConstantIntOp>(loc, shift, /*bitwidth=*/8);
-      if (!a.getType().isInteger(32))
-        a = rewriter.create<arith::ExtSIOp>(loc, rewriter.getI32Type(), a);
-
-      if (!b.getType().isInteger(32))
-        b = rewriter.create<arith::ExtSIOp>(loc, rewriter.getI32Type(), b);
-
-      auto result = rewriter.create<tosa::ApplyScaleOp>(
-          loc, rewriter.getI32Type(), a, b, shiftConst,
-          rewriter.getBoolAttr(false));
-
-      if (elementTy.isInteger(32))
-        return result;
-
-      return rewriter.create<arith::TruncIOp>(loc, elementTy, result);
+  if (isa<tosa::MulOp>(op)) {
+    auto shift_val = cast<tosa::MulOp>(op).getShift();
+    if (!elementTy.isInteger(32) && shift_val.getImpl()) {
+      (void)rewriter.notifyMatchFailure(
+          op, "Cannot have shift value for non i32 output");
+      return nullptr;
+    };
+
+    if (isa<FloatType>(elementTy)) {
+      return rewriter.create<arith::MulFOp>(loc, resultTypes, args[0], args[1]);
     }

-    int aWidth = a.getType().getIntOrFloatBitWidth();
-    int bWidth = b.getType().getIntOrFloatBitWidth();
-    int cWidth = resultTypes[0].getIntOrFloatBitWidth();
+    if (isa<IntegerType>(elementTy)) {
+      int32_t shift = 0;
+      ElementsAttr shift_elem;
+      if (shift_val.getImpl() &&
+          matchPattern(shift_val, m_Constant(&shift_elem))) {
+        // Explicit shift is set.
+        shift = shift_elem.getValues<IntegerAttr>()[0].getInt();
+      }
+
+      Value a = args[0];
+      Value b = args[1];
+      if (shift > 0) {
+        auto shiftConst =
+            rewriter.create<arith::ConstantIntOp>(loc, shift, /*bitwidth=*/8);
+        if (!a.getType().isInteger(32))
+          a = rewriter.create<arith::ExtSIOp>(loc, rewriter.getI32Type(), a);
+
+        if (!b.getType().isInteger(32))
+          b = rewriter.create<arith::ExtSIOp>(loc, rewriter.getI32Type(), b);
+
+        auto result = rewriter.create<tosa::ApplyScaleOp>(
+            loc, rewriter.getI32Type(), a, b, shiftConst,
+            rewriter.getBoolAttr(false));

-    if (aWidth < cWidth)
-      a = rewriter.create<arith::ExtSIOp>(loc, resultTypes[0], a);
-    if (bWidth < cWidth)
-      b = rewriter.create<arith::ExtSIOp>(loc, resultTypes[0], b);
+        if (elementTy.isInteger(32))
+          return result;

-    return rewriter.create<arith::MulIOp>(loc, resultTypes, a, b);
+        return rewriter.create<arith::TruncIOp>(loc, elementTy, result);
+      }
+
+      int aWidth = a.getType().getIntOrFloatBitWidth();
+      int bWidth = b.getType().getIntOrFloatBitWidth();
+      int cWidth = resultTypes[0].getIntOrFloatBitWidth();
+
+      if (aWidth < cWidth)
+        a = rewriter.create<arith::ExtSIOp>(loc, resultTypes[0], a);
+      if (bWidth < cWidth)
+        b = rewriter.create<arith::ExtSIOp>(loc, resultTypes[0], b);
+
+      return rewriter.create<arith::MulIOp>(loc, resultTypes, a, b);
+    }
   }

   // tosa::NegateOp
@@ -940,7 +956,13 @@ elementwiseMatchAndRewriteHelper(Operation *operation, ValueRange operands,
   auto loc = operation->getLoc();
   auto rank =
       cast<RankedTensorType>(operation->getResultTypes().front()).getRank();
-  auto expandedOperands = expandInputRanks(rewriter, loc, operands, rank);
+  // For the mul op we need to avoid expanding the rank of the optional shift
+  // input.
+  auto operandsToExpand =
+      isa<tosa::MulOp>(operation) ? operands.take_front(2) : operands;
+
+  auto expandedOperands =
+      expandInputRanks(rewriter, loc, operandsToExpand, rank);
   auto [targetShape, masterOperands] =
       computeTargetShape(rewriter, loc, indexPool, expandedOperands);
   auto broadcastOperands = broadcastDynamicDimensions(
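The rewritten i32 path above picks one of two lowerings depending on the constant shift value. A minimal sketch of input IR that exercises both paths, adapted from the tosa-to-linalg.mlir updates further down (the function name is illustrative, not from the commit):

func.func @mul_i32_shift_paths(%arg0: tensor<1xi32>) -> (tensor<1xi32>, tensor<1xi32>) {
  // No shift operand (or a constant zero shift): lowered to a plain
  // arith.muli, with sign-extension of narrower operands where needed.
  %0 = tosa.mul %arg0, %arg0 : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32>
  // Constant non-zero shift: lowered through tosa.apply_scale inside the
  // generated linalg.generic body.
  %shift = "tosa.const"() <{value = dense<2> : tensor<i8>}> : () -> tensor<i8>
  %1 = tosa.mul %arg0, %arg0, %shift : (tensor<1xi32>, tensor<1xi32>, tensor<i8>) -> tensor<1xi32>
  return %0, %1 : tensor<1xi32>, tensor<1xi32>
}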

mlir/lib/Dialect/Tosa/IR/TosaCanonicalizations.cpp

Lines changed: 13 additions & 2 deletions
@@ -665,7 +665,18 @@ OpFoldResult MulOp::fold(FoldAdaptor adaptor) {
   auto rhsAttr =
       llvm::dyn_cast_if_present<DenseElementsAttr>(adaptor.getInput2());

-  const int64_t shift = llvm::isa<IntegerType>(resultETy) ? getShift() : 0;
+  // Result right shift on i32_t data type only. For simplification, synthesize
+  // a zero shift for other data type.
+  int32_t shift = 0;
+  if (resultETy.isInteger(32)) {
+    ElementsAttr shift_elem;
+    if (getShift().getImpl()) {
+      if (!matchPattern(getShift(), m_Constant(&shift_elem)))
+        // cannot be folded when the shift value is unknown.
+        return {};
+      shift = shift_elem.getValues<IntegerAttr>()[0].getInt();
+    }
+  }

   if (rhsTy == resultTy) {
     if (isSplatZero(resultETy, lhsAttr))
@@ -680,7 +691,7 @@ OpFoldResult MulOp::fold(FoldAdaptor adaptor) {
     return lhs;
   }

-  return mulBinaryFolder(lhsAttr, rhsAttr, resultTy, getShift());
+  return mulBinaryFolder(lhsAttr, rhsAttr, resultTy, shift);
 }

 OpFoldResult SubOp::fold(FoldAdaptor adaptor) {
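One consequence of the shift becoming a value is visible in the `return {}` branch above: the mul folds only apply when the shift is a compile-time constant (or absent). An illustrative case where the fold must bail out because the shift operand is unknown (the function name is hypothetical, not part of the commit's tests):

func.func @mul_one_unknown_shift(%arg0: tensor<2x3xi32>, %shift: tensor<i8>) -> tensor<2x3xi32> {
  %ones = "tosa.const"() {value = dense<1> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
  // Not folded away: the shift value is unknown at compile time.
  %0 = tosa.mul %arg0, %ones, %shift : (tensor<2x3xi32>, tensor<2x3xi32>, tensor<i8>) -> tensor<2x3xi32>
  return %0 : tensor<2x3xi32>
}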

mlir/lib/Dialect/Tosa/IR/TosaOps.cpp

Lines changed: 76 additions & 3 deletions
@@ -945,9 +945,82 @@ LogicalResult tosa::SliceOp::verify() {
 }

 LogicalResult tosa::MulOp::verify() {
-  Type elementTy = getInput1().getType().getElementType();
-  if (isa<FloatType>(elementTy) && getShift() != 0)
-    return emitOpError() << "require shift to be 0 for float type";
+  auto resElemType = getElementTypeOrSelf(getOutput());
+
+  // Verify if the element type amoung operands and result match tosa
+  // specification.
+  if (auto resIntType = dyn_cast<IntegerType>(resElemType)) {
+    IntegerType lhsIntType =
+        cast<IntegerType>(getElementTypeOrSelf(getInput1()));
+    IntegerType rhsIntType =
+        cast<IntegerType>(getElementTypeOrSelf(getInput2()));
+    if (lhsIntType != rhsIntType)
+      return emitOpError("requires the same element type for all operands");
+
+    // Though the spec requires the element type of result to be i32, a more
+    // relaxed way is provided at dialect level for easier cooperating with
+    // other dialects.
+    if (lhsIntType.getWidth() > resIntType.getWidth())
+      return emitOpError("invalid data type size for operands or result");
+
+  } else {
+    // For other supported type, the spec requires requires the same element
+    // type for all operands (excludes `shift` operand) and results.
+    for (int i = 0; i < 2; ++i) {
+      if (getElementTypeOrSelf(getOperand(i)) != resElemType)
+        return emitOpError(
+            "requires the same element type for all operands and results");
+    }
+  }
+
+  // Check if the shift value apply to non-i32 output type as that is not
+  // allowed in the spec.
+  if (!(llvm::isa<IntegerType>(resElemType) && resElemType.isInteger(32)))
+    if (getShift().getImpl())
+      return emitOpError("right shift output only on i32 data type");
+
+  // Verify the op has same ranks for all main operands (excludes extra operands
+  // such as shift of mul op, so this is the only difference with the built-in
+  // `SameOperandsAndResultRank` trait) and results types, if known.
+
+  // delegate function that returns true if type is a shaped type with known
+  // rank
+  auto hasRank = [](const Type type) {
+    if (auto shaped_type = dyn_cast<ShapedType>(type))
+      return shaped_type.hasRank();
+
+    return false;
+  };
+
+  auto rankedOperandTypes =
+      llvm::to_vector(llvm::make_filter_range(getOperandTypes(), hasRank));
+
+  auto rankedResultTypes =
+      llvm::make_filter_range(getOperation()->getResultTypes(), hasRank);
+
+  // If all operands and results are unranked, then no further verification.
+  if (rankedOperandTypes.empty() && rankedResultTypes.empty())
+    return success();
+
+  // delegate function that returns rank of shaped type with known rank
+  auto getRank = [](const Type type) {
+    return cast<ShapedType>(type).getRank();
+  };
+
+  auto rank = !rankedOperandTypes.empty() ? getRank(*rankedOperandTypes.begin())
+                                          : getRank(*rankedResultTypes.begin());
+
+  for (size_t i = 0; i < 2; ++i) {
+    if (rank != getRank(rankedOperandTypes[i])) {
+      return emitOpError("operands don't have matching ranks");
+    }
+  }
+
+  for (const auto type : rankedResultTypes) {
+    if (rank != getRank(type)) {
+      return emitOpError("result type has different rank than operands");
+    }
+  }

   return success();
 }
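To illustrate the new checks, here is hypothetical IR (not taken from the commit's tests) that the expanded verifier rejects, annotated with the corresponding diagnostics from the code above:

func.func @mul_shift_on_float(%a: tensor<2x3xf32>, %b: tensor<2x3xf32>) -> tensor<2x3xf32> {
  %shift = "tosa.const"() <{value = dense<1> : tensor<i8>}> : () -> tensor<i8>
  // error: 'tosa.mul' op right shift output only on i32 data type
  %0 = tosa.mul %a, %b, %shift : (tensor<2x3xf32>, tensor<2x3xf32>, tensor<i8>) -> tensor<2x3xf32>
  return %0 : tensor<2x3xf32>
}

func.func @mul_rank_mismatch(%x: tensor<2x3xi32>, %y: tensor<3xi32>) -> tensor<2x3xi32> {
  // error: 'tosa.mul' op operands don't have matching ranks
  %0 = tosa.mul %x, %y : (tensor<2x3xi32>, tensor<3xi32>) -> tensor<2x3xi32>
  return %0 : tensor<2x3xi32>
}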

mlir/lib/Dialect/Tosa/Transforms/TosaDecomposeDepthwise.cpp

Lines changed: 1 addition & 1 deletion
@@ -133,7 +133,7 @@ struct DepthwiseConv2DIsMul : public OpRewritePattern<tosa::DepthwiseConv2DOp> {

     Value mulValue = rewriter
                          .create<tosa::MulOp>(op.getLoc(), mulShapeType, input,
-                                              weight, /*shift=*/0)
+                                              weight, Value{} /* zero_shift */)
                          .getResult();

     // Reshape output to [N, H, W, C * M].

mlir/lib/Dialect/Tosa/Transforms/TosaMakeBroadcastable.cpp

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ struct ConvertTosaOp<tosa::MulOp> : public OpRewritePattern<tosa::MulOp> {

     Value input1 = tosaBinaryOp.getInput1();
     Value input2 = tosaBinaryOp.getInput2();
-    int32_t shift = tosaBinaryOp.getShift();
+    Value shift = tosaBinaryOp.getShift();
     Value output = tosaBinaryOp.getResult();
     auto outputType = dyn_cast<RankedTensorType>(output.getType());
     if (!outputType)

mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir

Lines changed: 6 additions & 4 deletions
@@ -472,7 +472,7 @@ func.func @test_simple_f32(%arg0: tensor<1xf32>) -> () {

   // CHECK: linalg.generic
   // CHECK: arith.mulf
-  %4 = tosa.mul %0, %1 {shift = 0 : i8} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
+  %4 = tosa.mul %0, %1 : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>

   // CHECK: linalg.generic
   // CHECK: arith.negf
@@ -618,7 +618,7 @@ func.func @test_simple_i16(%arg0: tensor<1xi16>) -> () {
   // CHECK: arith.extsi
   // CHECK: arith.extsi
   // CHECK: arith.muli
-  %0 = tosa.mul %arg0, %arg0 {shift = 0 : i8} : (tensor<1xi16>, tensor<1xi16>) -> tensor<1xi32>
+  %0 = tosa.mul %arg0, %arg0 : (tensor<1xi16>, tensor<1xi16>) -> tensor<1xi32>

   return
 }
@@ -646,12 +646,14 @@ func.func @test_simple_i32(%arg0: tensor<1xi32>, %unsigned: tensor<1xui32>, %uns

   // CHECK: linalg.generic
   // CHECK: arith.muli
-  %2 = tosa.mul %arg0, %arg0 {shift = 0 : i8} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32>
+  %shift1 = "tosa.const"() <{value = dense<0> : tensor<i8>}> : () -> tensor<i8>
+  %2 = tosa.mul %arg0, %arg0, %shift1 : (tensor<1xi32>, tensor<1xi32>, tensor<i8>) -> tensor<1xi32>

   // CHECK: linalg.generic
   // CHECK: arith.constant 2
   // CHECK: apply_scale
-  %3 = tosa.mul %arg0, %arg0 {shift = 2 : i8} : (tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32>
+  %shift2 = "tosa.const"() <{value = dense<2> : tensor<i8>}> : () -> tensor<i8>
+  %3 = tosa.mul %arg0, %arg0, %shift2: (tensor<1xi32>, tensor<1xi32>, tensor<i8>) -> tensor<1xi32>

   // CHECK: linalg.generic
   // CHECK: arith.divsi

mlir/test/Dialect/Tosa/canonicalize.mlir

Lines changed: 19 additions & 6 deletions
@@ -332,7 +332,7 @@ func.func @mul_one_float(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
   // CHECK: return %arg0
   // CHECK-NOT: tosa.mul
   %ones = "tosa.const"() {value = dense<1.0> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
-  %1 = tosa.mul %arg0, %ones {shift = 0 : i8} : (tensor<2x3xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
+  %1 = tosa.mul %arg0, %ones : (tensor<2x3xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
   return %1 : tensor<2x3xf32>
 }

@@ -343,7 +343,7 @@ func.func @mul_bcast_one_float(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
   // CHECK: return %arg0
   // CHECK-NOT: tosa.mul
   %ones = "tosa.const"() {value = dense<1.0> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
-  %1 = tosa.mul %ones, %arg0 {shift = 0 : i8} : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
+  %1 = tosa.mul %ones, %arg0 : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
   return %1 : tensor<2x3xf32>
 }

@@ -354,7 +354,20 @@ func.func @mul_one_int(%arg0: tensor<2x3xi32>) -> tensor<2x3xi32> {
   // CHECK: return %arg0
   // CHECK-NOT: tosa.mul
   %ones = "tosa.const"() {value = dense<1> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
-  %1 = tosa.mul %arg0, %ones {shift = 0 : i8} : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32>
+  %1 = tosa.mul %arg0, %ones : (tensor<2x3xi32>, tensor<2x3xi32>) -> tensor<2x3xi32>
+  return %1 : tensor<2x3xi32>
+}
+
+// -----
+
+// CHECK-LABEL: @mul_one_int_and_shift
+func.func @mul_one_int_and_shift(%arg0: tensor<2x3xi32>) -> tensor<2x3xi32> {
+  // CHECK-DAG: %[[VAL_1:.*]] = "tosa.const"() <{value = dense<1> : tensor<2x3xi32>}>
+  // CHECK-DAG: %[[VAL_2:.*]] = "tosa.const"() <{value = dense<31> : tensor<i8>}>
+  // CHECK: %[[VAL_3:.*]] = tosa.mul %arg0, %[[VAL_1]], %[[VAL_2]] : (tensor<2x3xi32>, tensor<2x3xi32>, tensor<i8>)
+  %ones = "tosa.const"() {value = dense<1> : tensor<2x3xi32>} : () -> tensor<2x3xi32>
+  %shift = "tosa.const"() <{value = dense<31> : tensor<i8>}> : () -> tensor<i8>
+  %1 = tosa.mul %arg0, %ones, %shift : (tensor<2x3xi32>, tensor<2x3xi32>, tensor<i8>) -> tensor<2x3xi32>
   return %1 : tensor<2x3xi32>
 }

@@ -365,11 +378,11 @@ func.func @mul_zero_broadcast(%arg0: tensor<2x3xf32>) -> (tensor<2x3xf32>, tenso
   // CHECK: %[[ZERO:.*]] = "tosa.const"() <{value = dense<0.000000e+00> : tensor<2x3xf32>}
   // CHECK-NOT: tosa.mul
   %zeros = "tosa.const"() {value = dense<0.0> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
-  %1 = tosa.mul %arg0, %zeros {shift = 0 : i8} : (tensor<2x3xf32>, tensor<1x1xf32>) -> tensor<2x3xf32>
+  %1 = tosa.mul %arg0, %zeros : (tensor<2x3xf32>, tensor<1x1xf32>) -> tensor<2x3xf32>

   // CHECK-NOT: tosa.mul
   // CHECK: return %[[ZERO]], %[[ZERO]]
-  %2 = tosa.mul %zeros, %arg0 {shift = 0 : i8} : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
+  %2 = tosa.mul %zeros, %arg0 : (tensor<1x1xf32>, tensor<2x3xf32>) -> tensor<2x3xf32>
   return %1, %2 : tensor<2x3xf32>, tensor<2x3xf32>
 }

@@ -927,7 +940,7 @@ func.func @mul_quant_nofold() -> tensor<1x!quant.uniform<i8:f32, 3.0757404601899
   // CHECK: tosa.mul
   %0 = "tosa.const"() {value = dense<0> : tensor<1xi8>} : () -> tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>
   %1 = "tosa.const"() {value = dense<1> : tensor<1xi8>} : () -> tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>
-  %2 = tosa.mul %0, %1 { shift = 0 : i8} : (tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>, tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>) -> tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>
+  %2 = tosa.mul %0, %1 : (tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>, tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>) -> tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>
   return %2 : tensor<1x!quant.uniform<i8:f32, 3.0757404601899907E-5:-128>>
 }