@@ -71,7 +71,7 @@ Operation *arith::ArithDialect::materializeConstant(OpBuilder &builder,
 /// Return true if the type is compatible with fast math, i.e.
 /// it is a float type or contains a float type.
 bool arith::ArithFastMathInterface::isCompatibleType(Type type) {
-  if (isa<FloatType>(type))
+  if (isa<FloatType>(type) || isa<ComplexType>(type))
     return true;
 
   // ShapeType's with ValueSemantics represent containers
@@ -81,31 +81,12 @@ bool arith::ArithFastMathInterface::isCompatibleType(Type type) {
   if (auto shapedType = dyn_cast<ShapedType>(type))
     return isCompatibleType(shapedType.getElementType());
 
-  // ComplexType's element type is always a FloatType.
-  if (auto complexType = dyn_cast<ComplexType>(type))
-    return true;
-
-  // TODO: what about TupleType and custom dialect struct-like types?
-  // It seems that they worth an interface to get to the list of element types.
-  //
-  // NOTE: LLVM only allows fast-math flags for instructions producing
-  // structures with homogeneous floating point members. I think
-  // this restriction must not be asserted here, because custom
-  // MLIR operations may be converted such that the original operation's
-  // FastMathFlags still need to be propagated to the target
-  // operations.
-
   return false;
 }
 
 /// Return true if any of the results of the operation
 /// has a type compatible with fast math, i.e. it is a float type
 /// or contains a float type.
-///
-/// TODO: the results often have the same type, and traversing
-/// the same type again and again is not very efficient.
-/// We can cache it here for the duration of the processing.
-/// Other ideas?
 bool arith::ArithFastMathInterface::isApplicableImpl() {
   Operation *op = getOperation();
   if (llvm::any_of(op->getResults(),
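
For context, a minimal sketch (not part of this commit) of how the revised isCompatibleType would classify a few builtin types. It assumes the method is exposed as a static helper on arith::ArithFastMathInterface, as its definition above suggests; the include paths and the checkCompatibility helper are illustrative assumptions only.

// Illustrative sketch only: assumes isCompatibleType is a static helper on
// arith::ArithFastMathInterface, as the definition in this diff suggests.
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include <cassert>

using namespace mlir;

static void checkCompatibility(MLIRContext &ctx) {
  Type f32 = Float32Type::get(&ctx);
  Type i32 = IntegerType::get(&ctx, 32);

  // Plain floats and, with this change, complex-of-float are compatible.
  assert(arith::ArithFastMathInterface::isCompatibleType(f32));
  assert(arith::ArithFastMathInterface::isCompatibleType(ComplexType::get(f32)));

  // Shaped types are compatible if their element type is.
  assert(arith::ArithFastMathInterface::isCompatibleType(
      VectorType::get({4}, f32)));

  // Integer types remain incompatible.
  assert(!arith::ArithFastMathInterface::isCompatibleType(i32));
}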