diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
index dd25fbbce14b9..a8743b196bfe7 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
@@ -661,6 +661,12 @@ def SPIRV_INTELSubgroupBlockReadOp : SPIRV_IntelVendorOp<"SubgroupBlockRead", []
   let results = (outs
     SPIRV_Type:$value
   );
+
+  let hasCustomAssemblyFormat = 0;
+
+  let assemblyFormat = [{
+    $ptr attr-dict `:` type($ptr) `->` type($value)
+  }];
 }

 // -----

diff --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
index a32f625ae8211..a1b866387e2ec 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
@@ -26,7 +26,13 @@ class SPIRV_GroupNonUniformArithmeticOp<string mnemonic, Type type,
     SPIRV_ScalarOrVectorOf<type>:$result
-  );
+  );
+
+  let hasCustomAssemblyFormat = 0;
+
+  let assemblyFormat = [{
+    $execution_scope $group_operation $value (`cluster_size` `(` $cluster_size^ `)`)? attr-dict `:` type($value) (`,` type($cluster_size)^)? `->` type(results)
+  }];
 }

 // -----

@@ -318,24 +324,14 @@ def SPIRV_GroupNonUniformFAddOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fadd-op ::= ssa-id `=` `spirv.GroupNonUniformFAdd` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFAdd <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];

@@ -378,24 +374,14 @@ def SPIRV_GroupNonUniformFMaxOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmax-op ::= ssa-id `=` `spirv.GroupNonUniformFMax` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFMax <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];
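With the declarative format above, the scope and group operation print as enum attributes in angle brackets, and the result type becomes explicit after `->`. A minimal sketch of IR the new format is meant to round-trip (illustrative only; `%value` and `%vec` are placeholder SSA names):

```mlir
%four = spirv.Constant 4 : i32
// Reduction across the whole workgroup.
%r0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %value : f32 -> f32
// Clustered reduction; the i32 cluster-size type joins the type list.
%r1 = spirv.GroupNonUniformFAdd <Subgroup> <ClusteredReduce> %vec cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
```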
@@ -438,24 +424,14 @@ def SPIRV_GroupNonUniformFMinOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmin-op ::= ssa-id `=` `spirv.GroupNonUniformFMin` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFMin <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];

@@ -495,24 +471,14 @@ def SPIRV_GroupNonUniformFMulOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    float-scalar-vector-type ::= float-type |
-                                 `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmul-op ::= ssa-id `=` `spirv.GroupNonUniformFMul` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` float-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %scalar : f32
-    %1 = spirv.GroupNonUniformFMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %scalar : f32 -> f32
+    %1 = spirv.GroupNonUniformFMul <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xf32>, i32 -> vector<4xf32>
     ```
   }];

@@ -550,24 +516,14 @@ def SPIRV_GroupNonUniformIAddOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-iadd-op ::= ssa-id `=` `spirv.GroupNonUniformIAdd` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformIAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformIAdd <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];

@@ -605,24 +561,14 @@ def SPIRV_GroupNonUniformIMulOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-imul-op ::= ssa-id `=` `spirv.GroupNonUniformIMul` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformIMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformIMul <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
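The removed grammar blocks also listed `"InclusiveScan"` and `"ExclusiveScan"`; those group operations use the same declarative syntax as `Reduce`. An illustrative sketch (operand names are placeholders):

```mlir
// Prefix sums across the subgroup; same type syntax as a plain reduction.
%scan0 = spirv.GroupNonUniformIAdd <Subgroup> <InclusiveScan> %val : i32 -> i32
%scan1 = spirv.GroupNonUniformIAdd <Subgroup> <ExclusiveScan> %val : i32 -> i32
```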
@@ -662,24 +608,14 @@ def SPIRV_GroupNonUniformSMaxOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-smax-op ::= ssa-id `=` `spirv.GroupNonUniformSMax` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformSMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformSMax <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];

@@ -719,24 +655,14 @@ def SPIRV_GroupNonUniformSMinOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-smin-op ::= ssa-id `=` `spirv.GroupNonUniformSMin` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformSMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformSMin <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];

@@ -992,24 +918,14 @@ def SPIRV_GroupNonUniformUMaxOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-umax-op ::= ssa-id `=` `spirv.GroupNonUniformUMax` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformUMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformUMax <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];
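Worth keeping in mind while reviewing the SMax/UMax and SMin/UMin pairs: the ops differ only in whether lanes are combined with signed or unsigned comparison; the assembly is otherwise identical. Sketch:

```mlir
// Same operand, different interpretation of the i32 bits.
%s = spirv.GroupNonUniformSMax <Subgroup> <Reduce> %val : i32 -> i32
%u = spirv.GroupNonUniformUMax <Subgroup> <Reduce> %val : i32 -> i32
```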
@@ -1050,24 +966,14 @@ def SPIRV_GroupNonUniformUMinOp : SPIRV_GroupNonUniformArithmeticOp<"GroupNonUni
-    ```
-    scope ::= `"Workgroup"` | `"Subgroup"`
-    operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
-    integer-scalar-vector-type ::= integer-type |
-                                 `vector<` integer-literal `x` integer-type `>`
-    non-uniform-umin-op ::= ssa-id `=` `spirv.GroupNonUniformUMin` scope operation
-                            ssa-use ( `cluster_size` `(` ssa_use `)` )?
-                            `:` integer-scalar-vector-type
-    ```
-
     #### Example:

     ```mlir
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformUMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformUMin <Subgroup> <ClusteredReduce> %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];

@@ -1113,9 +1019,9 @@ def SPIRV_GroupNonUniformBitwiseAndOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformBitwiseAnd "Subgroup" "ClusteredReduce"
-         %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformBitwiseAnd <Subgroup> <ClusteredReduce>
+         %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];

@@ -1163,9 +1069,9 @@ def SPIRV_GroupNonUniformBitwiseOrOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformBitwiseOr "Subgroup" "ClusteredReduce"
-         %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformBitwiseOr <Subgroup> <ClusteredReduce>
+         %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];

@@ -1213,9 +1119,9 @@ def SPIRV_GroupNonUniformBitwiseXorOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %scalar : i32
-    %1 = spirv.GroupNonUniformBitwiseXor "Subgroup" "ClusteredReduce"
-         %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %scalar : i32 -> i32
+    %1 = spirv.GroupNonUniformBitwiseXor <Subgroup> <ClusteredReduce>
+         %vector cluster_size(%four) : vector<4xi32>, i32 -> vector<4xi32>
     ```
   }];

@@ -1263,9 +1169,9 @@ def SPIRV_GroupNonUniformLogicalAndOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i1
     %vector = ... : vector<4xi1>
-    %0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %scalar : i1
-    %1 = spirv.GroupNonUniformLogicalAnd "Subgroup" "ClusteredReduce"
-         %vector cluster_size(%four) : vector<4xi1>
+    %0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %scalar : i1 -> i1
+    %1 = spirv.GroupNonUniformLogicalAnd <Subgroup> <ClusteredReduce>
+         %vector cluster_size(%four) : vector<4xi1>, i32 -> vector<4xi1>
     ```
   }];

@@ -1313,9 +1219,9 @@ def SPIRV_GroupNonUniformLogicalOrOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i1
     %vector = ... : vector<4xi1>
-    %0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %scalar : i1
-    %1 = spirv.GroupNonUniformLogicalOr "Subgroup" "ClusteredReduce"
-         %vector cluster_size(%four) : vector<4xi1>
+    %0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %scalar : i1 -> i1
+    %1 = spirv.GroupNonUniformLogicalOr <Subgroup> <ClusteredReduce>
+         %vector cluster_size(%four) : vector<4xi1>, i32 -> vector<4xi1>
     ```
   }];
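The three Logical* ops mirror the Bitwise* ones but operate on booleans, so the same syntax applies with `i1` element types. Sketch:

```mlir
// Boolean reductions; vectors of i1 work the same way as scalars.
%b = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %pred : i1 -> i1
%v = spirv.GroupNonUniformLogicalOr <Subgroup> <Reduce> %preds : vector<4xi1> -> vector<4xi1>
```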
@@ -1363,9 +1269,9 @@ def SPIRV_GroupNonUniformLogicalXorOp :
     %four = spirv.Constant 4 : i32
     %scalar = ... : i1
     %vector = ... : vector<4xi1>
-    %0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %scalar : i1
-    %1 = spirv.GroupNonUniformLogicalXor "Subgroup" "ClusteredReduce"
-         %vector cluster_size(%four) : vector<4xi1>
+    %0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %scalar : i1 -> i1
+    %1 = spirv.GroupNonUniformLogicalXor <Subgroup> <ClusteredReduce>
+         %vector cluster_size(%four) : vector<4xi1>, i32 -> vector<4xi1>
     ```
   }];

diff --git a/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp b/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp
index 2e5a2aab52a16..8aeafda0eb755 100644
--- a/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/GroupOps.cpp
@@ -20,70 +20,6 @@ using namespace mlir::spirv::AttrNames;

 namespace mlir::spirv {

-template <typename OpTy>
-static ParseResult parseGroupNonUniformArithmeticOp(OpAsmParser &parser,
-                                                    OperationState &state) {
-  spirv::Scope executionScope;
-  GroupOperation groupOperation;
-  OpAsmParser::UnresolvedOperand valueInfo;
-  if (spirv::parseEnumStrAttr<spirv::ScopeAttr>(
-          executionScope, parser, state,
-          OpTy::getExecutionScopeAttrName(state.name)) ||
-      spirv::parseEnumStrAttr<GroupOperationAttr>(
-          groupOperation, parser, state,
-          OpTy::getGroupOperationAttrName(state.name)) ||
-      parser.parseOperand(valueInfo))
-    return failure();
-
-  std::optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
-  if (succeeded(parser.parseOptionalKeyword(kClusterSize))) {
-    clusterSizeInfo = OpAsmParser::UnresolvedOperand();
-    if (parser.parseLParen() || parser.parseOperand(*clusterSizeInfo) ||
-        parser.parseRParen())
-      return failure();
-  }
-
-  Type resultType;
-  if (parser.parseColonType(resultType))
-    return failure();
-
-  if (parser.resolveOperand(valueInfo, resultType, state.operands))
-    return failure();
-
-  if (clusterSizeInfo) {
-    Type i32Type = parser.getBuilder().getIntegerType(32);
-    if (parser.resolveOperand(*clusterSizeInfo, i32Type, state.operands))
-      return failure();
-  }
-
-  return parser.addTypeToList(resultType, state.types);
-}
-
-template <typename GroupNonUniformArithmeticOpTy>
-static void printGroupNonUniformArithmeticOp(Operation *groupOp,
-                                             OpAsmPrinter &printer) {
-  printer
-      << " \""
-      << stringifyScope(
-             groupOp
-                 ->getAttrOfType<spirv::ScopeAttr>(
-                     GroupNonUniformArithmeticOpTy::getExecutionScopeAttrName(
-                         groupOp->getName()))
-                 .getValue())
-      << "\" \""
-      << stringifyGroupOperation(
-             groupOp
-                 ->getAttrOfType<GroupOperationAttr>(
-                     GroupNonUniformArithmeticOpTy::getGroupOperationAttrName(
-                         groupOp->getName()))
-                 .getValue())
-      << "\" " << groupOp->getOperand(0);
-
-  if (groupOp->getNumOperands() > 1)
-    printer << " " << kClusterSize << '(' << groupOp->getOperand(1) << ')';
-  printer << " : " << groupOp->getResult(0).getType();
-}
-
 template <typename OpTy>
 static LogicalResult verifyGroupNonUniformArithmeticOp(Operation *groupOp) {
   spirv::Scope scope =
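With the hand-written parser and printer gone, syntax errors now come from the tablegen-generated parser, while the semantic checks stay in `verifyGroupNonUniformArithmeticOp` (kept above). For instance, IR like the following should parse under the declarative format but still be rejected by the verifier (sketch; it matches the expected-error tests later in this patch):

```mlir
// Parses fine, but the verifier rejects it:
// ClusteredReduce requires a cluster_size operand.
%0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val : vector<2xi32> -> vector<2xi32>
```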
@@ -248,16 +184,6 @@ LogicalResult GroupNonUniformFAddOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFAddOp>(*this);
 }

-ParseResult GroupNonUniformFAddOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFAddOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFAddOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFAddOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformFMaxOp
 //===----------------------------------------------------------------------===//
@@ -266,16 +192,6 @@ LogicalResult GroupNonUniformFMaxOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFMaxOp>(*this);
 }

-ParseResult GroupNonUniformFMaxOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFMaxOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFMaxOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFMaxOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformFMinOp
 //===----------------------------------------------------------------------===//
@@ -284,16 +200,6 @@ LogicalResult GroupNonUniformFMinOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFMinOp>(*this);
 }

-ParseResult GroupNonUniformFMinOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFMinOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFMinOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFMinOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformFMulOp
 //===----------------------------------------------------------------------===//
@@ -302,16 +208,6 @@ LogicalResult GroupNonUniformFMulOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformFMulOp>(*this);
 }

-ParseResult GroupNonUniformFMulOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformFMulOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformFMulOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformFMulOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformIAddOp
 //===----------------------------------------------------------------------===//
@@ -320,16 +216,6 @@ LogicalResult GroupNonUniformIAddOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformIAddOp>(*this);
 }

-ParseResult GroupNonUniformIAddOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformIAddOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformIAddOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformIAddOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformIMulOp
 //===----------------------------------------------------------------------===//
@@ -338,16 +224,6 @@ LogicalResult GroupNonUniformIMulOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformIMulOp>(*this);
 }

-ParseResult GroupNonUniformIMulOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformIMulOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformIMulOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformIMulOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformSMaxOp
 //===----------------------------------------------------------------------===//
@@ -356,16 +232,6 @@ LogicalResult GroupNonUniformSMaxOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformSMaxOp>(*this);
 }

-ParseResult GroupNonUniformSMaxOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformSMaxOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformSMaxOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformSMaxOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformSMinOp
 //===----------------------------------------------------------------------===//
@@ -374,16 +240,6 @@ LogicalResult GroupNonUniformSMinOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformSMinOp>(*this);
 }

-ParseResult GroupNonUniformSMinOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformSMinOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformSMinOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformSMinOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformUMaxOp
 //===----------------------------------------------------------------------===//
@@ -392,16 +248,6 @@ LogicalResult GroupNonUniformUMaxOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformUMaxOp>(*this);
 }

-ParseResult GroupNonUniformUMaxOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformUMaxOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformUMaxOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformUMaxOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformUMinOp
 //===----------------------------------------------------------------------===//
@@ -410,16 +256,6 @@ LogicalResult GroupNonUniformUMinOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformUMinOp>(*this);
 }

-ParseResult GroupNonUniformUMinOp::parse(OpAsmParser &parser,
-                                         OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformUMinOp>(parser,
-                                                                 result);
-}
-
-void GroupNonUniformUMinOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformUMinOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformBitwiseAnd
 //===----------------------------------------------------------------------===//
@@ -428,16 +264,6 @@ LogicalResult GroupNonUniformBitwiseAndOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformBitwiseAndOp>(*this);
 }

-ParseResult GroupNonUniformBitwiseAndOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformBitwiseAndOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformBitwiseAndOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformBitwiseAndOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformBitwiseOr
 //===----------------------------------------------------------------------===//
@@ -446,16 +272,6 @@ LogicalResult GroupNonUniformBitwiseOrOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformBitwiseOrOp>(*this);
 }

-ParseResult GroupNonUniformBitwiseOrOp::parse(OpAsmParser &parser,
-                                              OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformBitwiseOrOp>(parser,
-                                                                      result);
-}
-
-void GroupNonUniformBitwiseOrOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformBitwiseOrOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformBitwiseXor
 //===----------------------------------------------------------------------===//
@@ -464,16 +280,6 @@ LogicalResult GroupNonUniformBitwiseXorOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformBitwiseXorOp>(*this);
 }

-ParseResult GroupNonUniformBitwiseXorOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformBitwiseXorOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformBitwiseXorOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformBitwiseXorOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformLogicalAnd
 //===----------------------------------------------------------------------===//
@@ -482,16 +288,6 @@ LogicalResult GroupNonUniformLogicalAndOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformLogicalAndOp>(*this);
 }

-ParseResult GroupNonUniformLogicalAndOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformLogicalAndOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformLogicalAndOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformLogicalAndOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformLogicalOr
 //===----------------------------------------------------------------------===//
@@ -500,16 +296,6 @@ LogicalResult GroupNonUniformLogicalOrOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformLogicalOrOp>(*this);
 }

-ParseResult GroupNonUniformLogicalOrOp::parse(OpAsmParser &parser,
-                                              OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformLogicalOrOp>(parser,
-                                                                      result);
-}
-
-void GroupNonUniformLogicalOrOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformLogicalOrOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // spirv.GroupNonUniformLogicalXor
 //===----------------------------------------------------------------------===//
@@ -518,16 +304,6 @@ LogicalResult GroupNonUniformLogicalXorOp::verify() {
   return verifyGroupNonUniformArithmeticOp<GroupNonUniformLogicalXorOp>(*this);
 }

-ParseResult GroupNonUniformLogicalXorOp::parse(OpAsmParser &parser,
-                                               OperationState &result) {
-  return parseGroupNonUniformArithmeticOp<GroupNonUniformLogicalXorOp>(parser,
-                                                                       result);
-}
-
-void GroupNonUniformLogicalXorOp::print(OpAsmPrinter &p) {
-  printGroupNonUniformArithmeticOp<GroupNonUniformLogicalXorOp>(*this, p);
-}
-
 //===----------------------------------------------------------------------===//
 // Group op verification
 //===----------------------------------------------------------------------===//

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index dd0a872e05dcb..26559c1321db5 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -1253,33 +1253,6 @@ LogicalResult spirv::GlobalVariableOp::verify() {
 // spirv.INTEL.SubgroupBlockRead
 //===----------------------------------------------------------------------===//

-ParseResult spirv::INTELSubgroupBlockReadOp::parse(OpAsmParser &parser,
-                                                   OperationState &result) {
-  // Parse the storage class specification
-  spirv::StorageClass storageClass;
-  OpAsmParser::UnresolvedOperand ptrInfo;
-  Type elementType;
-  if (parseEnumStrAttr(storageClass, parser) || parser.parseOperand(ptrInfo) ||
-      parser.parseColon() || parser.parseType(elementType)) {
-    return failure();
-  }
-
-  auto ptrType = spirv::PointerType::get(elementType, storageClass);
-  if (auto valVecTy = llvm::dyn_cast<VectorType>(elementType))
-    ptrType = spirv::PointerType::get(valVecTy.getElementType(), storageClass);
-
-  if (parser.resolveOperand(ptrInfo, ptrType, result.operands)) {
-    return failure();
-  }
-
-  result.addTypes(elementType);
-  return success();
-}
-
-void spirv::INTELSubgroupBlockReadOp::print(OpAsmPrinter &printer) {
-  printer << " " << getPtr() << " : " << getType();
-}
-
 LogicalResult spirv::INTELSubgroupBlockReadOp::verify() {
   if (failed(verifyBlockReadWritePtrAndValTypes(*this, getPtr(), getValue())))
     return failure();
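The removed parser rebuilt the pointer type from a leading storage-class token; the declarative format instead spells the pointer type out and derives everything else from the type list. Sketch of the new form (mirrors the updated tests below; `%ptr` is a placeholder):

```mlir
// Scalar and vector block reads through the same pointer type.
%v0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32
%v1 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
```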
diff --git a/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir b/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
index 5cd1fead2527b..652f447228086 100644
--- a/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
+++ b/mlir/test/Conversion/ConvertToSPIRV/argmax-kernel.mlir
@@ -68,7 +68,7 @@ module attributes {
       scf.yield %lane_res_next, %lane_max_next : i32, f32
     }

-    // CHECK: %[[SUBGROUP_MAX:.*]] = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[LANE_MAX]] : f32
+    // CHECK: %[[SUBGROUP_MAX:.*]] = spirv.GroupNonUniformFMax <Subgroup> <Reduce> %[[LANE_MAX]] : f32 -> f32
     // CHECK: %[[OEQ:.*]] = spirv.FOrdEqual %[[LANE_MAX]], %[[SUBGROUP_MAX]] : f32
     // CHECK: %[[BALLOT:.*]] = spirv.GroupNonUniformBallot %[[OEQ]] : vector<4xi32>
     // CHECK: %[[BALLOTLSB:.*]] = spirv.GroupNonUniformBallotFindLSB %[[BALLOT]] : vector<4xi32>, i32

diff --git a/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir b/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir
index f33a66bdf5eff..84f366e5874b0 100644
--- a/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir
+++ b/mlir/test/Conversion/ConvertToSPIRV/gpu.mlir
@@ -8,7 +8,7 @@ module attributes {
 gpu.module @kernels {
   // CHECK-LABEL: spirv.func @all_reduce
   // CHECK-SAME: (%[[ARG0:.*]]: f32)
-  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %[[ARG0]] : f32
+  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %[[ARG0]] : f32 -> f32
   gpu.func @all_reduce(%arg0 : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
     %reduced = gpu.all_reduce add %arg0 {} : (f32) -> (f32)
@@ -28,7 +28,7 @@ module attributes {
 gpu.module @kernels {
   // CHECK-LABEL: spirv.func @subgroup_reduce
   // CHECK-SAME: (%[[ARG0:.*]]: f32)
-  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Subgroup" "Reduce" %[[ARG0]] : f32
+  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Subgroup> <Reduce> %[[ARG0]] : f32 -> f32
   gpu.func @subgroup_reduce(%arg0 : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
     %reduced = gpu.subgroup_reduce add %arg0 {} : (f32) -> (f32)
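For context on what these conversion tests pin down: a `gpu.all_reduce` maps to the workgroup-scope op and `gpu.subgroup_reduce` to the subgroup-scope one, so only the CHECK lines change here, not the input IR. Sketch (paraphrasing the tests, under the usual SPIR-V target-env assumptions):

```mlir
%sum = gpu.all_reduce add %arg0 {} : (f32) -> (f32)
// lowers to:
// %sum = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %arg0 : f32 -> f32
```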
diff --git a/mlir/test/Conversion/GPUToSPIRV/reductions.mlir b/mlir/test/Conversion/GPUToSPIRV/reductions.mlir
index 44f85f68587f1..ae834b9915d50 100644
--- a/mlir/test/Conversion/GPUToSPIRV/reductions.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/reductions.mlir
@@ -30,7 +30,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce add %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -70,7 +70,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.all_reduce add %arg {} : (i32) -> (i32)
     gpu.return
   }
@@ -110,7 +110,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce add %arg : (f32) -> (f32)
     gpu.return
   }
@@ -150,7 +150,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIAdd <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.subgroup_reduce add %arg : (i32) -> (i32)
     gpu.return
   }
@@ -190,7 +190,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce mul %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -230,7 +230,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.all_reduce mul %arg {} : (i32) -> (i32)
     gpu.return
   }
@@ -270,7 +270,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMul <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce mul %arg : (f32) -> (f32)
     gpu.return
   }
@@ -310,7 +310,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformIMul <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %reduced = gpu.subgroup_reduce mul %arg : (i32) -> (i32)
     gpu.return
   }
@@ -350,7 +350,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce minnumf %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -392,7 +392,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.all_reduce minsi %arg {} : (i32) -> (i32)
     %r1 = gpu.all_reduce minui %arg {} : (i32) -> (i32)
     gpu.return
@@ -433,7 +433,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMin <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce minnumf %arg : (f32) -> (f32)
     gpu.return
   }
@@ -475,8 +475,8 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMin "Subgroup" "Reduce" %[[ARG]] : i32
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMin <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMin <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.subgroup_reduce minsi %arg : (i32) -> (i32)
     %r1 = gpu.subgroup_reduce minui %arg : (i32) -> (i32)
     gpu.return
@@ -517,7 +517,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.all_reduce maxnumf %arg {} : (f32) -> (f32)
     gpu.return
   }
@@ -559,8 +559,8 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %[[ARG]] : i32
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.all_reduce maxsi %arg {} : (i32) -> (i32)
     %r1 = gpu.all_reduce maxui %arg {} : (i32) -> (i32)
     gpu.return
@@ -601,7 +601,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
   gpu.func @test(%arg : f32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[ARG]] : f32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformFMax <Subgroup> <Reduce> %[[ARG]] : f32 -> f32
     %reduced = gpu.subgroup_reduce maxnumf %arg : (f32) -> (f32)
     gpu.return
   }
@@ -643,8 +643,8 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : i32) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Subgroup" "Reduce" %[[ARG]] : i32
-    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformUMax <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.subgroup_reduce maxsi %arg : (i32) -> (i32)
     %r1 = gpu.subgroup_reduce maxui %arg : (i32) -> (i32)
     gpu.return
@@ -665,7 +665,7 @@ gpu.module @kernels {
   // CHECK-SAME: (%[[ARG:.*]]: i32)
   gpu.func @test(%arg : vector<1xi32>) kernel
     attributes {spirv.entry_point_abi = #spirv.entry_point_abi} {
-    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax "Subgroup" "Reduce" %[[ARG]] : i32
+    // CHECK: %{{.*}} = spirv.GroupNonUniformSMax <Subgroup> <Reduce> %[[ARG]] : i32 -> i32
     %r0 = gpu.subgroup_reduce maxsi %arg : (vector<1xi32>) -> (vector<1xi32>)
     gpu.return
   }

diff --git a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
index 741081a37d8a0..c879b901311f2 100644
--- a/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/group-ops.mlir
@@ -80,16 +80,16 @@ func.func @subgroup_ballot(%predicate: i1) -> vector<4xi32> {
 //===----------------------------------------------------------------------===//

 func.func @subgroup_block_read_intel(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
-  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : i32
-  %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : i32
+  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> i32
+  %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32
   return %0: i32
 }

 // -----

 func.func @subgroup_block_read_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>) -> vector<3xi32> {
-  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : vector<3xi32>
-  %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : vector<3xi32>
+  // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
+  %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
   return %0: vector<3xi32>
 }
diff --git a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
index d8a26c71d12f9..60ae1584d29fb 100644
--- a/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
@@ -150,16 +150,16 @@ func.func @group_non_uniform_elect() -> i1 {

 // CHECK-LABEL: @group_non_uniform_fadd_reduce
 func.func @group_non_uniform_fadd_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }

 // CHECK-LABEL: @group_non_uniform_fadd_clustered_reduce
 func.func @group_non_uniform_fadd_clustered_reduce(%val: vector<2xf32>) -> vector<2xf32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>
-  %0 = spirv.GroupNonUniformFAdd "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xf32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>, i32 -> vector<2xf32>
+  %0 = spirv.GroupNonUniformFAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xf32>, i32 -> vector<2xf32>
   return %0: vector<2xf32>
 }

@@ -169,16 +169,16 @@ func.func @group_non_uniform_fadd_clustered_reduce(%val: vector<2xf32>) -> vecto

 // CHECK-LABEL: @group_non_uniform_fmul_reduce
 func.func @group_non_uniform_fmul_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }

 // CHECK-LABEL: @group_non_uniform_fmul_clustered_reduce
 func.func @group_non_uniform_fmul_clustered_reduce(%val: vector<2xf32>) -> vector<2xf32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>
-  %0 = spirv.GroupNonUniformFMul "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xf32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMul <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xf32>, i32 -> vector<2xf32>
+  %0 = spirv.GroupNonUniformFMul <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xf32>, i32 -> vector<2xf32>
   return %0: vector<2xf32>
 }

@@ -190,8 +190,8 @@ func.func @group_non_uniform_fmul_clustered_reduce(%val: vector<2xf32>) -> vecto

 // CHECK-LABEL: @group_non_uniform_fmax_reduce
 func.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }

@@ -203,8 +203,8 @@ func.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 {

 // CHECK-LABEL: @group_non_uniform_fmin_reduce
 func.func @group_non_uniform_fmin_reduce(%val: f32) -> f32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %{{.+}} : f32
-  %0 = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %val : f32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+  %0 = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %val : f32 -> f32
   return %0: f32
 }
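As a reminder of the semantics the clustered tests exercise (sketch): with `cluster_size(%four)`, each contiguous cluster of four invocations reduces independently, and every lane receives its own cluster's result:

```mlir
%four = spirv.Constant 4 : i32
// Lanes 0-3 get the sum over lanes 0-3; lanes 4-7 over lanes 4-7; etc.
%0 = spirv.GroupNonUniformIAdd <Subgroup> <ClusteredReduce> %val cluster_size(%four) : i32, i32 -> i32
```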
@@ -216,16 +216,16 @@ func.func @group_non_uniform_fmin_reduce(%val: f32) -> f32 {

 // CHECK-LABEL: @group_non_uniform_iadd_reduce
 func.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

 // CHECK-LABEL: @group_non_uniform_iadd_clustered_reduce
 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xi32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>, i32 -> vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }

@@ -233,7 +233,7 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vecto

 func.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 {
   // expected-error @+1 {{execution scope must be 'Workgroup' or 'Subgroup'}}
-  %0 = spirv.GroupNonUniformIAdd "Device" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformIAdd <Device> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -241,7 +241,7 @@ func.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 {

 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   // expected-error @+1 {{cluster size operand must be provided for 'ClusteredReduce' group operation}}
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val : vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val : vector<2xi32> -> vector<2xi32>
   return %0: vector<2xi32>
 }

@@ -249,7 +249,7 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vecto

 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>, %size: i32) -> vector<2xi32> {
   // expected-error @+1 {{cluster size operand must come from a constant op}}
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%size) : vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%size) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }

@@ -258,7 +258,7 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vecto

 func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   %five = spirv.Constant 5 : i32
   // expected-error @+1 {{cluster size operand must be a power of two}}
-  %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%five) : vector<2xi32>
+  %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%five) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }

@@ -270,16 +270,16 @@ func.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vecto

 // CHECK-LABEL: @group_non_uniform_imul_reduce
 func.func @group_non_uniform_imul_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

 // CHECK-LABEL: @group_non_uniform_imul_clustered_reduce
 func.func @group_non_uniform_imul_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> {
   %four = spirv.Constant 4 : i32
-  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>
-  %0 = spirv.GroupNonUniformIMul "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xi32>
+  // CHECK: %{{.+}} = spirv.GroupNonUniformIMul <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>, i32 -> vector<2xi32>
+  %0 = spirv.GroupNonUniformIMul <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
   return %0: vector<2xi32>
 }
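The four expected-error tests above pin down the `cluster_size` rules, which are unchanged by the syntax switch: the operand must be present for `ClusteredReduce`, must come from a constant op, and must be a power of two. Sketch of a valid instance:

```mlir
%four = spirv.Constant 4 : i32   // constant, power of two
%ok = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
```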
@@ -291,8 +291,8 @@ func.func @group_non_uniform_imul_clustered_reduce(%val: vector<2xi32>) -> vecto

 // CHECK-LABEL: @group_non_uniform_smax_reduce
 func.func @group_non_uniform_smax_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -304,8 +304,8 @@ func.func @group_non_uniform_smax_reduce(%val: i32) -> i32 {

 // CHECK-LABEL: @group_non_uniform_smin_reduce
 func.func @group_non_uniform_smin_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -461,8 +461,8 @@ func.func @group_non_uniform_shuffle(%val: vector<2xf32>, %id: si32) -> vector<2

 // CHECK-LABEL: @group_non_uniform_umax_reduce
 func.func @group_non_uniform_umax_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -474,8 +474,8 @@ func.func @group_non_uniform_umax_reduce(%val: i32) -> i32 {

 // CHECK-LABEL: @group_non_uniform_umin_reduce
 func.func @group_non_uniform_umin_reduce(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -487,8 +487,8 @@ func.func @group_non_uniform_umin_reduce(%val: i32) -> i32 {

 // CHECK-LABEL: @group_non_uniform_bitwise_and
 func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -496,7 +496,7 @@ func.func @group_non_uniform_bitwise_and(%val: i32) -> i32 {

 func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
-  %0 = spirv.GroupNonUniformBitwiseAnd "Workgroup" "Reduce" %val : i1
+  %0 = spirv.GroupNonUniformBitwiseAnd <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }

@@ -508,8 +508,8 @@ func.func @group_non_uniform_bitwise_and(%val: i1) -> i1 {

 // CHECK-LABEL: @group_non_uniform_bitwise_or
 func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }
@@ -517,7 +517,7 @@ func.func @group_non_uniform_bitwise_or(%val: i32) -> i32 {

 func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
-  %0 = spirv.GroupNonUniformBitwiseOr "Workgroup" "Reduce" %val : i1
+  %0 = spirv.GroupNonUniformBitwiseOr <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }

@@ -529,8 +529,8 @@ func.func @group_non_uniform_bitwise_or(%val: i1) -> i1 {

 // CHECK-LABEL: @group_non_uniform_bitwise_xor
 func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %{{.+}} : i32
-  %0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %val : i32
+  // CHECK: %{{.+}} = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+  %0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -538,7 +538,7 @@ func.func @group_non_uniform_bitwise_xor(%val: i32) -> i32 {

 func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4/8/16, but got 'i1'}}
-  %0 = spirv.GroupNonUniformBitwiseXor "Workgroup" "Reduce" %val : i1
+  %0 = spirv.GroupNonUniformBitwiseXor <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }

@@ -550,8 +550,8 @@ func.func @group_non_uniform_bitwise_xor(%val: i1) -> i1 {

 // CHECK-LABEL: @group_non_uniform_logical_and
 func.func @group_non_uniform_logical_and(%val: i1) -> i1 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %{{.+}} : i1
-  %0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %val : i1
+  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %{{.+}} : i1 -> i1
+  %0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }

@@ -559,7 +559,7 @@ func.func @group_non_uniform_logical_and(%val: i1) -> i1 {

 func.func @group_non_uniform_logical_and(%val: i32) -> i32 {
   // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
-  %0 = spirv.GroupNonUniformLogicalAnd "Workgroup" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformLogicalAnd <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -571,8 +571,8 @@ func.func @group_non_uniform_logical_and(%val: i32) -> i32 {

 // CHECK-LABEL: @group_non_uniform_logical_or
 func.func @group_non_uniform_logical_or(%val: i1) -> i1 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %{{.+}} : i1
-  %0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %val : i1
+  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %{{.+}} : i1 -> i1
+  %0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }

@@ -580,7 +580,7 @@ func.func @group_non_uniform_logical_or(%val: i1) -> i1 {

 func.func @group_non_uniform_logical_or(%val: i32) -> i32 {
   // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
-  %0 = spirv.GroupNonUniformLogicalOr "Workgroup" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformLogicalOr <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

@@ -592,8 +592,8 @@ func.func @group_non_uniform_logical_or(%val: i32) -> i32 {

 // CHECK-LABEL: @group_non_uniform_logical_xor
 func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {
-  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %{{.+}} : i1
-  %0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %val : i1
+  // CHECK: %{{.+}} = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %{{.+}} : i1 -> i1
+  %0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %val : i1 -> i1
   return %0: i1
 }

@@ -601,6 +601,6 @@ func.func @group_non_uniform_logical_xor(%val: i1) -> i1 {

 func.func @group_non_uniform_logical_xor(%val: i32) -> i32 {
   // expected-error @+1 {{operand #0 must be bool or vector of bool values of length 2/3/4/8/16, but got 'i32'}}
-  %0 = spirv.GroupNonUniformLogicalXor "Workgroup" "Reduce" %val : i32
+  %0 = spirv.GroupNonUniformLogicalXor <Workgroup> <Reduce> %val : i32 -> i32
   return %0: i32
 }

diff --git a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
index 931034f3d5f6e..ff5ac7cea8fc6 100644
--- a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
+++ b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
@@ -101,7 +101,7 @@ spirv.module Logical GLSL450 attributes {
   #spirv.vce, #spirv.resource_limits<>>
 } {
   spirv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" {
-    %0 = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32
+    %0 = spirv.GroupNonUniformIAdd <Subgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 }
@@ -112,7 +112,7 @@ spirv.module Logical GLSL450 attributes {
   #spirv.vce, #spirv.resource_limits<>>
 } {
   spirv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" {
-    %0 = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32
+    %0 = spirv.GroupNonUniformIAdd <Subgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
 }

diff --git a/mlir/test/Target/SPIRV/debug.mlir b/mlir/test/Target/SPIRV/debug.mlir
index 50c83d876bef3..d1cd71d65ca8d 100644
--- a/mlir/test/Target/SPIRV/debug.mlir
+++ b/mlir/test/Target/SPIRV/debug.mlir
@@ -39,7 +39,7 @@ spirv.module Logical GLSL450 requires #spirv.vce {
   spirv.func @group_non_uniform(%val: f32) "None" {
     // CHECK: loc({{".*debug.mlir"}}:42:10)
-    %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
+    %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %val : f32 -> f32
     spirv.Return
   }
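The remaining Target/SPIRV tests exercise the (de)serialization round trip, so the new assembly has to survive a pass through the SPIR-V binary and re-print identically. Sketch of the round-tripped form these tests check (illustrative; same shape as the hunks below):

```mlir
%four = spirv.Constant 4 : i32
%0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
```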
diff --git a/mlir/test/Target/SPIRV/group-ops.mlir b/mlir/test/Target/SPIRV/group-ops.mlir
index dc07f8c8ef61f..32da4d9c26bd1 100644
--- a/mlir/test/Target/SPIRV/group-ops.mlir
+++ b/mlir/test/Target/SPIRV/group-ops.mlir
@@ -21,14 +21,14 @@ spirv.module Logical GLSL450 requires #spirv.vce {
   }
   // CHECK-LABEL: @subgroup_block_read_intel
   spirv.func @subgroup_block_read_intel(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 "None" {
-    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : i32
-    %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : i32
+    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> i32
+    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> i32
     spirv.ReturnValue %0: i32
   }
   // CHECK-LABEL: @subgroup_block_read_intel_vector
   spirv.func @subgroup_block_read_intel_vector(%ptr : !spirv.ptr<i32, StorageBuffer>) -> vector<3xi32> "None" {
-    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : vector<3xi32>
-    %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : vector<3xi32>
+    // CHECK: spirv.INTEL.SubgroupBlockRead %{{.*}} : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
+    %0 = spirv.INTEL.SubgroupBlockRead %ptr : !spirv.ptr<i32, StorageBuffer> -> vector<3xi32>
     spirv.ReturnValue %0: vector<3xi32>
   }
   // CHECK-LABEL: @subgroup_block_write_intel

diff --git a/mlir/test/Target/SPIRV/non-uniform-ops.mlir b/mlir/test/Target/SPIRV/non-uniform-ops.mlir
index 4a08de2e25790..3e78eaf8b03ef 100644
--- a/mlir/test/Target/SPIRV/non-uniform-ops.mlir
+++ b/mlir/test/Target/SPIRV/non-uniform-ops.mlir
@@ -25,79 +25,79 @@ spirv.module Logical GLSL450 requires #spirv.vce {

   // CHECK-LABEL: @group_non_uniform_fadd_reduce
   spirv.func @group_non_uniform_fadd_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }

   // CHECK-LABEL: @group_non_uniform_fmax_reduce
   spirv.func @group_non_uniform_fmax_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFMax <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }

   // CHECK-LABEL: @group_non_uniform_fmin_reduce
   spirv.func @group_non_uniform_fmin_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFMin <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }

   // CHECK-LABEL: @group_non_uniform_fmul_reduce
   spirv.func @group_non_uniform_fmul_reduce(%val: f32) -> f32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %{{.+}} : f32
-    %0 = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %val : f32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %{{.+}} : f32 -> f32
+    %0 = spirv.GroupNonUniformFMul <Workgroup> <Reduce> %val : f32 -> f32
     spirv.ReturnValue %0: f32
   }

   // CHECK-LABEL: @group_non_uniform_iadd_reduce
   spirv.func @group_non_uniform_iadd_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }

   // CHECK-LABEL: @group_non_uniform_iadd_clustered_reduce
   spirv.func @group_non_uniform_iadd_clustered_reduce(%val: vector<2xi32>) -> vector<2xi32> "None" {
     %four = spirv.Constant 4 : i32
-    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>
-    %0 = spirv.GroupNonUniformIAdd "Workgroup" "ClusteredReduce" %val cluster_size(%four) : vector<2xi32>
+    // CHECK: %{{.+}} = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %{{.+}} cluster_size(%{{.+}}) : vector<2xi32>, i32 -> vector<2xi32>
+    %0 = spirv.GroupNonUniformIAdd <Workgroup> <ClusteredReduce> %val cluster_size(%four) : vector<2xi32>, i32 -> vector<2xi32>
     spirv.ReturnValue %0: vector<2xi32>
   }

   // CHECK-LABEL: @group_non_uniform_imul_reduce
   spirv.func @group_non_uniform_imul_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformIMul <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }

   // CHECK-LABEL: @group_non_uniform_smax_reduce
   spirv.func @group_non_uniform_smax_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformSMax <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }

   // CHECK-LABEL: @group_non_uniform_smin_reduce
   spirv.func @group_non_uniform_smin_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformSMin <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }

   // CHECK-LABEL: @group_non_uniform_umax_reduce
   spirv.func @group_non_uniform_umax_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformUMax <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }

   // CHECK-LABEL: @group_non_uniform_umin_reduce
   spirv.func @group_non_uniform_umin_reduce(%val: i32) -> i32 "None" {
-    // CHECK: %{{.+}} = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %{{.+}} : i32
-    %0 = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %val : i32
+    // CHECK: %{{.+}} = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %{{.+}} : i32 -> i32
+    %0 = spirv.GroupNonUniformUMin <Workgroup> <Reduce> %val : i32 -> i32
     spirv.ReturnValue %0: i32
   }
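Taken together, every op touched by this patch follows the same one-line migration pattern (old vs. new, illustrative):

```mlir
// Old, hand-written parser: quoted enum strings, implicit result type.
%0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %val : i32
// New, declarative assemblyFormat: angle-bracket enums, explicit result type.
%0 = spirv.GroupNonUniformIAdd <Workgroup> <Reduce> %val : i32 -> i32
```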