@@ -2047,8 +2047,8 @@ def Vector_GatherOp :
DeclareOpInterfaceMethods<VectorUnrollOpInterface, ["getShapeForUnroll"]>
]>,
Arguments<(ins Arg<TensorOrMemRef<[AnyType]>, "", [MemRead]>:$base,
- Variadic<Index>:$indices,
- VectorOfNonZeroRankOf<[AnyInteger, Index]>:$index_vec,
+ Variadic<Index>:$offsets,
+ VectorOfNonZeroRankOf<[AnyInteger, Index]>:$indices,
VectorOfNonZeroRankOf<[I1]>:$mask,
AnyVectorOfNonZeroRank:$pass_thru,
ConfinedAttr<OptionalAttr<I64Attr>,
@@ -2072,19 +2072,19 @@ def Vector_GatherOp :
```mlir
func.func @gather_3D_to_2D(
- %base: memref<?x10x?xf32>, %i0: index, %i1: index, %i2: index,
- %index_vec: vector<2x3xi32>, %mask: vector<2x3xi1>,
+ %base: memref<?x10x?xf32>, %ofs_0: index, %ofs_1: index, %ofs_2: index,
+ %indices: vector<2x3xi32>, %mask: vector<2x3xi1>,
%fall_thru: vector<2x3xf32>) -> vector<2x3xf32> {
- %result = vector.gather %base[%i0, %i1, %i2]
- [%index_vec], %mask, %fall_thru : [...]
+ %result = vector.gather %base[%ofs_0, %ofs_1, %ofs_2]
+ [%indices], %mask, %fall_thru : [...]
return %result : vector<2x3xf32>
}
```
The indexing semantics are then,
```
- result[i,j] := if mask[i,j] then base[i0, i1, i2 + index_vec[i,j]]
+ result[i,j] := if mask[i,j] then base[i0, i1, i2 + indices[i,j]]
else pass_thru[i,j]
```
The index into `base` only varies in the innermost ((k-1)-th) dimension.
@@ -2118,16 +2118,16 @@ def Vector_GatherOp :
let extraClassDeclaration = [{
ShapedType getBaseType() { return getBase().getType(); }
- VectorType getIndexVectorType() { return getIndexVec().getType(); }
+ VectorType getIndexVectorType() { return getIndices().getType(); }
VectorType getMaskVectorType() { return getMask().getType(); }
2123
2123
VectorType getPassThruVectorType() { return getPassThru().getType(); }
2124
2124
VectorType getVectorType() { return getResult().getType(); }
2125
2125
}];
let assemblyFormat =
- "$base `[` $indices `]` `[` $index_vec `]` `,` "
+ "$base `[` $offsets `]` `[` $indices `]` `,` "
"$mask `,` $pass_thru attr-dict `:` type($base) `,` "
- "type($index_vec) `,` type($mask) `,` type($pass_thru) "
+ "type($indices) `,` type($mask) `,` type($pass_thru) "
"`into` type($result)";
let hasCanonicalizer = 1;
let hasVerifier = 1;
@@ -2150,8 +2150,8 @@ def Vector_GatherOp :
def Vector_ScatterOp :
Vector_Op<"scatter">,
Arguments<(ins Arg<AnyMemRef, "", [MemWrite]>:$base,
- Variadic<Index>:$indices,
- VectorOfNonZeroRankOf<[AnyInteger, Index]>:$index_vec,
+ Variadic<Index>:$offsets,
+ VectorOfNonZeroRankOf<[AnyInteger, Index]>:$indices,
VectorOfNonZeroRankOf<[I1]>:$mask,
AnyVectorOfNonZeroRank:$valueToStore,
ConfinedAttr<OptionalAttr<I64Attr>,
@@ -2207,15 +2207,15 @@ def Vector_ScatterOp :
let extraClassDeclaration = [{
MemRefType getMemRefType() { return getBase().getType(); }
- VectorType getIndexVectorType() { return getIndexVec().getType(); }
+ VectorType getIndexVectorType() { return getIndices().getType(); }
VectorType getMaskVectorType() { return getMask().getType(); }
VectorType getVectorType() { return getValueToStore().getType(); }
}];
let assemblyFormat =
- "$base `[` $indices `]` `[` $index_vec `]` `,` "
+ "$base `[` $offsets `]` `[` $indices `]` `,` "
"$mask `,` $valueToStore attr-dict `:` type($base) `,` "
- "type($index_vec) `,` type($mask) `,` type($valueToStore)";
+ "type($indices) `,` type($mask) `,` type($valueToStore)";
let hasCanonicalizer = 1;
let hasVerifier = 1;
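For reference, below is a minimal sketch of how the renamed operands read in the textual form produced by the updated `assemblyFormat` strings. The function, shapes, and SSA names are made up for illustration and are not part of the diff.

```mlir
// Illustrative only: a gather followed by a scatter using the renamed
// operand order: base[offsets] [indices], mask, pass_thru / valueToStore.
func.func @renamed_gather_scatter(
    %base: memref<16x16xf32>, %ofs: index,
    %indices: vector<8xi32>, %mask: vector<8xi1>,
    %pass_thru: vector<8xf32>) {
  // %ofs (twice, one per memref dimension) picks the starting position in
  // %base; %indices holds the per-lane offsets into the innermost dimension.
  %g = vector.gather %base[%ofs, %ofs] [%indices], %mask, %pass_thru
      : memref<16x16xf32>, vector<8xi32>, vector<8xi1>, vector<8xf32>
        into vector<8xf32>
  // The scatter writes the gathered lanes back through the same indexing.
  vector.scatter %base[%ofs, %ofs] [%indices], %mask, %g
      : memref<16x16xf32>, vector<8xi32>, vector<8xi1>, vector<8xf32>
  return
}
```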