Skip to content

Commit 5133673

Browse files
[mlir] Extend semantic of OffsetSizeAndStrideOpInterface.
Ops implementing OffsetSizeAndStrideOpInterface now have the ability to specify only a leading subset of the offset, size, and stride operands/attributes. The size of that leading subset must not exceed the corresponding entry in `getArrayAttrMaxRanks`, to avoid overflows. Missing trailing dimensions are assumed to span the whole range (i.e. [0 .. dim)). This brings more natural semantics to slice-like ops on top of subview, and simplifies removing all uses of SliceOp in dependent projects. Differential revision: https://reviews.llvm.org/D95441
1 parent f967673 commit 5133673

File tree

11 files changed

+324
-82
lines changed

11 files changed

+324
-82
lines changed

mlir/include/mlir/Dialect/StandardOps/IR/Ops.td

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1986,7 +1986,7 @@ def MemRefReinterpretCastOp:
19861986

19871987
/// Return the expected rank of each of the`static_offsets`, `static_sizes`
19881988
/// and `static_strides` attributes.
1989-
std::array<unsigned, 3> getArrayAttrRanks() {
1989+
std::array<unsigned, 3> getArrayAttrMaxRanks() {
19901990
unsigned resultRank = getResult().getType().cast<ShapedType>().getRank();
19911991
return {1, resultRank, resultRank};
19921992
}
@@ -2983,7 +2983,7 @@ def SubViewOp : BaseOpWithOffsetSizesAndStrides<
29832983

29842984
/// Return the expected rank of each of the`static_offsets`, `static_sizes`
29852985
/// and `static_strides` attributes.
2986-
std::array<unsigned, 3> getArrayAttrRanks() {
2986+
std::array<unsigned, 3> getArrayAttrMaxRanks() {
29872987
unsigned rank = getSourceType().getRank();
29882988
return {rank, rank, rank};
29892989
}
@@ -3097,7 +3097,7 @@ def SubTensorOp : BaseOpWithOffsetSizesAndStrides<
30973097

30983098
/// Return the expected rank of each of the`static_offsets`, `static_sizes`
30993099
/// and `static_strides` attributes.
3100-
std::array<unsigned, 3> getArrayAttrRanks() {
3100+
std::array<unsigned, 3> getArrayAttrMaxRanks() {
31013101
unsigned rank = getSourceType().getRank();
31023102
return {rank, rank, rank};
31033103
}
@@ -3184,7 +3184,7 @@ def SubTensorInsertOp : BaseOpWithOffsetSizesAndStrides<
31843184

31853185
/// Return the expected rank of each of the`static_offsets`, `static_sizes`
31863186
/// and `static_strides` attributes.
3187-
std::array<unsigned, 3> getArrayAttrRanks() {
3187+
std::array<unsigned, 3> getArrayAttrMaxRanks() {
31883188
unsigned rank = getType().getRank();
31893189
return {rank, rank, rank};
31903190
}

mlir/include/mlir/Interfaces/ViewLikeInterface.td

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
3535
Common interface for ops that allow specifying mixed dynamic and static
3636
offsets, sizes and strides variadic operands.
3737
Ops that implement this interface need to expose the following methods:
38-
1. `getArrayAttrRanks` to specify the length of static integer
38+
1. `getArrayAttrMaxRanks` to specify the length of static integer
3939
attributes.
4040
2. `offsets`, `sizes` and `strides` variadic operands.
4141
3. `static_offsets`, resp. `static_sizes` and `static_strides` integer
@@ -45,9 +45,9 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
4545

4646
The invariants of this interface are:
4747
1. `static_offsets`, `static_sizes` and `static_strides` have length
48-
exactly `getArrayAttrRanks()`[0] (resp. [1], [2]).
48+
at most `getArrayAttrMaxRanks()`[0] (resp. [1], [2]).
4949
2. `offsets`, `sizes` and `strides` have each length at most
50-
`getArrayAttrRanks()`[0] (resp. [1], [2]).
50+
length `static_offsets` (resp. `static_sizes`, `static_strides`).
5151
3. if an entry of `static_offsets` (resp. `static_sizes`,
5252
`static_strides`) is equal to a special sentinel value, namely
5353
`ShapedType::kDynamicStrideOrOffset` (resp. `ShapedType::kDynamicSize`,
@@ -81,7 +81,7 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
8181
and `static_strides` attributes.
8282
}],
8383
/*retTy=*/"std::array<unsigned, 3>",
84-
/*methodName=*/"getArrayAttrRanks",
84+
/*methodName=*/"getArrayAttrMaxRanks",
8585
/*args=*/(ins)
8686
>,
8787
InterfaceMethod<
@@ -166,9 +166,8 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
166166
/*methodBody=*/"",
167167
/*defaultImplementation=*/[{
168168
SmallVector<OpFoldResult, 4> res;
169-
std::array<unsigned, 3> ranks = $_op.getArrayAttrRanks();
170169
unsigned numDynamic = 0;
171-
unsigned count = ranks[getOffsetOperandGroupPosition()];
170+
unsigned count = $_op.static_offsets().size();
172171
for (unsigned idx = 0; idx < count; ++idx) {
173172
if (isDynamicOffset(idx))
174173
res.push_back($_op.offsets()[numDynamic++]);
@@ -188,9 +187,8 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
188187
/*methodBody=*/"",
189188
/*defaultImplementation=*/[{
190189
SmallVector<OpFoldResult, 4> res;
191-
std::array<unsigned, 3> ranks = $_op.getArrayAttrRanks();
192190
unsigned numDynamic = 0;
193-
unsigned count = ranks[getSizeOperandGroupPosition()];
191+
unsigned count = $_op.static_sizes().size();
194192
for (unsigned idx = 0; idx < count; ++idx) {
195193
if (isDynamicSize(idx))
196194
res.push_back($_op.sizes()[numDynamic++]);
@@ -210,9 +208,8 @@ def OffsetSizeAndStrideOpInterface : OpInterface<"OffsetSizeAndStrideOpInterface
210208
/*methodBody=*/"",
211209
/*defaultImplementation=*/[{
212210
SmallVector<OpFoldResult, 4> res;
213-
std::array<unsigned, 3> ranks = $_op.getArrayAttrRanks();
214211
unsigned numDynamic = 0;
215-
unsigned count = ranks[getStrideOperandGroupPosition()];
212+
unsigned count = $_op.static_strides().size();
216213
for (unsigned idx = 0; idx < count; ++idx) {
217214
if (isDynamicStride(idx))
218215
res.push_back($_op.strides()[numDynamic++]);
Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
// RUN: mlir-opt %s -convert-std-to-llvm | \
2+
// RUN: mlir-cpu-runner -e main -entry-point-result=void \
3+
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | FileCheck %s
4+
5+
global_memref "private" constant @__constant_5x3xf32 : memref<5x3xf32> =
6+
dense<[[0.0, 1.0, 2.0],
7+
[3.0, 4.0, 5.0],
8+
[6.0, 7.0, 8.0],
9+
[9.0, 10.0, 11.0],
10+
[12.0, 13.0, 14.0]]>
11+
12+
func @main() {
13+
%0 = get_global_memref @__constant_5x3xf32 : memref<5x3xf32>
14+
15+
/// Subview with only leading operands.
16+
%1 = subview %0[2][3][1]: memref<5x3xf32> to memref<3x3xf32, offset: 6, strides: [3, 1]>
17+
%unranked = memref_cast %1 : memref<3x3xf32, offset: 6, strides: [3, 1]> to memref<*xf32>
18+
call @print_memref_f32(%unranked) : (memref<*xf32>) -> ()
19+
20+
// CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
21+
// CHECK-SAME: rank = 2 offset = 6 sizes = [3, 3] strides = [3, 1] data =
22+
// CHECK-NEXT: [
23+
// CHECK-SAME: [6, 7, 8],
24+
// CHECK-NEXT: [9, 10, 11],
25+
// CHECK-NEXT: [12, 13, 14]
26+
// CHECK-SAME: ]
27+
28+
/// Regular subview.
29+
%2 = subview %0[0, 2][5, 1][1, 1]: memref<5x3xf32> to memref<5x1xf32, offset: 2, strides: [3, 1]>
30+
%unranked2 = memref_cast %2 : memref<5x1xf32, offset: 2, strides: [3, 1]> to memref<*xf32>
31+
call @print_memref_f32(%unranked2) : (memref<*xf32>) -> ()
32+
33+
// CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
34+
// CHECK-SAME: rank = 2 offset = 2 sizes = [5, 1] strides = [3, 1] data =
35+
// CHECK-NEXT: [
36+
// CHECK-SAME: [2],
37+
// CHECK-NEXT: [5],
38+
// CHECK-NEXT: [8],
39+
// CHECK-NEXT: [11],
40+
// CHECK-NEXT: [14]
41+
// CHECK-SAME: ]
42+
43+
/// Rank-reducing subview.
44+
%3 = subview %0[0, 2][5, 1][1, 1]: memref<5x3xf32> to memref<5xf32, offset: 2, strides: [3]>
45+
%unranked3 = memref_cast %3 : memref<5xf32, offset: 2, strides: [3]> to memref<*xf32>
46+
call @print_memref_f32(%unranked3) : (memref<*xf32>) -> ()
47+
48+
// CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
49+
// CHECK-SAME: rank = 1 offset = 2 sizes = [5] strides = [3] data =
50+
// CHECK-NEXT: [2, 5, 8, 11, 14]
51+
52+
/// Rank-reducing subview with only leading operands.
53+
%4 = subview %0[1][1][1]: memref<5x3xf32> to memref<3xf32, offset: 3, strides: [1]>
54+
%unranked4 = memref_cast %4 : memref<3xf32, offset: 3, strides: [1]> to memref<*xf32>
55+
call @print_memref_f32(%unranked4) : (memref<*xf32>) -> ()
56+
// CHECK: Unranked Memref base@ = {{0x[-9a-f]*}}
57+
// CHECK-SAME: rank = 1 offset = 3 sizes = [3] strides = [1] data =
58+
// CHECK-NEXT: [3, 4, 5]
59+
60+
return
61+
}
62+
63+
func private @print_memref_f32(%ptr : memref<*xf32>)

mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp

Lines changed: 38 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -3336,8 +3336,14 @@ struct SubViewOpLowering : public ConvertOpToLLVMPattern<SubViewOp> {
33363336
targetMemRef.setConstantOffset(rewriter, loc, offset);
33373337
} else {
33383338
Value baseOffset = sourceMemRef.offset(rewriter, loc);
3339-
for (unsigned i = 0; i < inferredShapeRank; ++i) {
3339+
// `inferredShapeRank` may be larger than the number of offset operands
3340+
// because of trailing semantics. In this case, the offset is guaranteed
3341+
// to be interpreted as 0 and we can just skip the extra dimensions.
3342+
for (unsigned i = 0, e = std::min(inferredShapeRank,
3343+
subViewOp.getMixedOffsets().size());
3344+
i < e; ++i) {
33403345
Value offset =
3346+
// TODO: need OpFoldResult ODS adaptor to clean this up.
33413347
subViewOp.isDynamicOffset(i)
33423348
? operands[subViewOp.getIndexOfDynamicOffset(i)]
33433349
: rewriter.create<LLVM::ConstantOp>(
@@ -3350,31 +3356,47 @@ struct SubViewOpLowering : public ConvertOpToLLVMPattern<SubViewOp> {
33503356
}
33513357

33523358
// Update sizes and strides.
3359+
SmallVector<OpFoldResult> mixedSizes = subViewOp.getMixedSizes();
3360+
SmallVector<OpFoldResult> mixedStrides = subViewOp.getMixedStrides();
3361+
assert(mixedSizes.size() == mixedStrides.size() &&
3362+
"expected sizes and strides of equal length");
33533363
for (int i = inferredShapeRank - 1, j = resultShapeRank - 1;
33543364
i >= 0 && j >= 0; --i) {
33553365
if (!mask[i])
33563366
continue;
33573367

3358-
Value size =
3359-
subViewOp.isDynamicSize(i)
3360-
? operands[subViewOp.getIndexOfDynamicSize(i)]
3361-
: rewriter.create<LLVM::ConstantOp>(
3362-
loc, llvmIndexType,
3363-
rewriter.getI64IntegerAttr(subViewOp.getStaticSize(i)));
3364-
targetMemRef.setSize(rewriter, loc, j, size);
3365-
Value stride;
3366-
if (!ShapedType::isDynamicStrideOrOffset(strides[i])) {
3368+
// `i` may overflow subViewOp.getMixedSizes because of trailing semantics.
3369+
// In this case, the size is guaranteed to be interpreted as Dim and the
3370+
// stride as 1.
3371+
Value size, stride;
3372+
if (static_cast<unsigned>(i) >= mixedSizes.size()) {
3373+
size = rewriter.create<LLVM::DialectCastOp>(
3374+
loc, llvmIndexType,
3375+
rewriter.create<DimOp>(loc, subViewOp.source(), i));
33673376
stride = rewriter.create<LLVM::ConstantOp>(
3368-
loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i]));
3377+
loc, llvmIndexType, rewriter.getI64IntegerAttr(1));
33693378
} else {
3370-
stride =
3371-
subViewOp.isDynamicStride(i)
3372-
? operands[subViewOp.getIndexOfDynamicStride(i)]
3379+
// TODO: need OpFoldResult ODS adaptor to clean this up.
3380+
size =
3381+
subViewOp.isDynamicSize(i)
3382+
? operands[subViewOp.getIndexOfDynamicSize(i)]
33733383
: rewriter.create<LLVM::ConstantOp>(
33743384
loc, llvmIndexType,
3375-
rewriter.getI64IntegerAttr(subViewOp.getStaticStride(i)));
3376-
stride = rewriter.create<LLVM::MulOp>(loc, stride, strideValues[i]);
3385+
rewriter.getI64IntegerAttr(subViewOp.getStaticSize(i)));
3386+
if (!ShapedType::isDynamicStrideOrOffset(strides[i])) {
3387+
stride = rewriter.create<LLVM::ConstantOp>(
3388+
loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i]));
3389+
} else {
3390+
stride = subViewOp.isDynamicStride(i)
3391+
? operands[subViewOp.getIndexOfDynamicStride(i)]
3392+
: rewriter.create<LLVM::ConstantOp>(
3393+
loc, llvmIndexType,
3394+
rewriter.getI64IntegerAttr(
3395+
subViewOp.getStaticStride(i)));
3396+
stride = rewriter.create<LLVM::MulOp>(loc, stride, strideValues[i]);
3397+
}
33773398
}
3399+
targetMemRef.setSize(rewriter, loc, j, size);
33783400
targetMemRef.setStride(rewriter, loc, j, stride);
33793401
j--;
33803402
}

mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -712,6 +712,10 @@ static LogicalResult verify(InitTensorOp op) {
712712
ShapedType::isDynamic)))
713713
return failure();
714714

715+
if (op.static_sizes().size() != static_cast<unsigned>(resultType.getRank()))
716+
return op->emitError("expected ")
717+
<< resultType.getRank() << " sizes values";
718+
715719
Type expectedType =
716720
InitTensorOp::inferResultType(staticSizes, resultType.getElementType());
717721
if (resultType != expectedType) {

mlir/lib/Dialect/StandardOps/IR/Ops.cpp

Lines changed: 35 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -2209,6 +2209,8 @@ static ParseResult parseMemRefReinterpretCastOp(OpAsmParser &parser,
22092209
return parser.addTypeToList(dstType, result.types);
22102210
}
22112211

2212+
// TODO: ponder whether we want to allow missing trailing sizes/strides that are
2213+
// completed automatically, like we have for subview and subtensor.
22122214
static LogicalResult verify(MemRefReinterpretCastOp op) {
22132215
// The source and result memrefs should be in the same memory space.
22142216
auto srcType = op.source().getType().cast<BaseMemRefType>();
@@ -2833,16 +2835,28 @@ Wrapper operator*(Wrapper a, int64_t b) {
28332835
/// static representation of offsets, sizes and strides. Special sentinels
28342836
/// encode the dynamic case.
28352837
Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
2836-
ArrayRef<int64_t> staticOffsets,
2837-
ArrayRef<int64_t> staticSizes,
2838-
ArrayRef<int64_t> staticStrides) {
2838+
ArrayRef<int64_t> leadingStaticOffsets,
2839+
ArrayRef<int64_t> leadingStaticSizes,
2840+
ArrayRef<int64_t> leadingStaticStrides) {
2841+
// A subview may specify only a leading subset of offset/sizes/strides in
2842+
// which case we complete with offset=0, sizes from memref type and strides=1.
28392843
unsigned rank = sourceMemRefType.getRank();
2840-
(void)rank;
2841-
assert(staticOffsets.size() == rank &&
2842-
"unexpected staticOffsets size mismatch");
2843-
assert(staticSizes.size() == rank && "unexpected staticSizes size mismatch");
2844-
assert(staticStrides.size() == rank &&
2845-
"unexpected staticStrides size mismatch");
2844+
assert(leadingStaticOffsets.size() <= rank &&
2845+
"unexpected leadingStaticOffsets overflow");
2846+
assert(leadingStaticSizes.size() <= rank &&
2847+
"unexpected leadingStaticSizes overflow");
2848+
assert(leadingStaticStrides.size() <= rank &&
2849+
"unexpected leadingStaticStrides overflow");
2850+
auto staticOffsets = llvm::to_vector<4>(leadingStaticOffsets);
2851+
auto staticSizes = llvm::to_vector<4>(leadingStaticSizes);
2852+
auto staticStrides = llvm::to_vector<4>(leadingStaticStrides);
2853+
unsigned numTrailingOffsets = rank - staticOffsets.size();
2854+
unsigned numTrailingSizes = rank - staticSizes.size();
2855+
unsigned numTrailingStrides = rank - staticStrides.size();
2856+
staticOffsets.append(numTrailingOffsets, 0);
2857+
llvm::append_range(staticSizes,
2858+
sourceMemRefType.getShape().take_back(numTrailingSizes));
2859+
staticStrides.append(numTrailingStrides, 1);
28462860

28472861
// Extract source offset and strides.
28482862
int64_t sourceOffset;
@@ -3197,7 +3211,7 @@ raw_ostream &mlir::operator<<(raw_ostream &os, Range &range) {
31973211
/// with `b` at location `loc`.
31983212
SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
31993213
OpBuilder &b, Location loc) {
3200-
std::array<unsigned, 3> ranks = op.getArrayAttrRanks();
3214+
std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
32013215
assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
32023216
assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
32033217
SmallVector<Range, 8> res;
@@ -3484,16 +3498,18 @@ static ParseResult parseSubTensorOp(OpAsmParser &parser,
34843498
/// static representation of offsets, sizes and strides. Special sentinels
34853499
/// encode the dynamic case.
34863500
Type SubTensorOp::inferResultType(RankedTensorType sourceRankedTensorType,
3487-
ArrayRef<int64_t> staticOffsets,
3488-
ArrayRef<int64_t> staticSizes,
3489-
ArrayRef<int64_t> staticStrides) {
3501+
ArrayRef<int64_t> leadingStaticOffsets,
3502+
ArrayRef<int64_t> leadingStaticSizes,
3503+
ArrayRef<int64_t> leadingStaticStrides) {
3504+
// A subtensor may specify only a leading subset of offset/sizes/strides in
3505+
// which case we complete with offset=0, sizes from memref type and strides=1.
34903506
unsigned rank = sourceRankedTensorType.getRank();
3491-
(void)rank;
3492-
assert(staticOffsets.size() == rank &&
3493-
"unexpected staticOffsets size mismatch");
3494-
assert(staticSizes.size() == rank && "unexpected staticSizes size mismatch");
3495-
assert(staticStrides.size() == rank &&
3496-
"unexpected staticStrides size mismatch");
3507+
assert(leadingStaticSizes.size() <= rank &&
3508+
"unexpected leadingStaticSizes overflow");
3509+
auto staticSizes = llvm::to_vector<4>(leadingStaticSizes);
3510+
unsigned numTrailingSizes = rank - staticSizes.size();
3511+
llvm::append_range(staticSizes, sourceRankedTensorType.getShape().take_back(
3512+
numTrailingSizes));
34973513
return RankedTensorType::get(staticSizes,
34983514
sourceRankedTensorType.getElementType());
34993515
}

mlir/lib/Interfaces/ViewLikeInterface.cpp

Lines changed: 27 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -18,12 +18,12 @@ using namespace mlir;
1818
#include "mlir/Interfaces/ViewLikeInterface.cpp.inc"
1919

2020
LogicalResult mlir::verifyListOfOperandsOrIntegers(
21-
Operation *op, StringRef name, unsigned expectedNumElements, ArrayAttr attr,
21+
Operation *op, StringRef name, unsigned maxNumElements, ArrayAttr attr,
2222
ValueRange values, llvm::function_ref<bool(int64_t)> isDynamic) {
23-
/// Check static and dynamic offsets/sizes/strides breakdown.
24-
if (attr.size() != expectedNumElements)
25-
return op->emitError("expected ")
26-
<< expectedNumElements << " " << name << " values";
23+
/// Check static and dynamic offsets/sizes/strides does not overflow type.
24+
if (attr.size() > maxNumElements)
25+
return op->emitError("expected <= ")
26+
<< maxNumElements << " " << name << " values";
2727
unsigned expectedNumDynamicEntries =
2828
llvm::count_if(attr.getValue(), [&](Attribute attr) {
2929
return isDynamic(attr.cast<IntegerAttr>().getInt());
@@ -35,17 +35,35 @@ LogicalResult mlir::verifyListOfOperandsOrIntegers(
3535
}
3636

3737
LogicalResult mlir::verify(OffsetSizeAndStrideOpInterface op) {
38-
std::array<unsigned, 3> ranks = op.getArrayAttrRanks();
38+
std::array<unsigned, 3> maxRanks = op.getArrayAttrMaxRanks();
39+
// Offsets can come in 2 flavors:
40+
// 1. Either single entry (when maxRanks == 1).
41+
// 2. Or as an array whose rank must match that of the mixed sizes.
42+
// So that the result type is well-formed.
43+
if (!(op.getMixedOffsets().size() == 1 && maxRanks[0] == 1) &&
44+
op.getMixedOffsets().size() != op.getMixedSizes().size())
45+
return op->emitError(
46+
"expected mixed offsets rank to match mixed sizes rank (")
47+
<< op.getMixedOffsets().size() << " vs " << op.getMixedSizes().size()
48+
<< ") so the rank of the result type is well-formed.";
49+
// Ranks of mixed sizes and strides must always match so the result type is
50+
// well-formed.
51+
if (op.getMixedSizes().size() != op.getMixedStrides().size())
52+
return op->emitError(
53+
"expected mixed sizes rank to match mixed strides rank (")
54+
<< op.getMixedSizes().size() << " vs " << op.getMixedStrides().size()
55+
<< ") so the rank of the result type is well-formed.";
56+
3957
if (failed(verifyListOfOperandsOrIntegers(
40-
op, "offset", ranks[0], op.static_offsets(), op.offsets(),
58+
op, "offset", maxRanks[0], op.static_offsets(), op.offsets(),
4159
ShapedType::isDynamicStrideOrOffset)))
4260
return failure();
43-
if (failed(verifyListOfOperandsOrIntegers(op, "size", ranks[1],
61+
if (failed(verifyListOfOperandsOrIntegers(op, "size", maxRanks[1],
4462
op.static_sizes(), op.sizes(),
4563
ShapedType::isDynamic)))
4664
return failure();
4765
if (failed(verifyListOfOperandsOrIntegers(
48-
op, "stride", ranks[2], op.static_strides(), op.strides(),
66+
op, "stride", maxRanks[2], op.static_strides(), op.strides(),
4967
ShapedType::isDynamicStrideOrOffset)))
5068
return failure();
5169
return success();

0 commit comments

Comments
 (0)