Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
131 changes: 125 additions & 6 deletions mlir/lib/Dialect/Vector/Transforms/VectorUnroll.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,33 @@ static SmallVector<Value> sliceTransferIndices(ArrayRef<int64_t> elementOffsets,
return slicedIndices;
}

// compute the new indices for vector.load/store by adding offsets to
// originalIndices.
// It assumes m <= n (m = offsets.size(), n = originalIndices.size())
// Last m of originalIndices will be updated.
/// Computes the memref indices for an unrolled vector.load/store by adding
/// `offsets` to the trailing entries of `originalIndices`.
///
/// Requires offsets.size() <= originalIndices.size(); only the last
/// offsets.size() indices are updated. A zero offset reuses the original
/// index value so no redundant arith ops are created.
static SmallVector<Value> computeIndices(PatternRewriter &rewriter,
                                         Location loc,
                                         ArrayRef<Value> originalIndices,
                                         ArrayRef<int64_t> offsets) {
  assert(offsets.size() <= originalIndices.size() &&
         "Offsets should not exceed the number of original indices");
  SmallVector<Value> indices(originalIndices);
  // Align the offsets with the innermost indices: the first
  // `numLeading` indices are left untouched.
  size_t numLeading = originalIndices.size() - offsets.size();
  for (size_t i = 0, e = offsets.size(); i < e; ++i) {
    int64_t offset = offsets[i];
    if (offset == 0)
      continue;
    size_t pos = numLeading + i;
    indices[pos] = rewriter.create<arith::AddIOp>(
        loc, originalIndices[pos],
        rewriter.create<arith::ConstantIndexOp>(loc, offset));
  }
  return indices;
}

// Clones `op` into a new operations that takes `operands` and returns
// `resultTypes`.
static Operation *cloneOpWithOperandsAndTypes(OpBuilder &builder, Location loc,
Expand Down Expand Up @@ -631,6 +658,98 @@ struct UnrollGatherPattern : public OpRewritePattern<vector::GatherOp> {
vector::UnrollVectorOptions options;
};

/// Pattern to unroll a >1-D vector.load into a sequence of 1-D vector.load
/// ops over the innermost dimension, recomposing the result with
/// vector.insert_strided_slice.
struct UnrollLoadPattern : public OpRewritePattern<vector::LoadOp> {
  UnrollLoadPattern(MLIRContext *context,
                    const vector::UnrollVectorOptions &options,
                    PatternBenefit benefit = 1)
      : OpRewritePattern<vector::LoadOp>(context, benefit), options(options) {}

  LogicalResult matchAndRewrite(vector::LoadOp loadOp,
                                PatternRewriter &rewriter) const override {
    VectorType vecType = loadOp.getVectorType();
    // Only unroll >1D loads; 1-D loads are already in target form.
    if (vecType.getRank() <= 1)
      return failure();

    // Respect the user-supplied filter constraint. Previously the stored
    // `options` were never consulted, so setFilterConstraint had no effect
    // on this pattern.
    if (options.filterConstraint && failed(options.filterConstraint(loadOp)))
      return failure();

    Location loc = loadOp.getLoc();
    ArrayRef<int64_t> originalShape = vecType.getShape();

    // Target type is a 1D vector of the innermost dimension.
    auto targetType =
        VectorType::get(originalShape.back(), vecType.getElementType());

    // Extend the targetShape to the same rank as the original shape by
    // padding leading 1s, so offset computation is uniform across ranks.
    SmallVector<int64_t> targetShape(originalShape.size(), 1);
    targetShape.back() = originalShape.back();

    // Accumulate the unrolled 1-D slices into a zero-initialized vector.
    Value result = rewriter.create<arith::ConstantOp>(
        loc, vecType, rewriter.getZeroAttr(vecType));

    SmallVector<Value> originalIndices(loadOp.getIndices().begin(),
                                       loadOp.getIndices().end());

    for (SmallVector<int64_t> offsets :
         StaticTileOffsetRange(originalShape, targetShape)) {
      SmallVector<Value> indices =
          computeIndices(rewriter, loc, originalIndices, offsets);
      Value slice = rewriter.create<vector::LoadOp>(loc, targetType,
                                                    loadOp.getBase(), indices);
      // Insert the slice into the result at the current tile offsets.
      result = rewriter.createOrFold<vector::InsertStridedSliceOp>(
          loc, slice, result, offsets, SmallVector<int64_t>({1}));
    }
    rewriter.replaceOp(loadOp, result);
    return success();
  }

private:
  vector::UnrollVectorOptions options;
};

/// Pattern to unroll a >1-D vector.store into a sequence of 1-D vector.store
/// ops over the innermost dimension, extracting each slice with
/// vector.extract.
struct UnrollStorePattern : public OpRewritePattern<vector::StoreOp> {
  UnrollStorePattern(MLIRContext *context,
                     const vector::UnrollVectorOptions &options,
                     PatternBenefit benefit = 1)
      : OpRewritePattern<vector::StoreOp>(context, benefit), options(options) {}

  LogicalResult matchAndRewrite(vector::StoreOp storeOp,
                                PatternRewriter &rewriter) const override {
    VectorType vecType = storeOp.getVectorType();
    // Only unroll >1D stores; 1-D stores are already in target form.
    if (vecType.getRank() <= 1)
      return failure();

    // Respect the user-supplied filter constraint. Previously the stored
    // `options` were never consulted, so setFilterConstraint had no effect
    // on this pattern.
    if (options.filterConstraint && failed(options.filterConstraint(storeOp)))
      return failure();

    Location loc = storeOp.getLoc();
    ArrayRef<int64_t> originalShape = vecType.getShape();

    // Extend the targetShape to the same rank as the original shape by
    // padding leading 1s, so offset computation is uniform across ranks.
    SmallVector<int64_t> targetShape(originalShape.size(), 1);
    targetShape.back() = originalShape.back();

    Value base = storeOp.getBase();
    Value vector = storeOp.getValueToStore();

    SmallVector<Value> originalIndices(storeOp.getIndices().begin(),
                                       storeOp.getIndices().end());

    for (SmallVector<int64_t> offsets :
         StaticTileOffsetRange(originalShape, targetShape)) {
      SmallVector<Value> indices =
          computeIndices(rewriter, loc, originalIndices, offsets);
      // Drop the trailing (innermost) offset: vector.extract with the
      // remaining leading offsets yields the 1-D slice to store.
      offsets.pop_back();
      Value slice = rewriter.create<vector::ExtractOp>(loc, vector, offsets);
      rewriter.create<vector::StoreOp>(loc, slice, base, indices);
    }
    rewriter.eraseOp(storeOp);
    return success();
  }

private:
  vector::UnrollVectorOptions options;
};

struct UnrollBroadcastPattern : public OpRewritePattern<vector::BroadcastOp> {
UnrollBroadcastPattern(MLIRContext *context,
const vector::UnrollVectorOptions &options,
Expand Down Expand Up @@ -699,10 +818,10 @@ struct UnrollBroadcastPattern : public OpRewritePattern<vector::BroadcastOp> {
/// Registers all vector unrolling patterns (including the load/store
/// unrolling patterns) with the given options and benefit.
//
// Note: the diff rendering had merged the pre-change and post-change bodies,
// leaving duplicated `patterns.add<...>` statements; this is the clean
// post-change body.
void mlir::vector::populateVectorUnrollPatterns(
    RewritePatternSet &patterns, const UnrollVectorOptions &options,
    PatternBenefit benefit) {
  patterns.add<UnrollTransferReadPattern, UnrollTransferWritePattern,
               UnrollContractionPattern, UnrollElementwisePattern,
               UnrollReductionPattern, UnrollMultiReductionPattern,
               UnrollTransposePattern, UnrollGatherPattern, UnrollLoadPattern,
               UnrollStorePattern, UnrollBroadcastPattern>(
      patterns.getContext(), options, benefit);
}
73 changes: 73 additions & 0 deletions mlir/test/Dialect/Vector/vector-unroll-options.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -378,3 +378,76 @@ func.func @vector_broadcast_with_tailing_unit_dim(%v: vector<4x1xf32>) -> vector
// CHECK: [[b3:%.+]] = vector.broadcast [[s3]] : vector<2x1xf32> to vector<2x2xf32>
// CHECK: [[r3:%.+]] = vector.insert_strided_slice [[b3]], [[r2]] {offsets = [2, 2], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
// CHECK: return [[r3]] : vector<4x4xf32>


// Tests unrolling of a full 2-D vector.load: each of the four rows becomes a
// 1-D vector.load whose result is inserted into an initially-zero 4x4 vector
// via vector.insert_strided_slice.
// CHECK-LABEL: func.func @unroll_2D_vector_load(
// CHECK-SAME: %[[ARG:.*]]: memref<4x4xf16>) -> vector<4x4xf16> {
func.func @unroll_2D_vector_load(%arg0: memref<4x4xf16>) -> vector<4x4xf16> {
// CHECK: %[[C3:.*]] = arith.constant 3 : index
// CHECK: %[[C2:.*]] = arith.constant 2 : index
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<4x4xf16>
// CHECK: %[[V0:.*]] = vector.load %[[ARG]][%[[C0]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
// CHECK: %[[V1:.*]] = vector.insert_strided_slice %[[V0]], %[[CST]] {offsets = [0, 0], strides = [1]} : vector<4xf16> into vector<4x4xf16>
// CHECK: %[[V2:.*]] = vector.load %[[ARG]][%[[C1]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
// CHECK: %[[V3:.*]] = vector.insert_strided_slice %[[V2]], %[[V1]] {offsets = [1, 0], strides = [1]} : vector<4xf16> into vector<4x4xf16>
// CHECK: %[[V4:.*]] = vector.load %[[ARG]][%[[C2]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
// CHECK: %[[V5:.*]] = vector.insert_strided_slice %[[V4]], %[[V3]] {offsets = [2, 0], strides = [1]} : vector<4xf16> into vector<4x4xf16>
// CHECK: %[[V6:.*]] = vector.load %[[ARG]][%[[C3]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
// CHECK: %[[V7:.*]] = vector.insert_strided_slice %[[V6]], %[[V5]] {offsets = [3, 0], strides = [1]} : vector<4xf16> into vector<4x4xf16>
// CHECK: return %[[V7]] : vector<4x4xf16>
%c0 = arith.constant 0 : index
%0 = vector.load %arg0[%c0, %c0] : memref<4x4xf16>, vector<4x4xf16>
return %0 : vector<4x4xf16>
}

// Tests unrolling of a full 2-D vector.store: each row of the stored value is
// pulled out with vector.extract and written with a 1-D vector.store.
// CHECK-LABEL: func.func @unroll_2D_vector_store(
// CHECK-SAME: %[[ARG0:.*]]: memref<4x4xf16>, %[[ARG1:.*]]: vector<4x4xf16>) {
func.func @unroll_2D_vector_store(%arg0: memref<4x4xf16>, %arg1: vector<4x4xf16>) {
// CHECK: %[[C3:.*]] = arith.constant 3 : index
// CHECK: %[[C2:.*]] = arith.constant 2 : index
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[V0:.*]] = vector.extract %[[ARG1]][0] : vector<4xf16> from vector<4x4xf16>
// CHECK: vector.store %[[V0]], %[[ARG0]][%[[C0]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
// CHECK: %[[V1:.*]] = vector.extract %[[ARG1]][1] : vector<4xf16> from vector<4x4xf16>
// CHECK: vector.store %[[V1]], %[[ARG0]][%[[C1]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
// CHECK: %[[V2:.*]] = vector.extract %[[ARG1]][2] : vector<4xf16> from vector<4x4xf16>
// CHECK: vector.store %[[V2]], %[[ARG0]][%[[C2]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
// CHECK: %[[V3:.*]] = vector.extract %[[ARG1]][3] : vector<4xf16> from vector<4x4xf16>
// CHECK: vector.store %[[V3]], %[[ARG0]][%[[C3]], %[[C0]]] : memref<4x4xf16>, vector<4xf16>
%c0 = arith.constant 0 : index
vector.store %arg1, %arg0[%c0, %c0] : memref<4x4xf16>, vector<4x4xf16>
return
}

// Tests that unroll offsets are applied only to the trailing memref indices:
// loading a 2x2 vector from a 4-D memref at [1,1,1,1] advances the
// second-to-last index (%c1 -> %c2) while the leading indices stay fixed.
// CHECK-LABEL: func.func @unroll_vector_load(
// CHECK-SAME: %[[ARG:.*]]: memref<4x4x4x4xf16>) -> vector<2x2xf16> {
func.func @unroll_vector_load(%arg0: memref<4x4x4x4xf16>) -> vector<2x2xf16> {
// CHECK: %[[C2:.*]] = arith.constant 2 : index
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<2x2xf16>
// CHECK: %[[V0:.*]] = vector.load %[[ARG]][%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<4x4x4x4xf16>, vector<2xf16>
// CHECK: %[[V1:.*]] = vector.insert_strided_slice %[[V0]], %[[CST]] {offsets = [0, 0], strides = [1]} : vector<2xf16> into vector<2x2xf16>
// CHECK: %[[V2:.*]] = vector.load %[[ARG]][%[[C1]], %[[C1]], %[[C2]], %[[C1]]] : memref<4x4x4x4xf16>, vector<2xf16>
// CHECK: %[[V3:.*]] = vector.insert_strided_slice %[[V2]], %[[V1]] {offsets = [1, 0], strides = [1]} : vector<2xf16> into vector<2x2xf16>
// CHECK: return %[[V3]] : vector<2x2xf16>
%c1 = arith.constant 1 : index
%0 = vector.load %arg0[%c1, %c1, %c1, %c1] : memref<4x4x4x4xf16>, vector<2x2xf16>
return %0 : vector<2x2xf16>
}

// Tests the store counterpart of the trailing-index offset behavior: storing a
// 2x2 vector into a 4-D memref at [1,1,1,1] writes row slices at
// [%c1,%c1,%c1,%c1] and [%c1,%c1,%c2,%c1].
// CHECK-LABEL: func.func @unroll_vector_store(
// CHECK-SAME: %[[ARG0:.*]]: memref<4x4x4x4xf16>, %[[ARG1:.*]]: vector<2x2xf16>) {
func.func @unroll_vector_store(%arg0: memref<4x4x4x4xf16>, %arg1: vector<2x2xf16>) {
// CHECK: %[[C2:.*]] = arith.constant 2 : index
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[V0:.*]] = vector.extract %[[ARG1]][0] : vector<2xf16> from vector<2x2xf16>
// CHECK: vector.store %[[V0]], %[[ARG0]][%[[C1]], %[[C1]], %[[C1]], %[[C1]]] : memref<4x4x4x4xf16>, vector<2xf16>
// CHECK: %[[V1:.*]] = vector.extract %[[ARG1]][1] : vector<2xf16> from vector<2x2xf16>
// CHECK: vector.store %[[V1]], %[[ARG0]][%[[C1]], %[[C1]], %[[C2]], %[[C1]]] : memref<4x4x4x4xf16>, vector<2xf16>
%c1 = arith.constant 1 : index
vector.store %arg1, %arg0[%c1, %c1, %c1, %c1] : memref<4x4x4x4xf16>, vector<2x2xf16>
return
}
10 changes: 10 additions & 0 deletions mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,16 @@ struct TestVectorUnrollingPatterns
return success(isa<vector::TransposeOp>(op));
}));

populateVectorUnrollPatterns(
patterns, UnrollVectorOptions()
.setNativeShape(ArrayRef<int64_t>{2, 2})
// Review note: a reviewer suggested using native shape {2, 2} here; the
// .setNativeShape(ArrayRef<int64_t>{2, 2}) call above already does so.
.setFilterConstraint([](Operation *op) {
if (auto loadOp = dyn_cast<vector::LoadOp>(op))
return success(loadOp.getType().getRank() > 1);
if (auto storeOp = dyn_cast<vector::StoreOp>(op))
return success(storeOp.getVectorType().getRank() > 1);
return failure();
}));
if (unrollBasedOnType) {
UnrollVectorOptions::NativeShapeFnType nativeShapeFn =
[](Operation *op) -> std::optional<SmallVector<int64_t>> {
Expand Down