91 changes: 89 additions & 2 deletions mlir/lib/Dialect/Vector/Transforms/VectorLinearize.cpp
@@ -623,6 +623,93 @@ struct LinearizeVectorCreateMask final
}
};

/// This pattern linearizes vector.load from vector<1x1x...xN> to vector<N>.
/// It currently only supports linearization where all but the last dimension
/// are 1. For example, the following:
///   vector.load %arg0[%c0, %c0] : memref<1x4xf32>, vector<1x4xf32>
/// is converted to:
///   %0 = vector.load %arg0[%c0, %c0] : memref<1x4xf32>, vector<4xf32>
///   vector.shape_cast %0 : vector<4xf32> to vector<1x4xf32>
/// For the generic case, the vector unroll pass should first be used to unroll
/// the load to vector<1x1x...xN> form, after which this pattern can linearize
/// it.
struct LinearizeVectorLoad final : public OpConversionPattern<vector::LoadOp> {
using OpConversionPattern::OpConversionPattern;
LinearizeVectorLoad(const TypeConverter &typeConverter, MLIRContext *context,
PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}

LogicalResult
matchAndRewrite(vector::LoadOp loadOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
VectorType vecTy = loadOp.getType();
if (!vecTy)
return rewriter.notifyMatchFailure(loadOp, "expected vector type");

auto shape = vecTy.getShape();
auto scalableDims = vecTy.getScalableDims();
// All but the last dim must be 1, and only the last dim may be scalable (if
// any).
if (!llvm::all_of(shape.drop_back(1), [](auto d) { return d == 1; }))
return rewriter.notifyMatchFailure(loadOp,
"only vector<1x1x...xN> supported");

if (llvm::any_of(scalableDims.drop_back(1), [](bool s) { return s; }))
return rewriter.notifyMatchFailure(loadOp,
"only innermost dim may be scalable");

auto linearTy = typeConverter->convertType<VectorType>(vecTy);

auto newLoad = rewriter.create<vector::LoadOp>(
loadOp.getLoc(), linearTy, adaptor.getBase(), adaptor.getIndices());
rewriter.replaceOp(loadOp, newLoad.getResult());
return success();
}
};

/// This pattern linearizes vector.store from vector<1x1x...xN> to vector<N>.
/// It currently only supports linearization where all but the last dimension
/// are 1. For example, the following:
///   vector.store %arg0, %arg1[%c0, %c0]
///     : vector<1x4xf32>, memref<1x4xf32>
/// is converted to:
///   %0 = vector.shape_cast %arg0 : vector<1x4xf32> to vector<4xf32>
///   vector.store %0, %arg1[%c0, %c0]
///     : vector<4xf32>, memref<1x4xf32>
/// For the generic case, the vector unroll pass should first be used to unroll
/// the store to vector<1x1x...xN> form, after which this pattern can linearize
/// it.
struct LinearizeVectorStore final
: public OpConversionPattern<vector::StoreOp> {
using OpConversionPattern::OpConversionPattern;
LinearizeVectorStore(const TypeConverter &typeConverter, MLIRContext *context,
PatternBenefit benefit = 1)
: OpConversionPattern(typeConverter, context, benefit) {}

LogicalResult
matchAndRewrite(vector::StoreOp storeOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
VectorType vecTy = storeOp.getValueToStore().getType();
if (!vecTy)
return rewriter.notifyMatchFailure(storeOp, "expected vector type");

auto shape = vecTy.getShape();
auto scalableDims = vecTy.getScalableDims();
// All but the last dim must be 1, and only the last dim may be scalable (if
// any).
if (!llvm::all_of(shape.drop_back(1), [](auto d) { return d == 1; }))
return rewriter.notifyMatchFailure(storeOp,
"only vector<1x1x...xN> supported");

if (llvm::any_of(scalableDims.drop_back(1), [](bool s) { return s; }))
return rewriter.notifyMatchFailure(storeOp,
"only innermost dim may be scalable");

rewriter.replaceOpWithNewOp<vector::StoreOp>(
storeOp, adaptor.getValueToStore(), adaptor.getBase(),
adaptor.getIndices());
return success();
}
};

} // namespace
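
Both doc comments above defer general shapes to vector unrolling. As a hedged sketch of that pre-step (not part of this patch): the helper below unrolls vector.load to the vector<1x1x...xN> form these patterns expect. It assumes vector.load is covered by the vector unroll patterns, as the comments imply; the preUnrollLoads name and the native-shape choice are illustrative, not fixed.

#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

// Sketch only: unroll every vector.load to vector<1x...x1xN> pieces so that
// LinearizeVectorLoad above can fire on each piece.
static LogicalResult preUnrollLoads(Operation *root) {
  RewritePatternSet patterns(root->getContext());
  vector::populateVectorUnrollPatterns(
      patterns,
      vector::UnrollVectorOptions().setNativeShapeFn(
          [](Operation *op) -> std::optional<SmallVector<int64_t>> {
            auto loadOp = dyn_cast<vector::LoadOp>(op);
            if (!loadOp)
              return std::nullopt;
            VectorType vecTy = loadOp.getType();
            // Keep the innermost dimension; unroll all leading dims to 1.
            SmallVector<int64_t> native(vecTy.getRank(), 1);
            native.back() = vecTy.getShape().back();
            return native;
          }));
  return applyPatternsGreedily(root, std::move(patterns));
}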

/// This method defines the set of operations that are linearizable, and hence
@@ -714,8 +801,8 @@ void mlir::vector::populateVectorLinearizeBasePatterns(
RewritePatternSet &patterns) {
patterns
.add<LinearizeConstantLike, LinearizeVectorizable, LinearizeVectorBitCast,
LinearizeVectorSplat, LinearizeVectorCreateMask>(
typeConverter, patterns.getContext());
LinearizeVectorSplat, LinearizeVectorCreateMask, LinearizeVectorLoad,
LinearizeVectorStore>(typeConverter, patterns.getContext());
}
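
A note on where the vector.shape_cast ops in the comments and tests come from: neither pattern creates them; they are materialized by the TypeConverter driving the conversion. Below is a minimal, hedged sketch of such a driver. The converter mirrors the guards in the patterns above; the runLinearize name is illustrative, and the two-argument populate call follows the body shown here (the folded signature may differ, so check the header).

#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Sketch only: flatten vector<1x1x...xN> to vector<N> and let the
// materializations insert the vector.shape_cast ops seen in the tests.
static LogicalResult runLinearize(Operation *root) {
  MLIRContext *context = root->getContext();

  TypeConverter typeConverter;
  typeConverter.addConversion([](Type type) { return type; });
  typeConverter.addConversion([](VectorType type) -> std::optional<Type> {
    if (type.getRank() <= 1)
      return type;
    // Mirror the pattern guards: only the innermost dim may be scalable.
    if (llvm::any_of(type.getScalableDims().drop_back(1),
                     [](bool s) { return s; }))
      return std::nullopt;
    return VectorType::get({type.getNumElements()}, type.getElementType(),
                           {type.getScalableDims().back()});
  });
  auto shapeCast = [](OpBuilder &builder, Type resultType, ValueRange inputs,
                      Location loc) -> Value {
    if (inputs.size() != 1)
      return Value();
    return builder.create<vector::ShapeCastOp>(loc, resultType,
                                               inputs.front());
  };
  typeConverter.addSourceMaterialization(shapeCast);
  typeConverter.addTargetMaterialization(shapeCast);

  ConversionTarget target(*context);
  target.markUnknownOpDynamicallyLegal(
      [&](Operation *op) { return typeConverter.isLegal(op); });

  RewritePatternSet patterns(context);
  vector::populateVectorLinearizeBasePatterns(typeConverter, patterns);
  return applyPartialConversion(root, target, std::move(patterns));
}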

void mlir::vector::populateVectorLinearizeShuffleLikeOpsPatterns(
46 changes: 46 additions & 0 deletions mlir/test/Dialect/Vector/linearize.mlir
@@ -464,3 +464,49 @@ func.func @linearize_scalable_create_mask(%arg0 : index, %arg1 : index) -> vector<1x[16]xi1> {
%0 = vector.create_mask %arg0, %arg1 : vector<1x[16]xi1>
return %0 : vector<1x[16]xi1>
}

// CHECK-LABEL: linearize_vector_load
// CHECK-SAME: (%[[ARG0:.*]]: memref<2x8xf32>) -> vector<1x4xf32>
func.func @linearize_vector_load(%arg0: memref<2x8xf32>) -> vector<1x4xf32> {
// CHECK: %[[CST0:.*]] = arith.constant 0 : index
// CHECK: %[[LOAD:.*]] = vector.load %[[ARG0]][%[[CST0]], %[[CST0]]] : memref<2x8xf32>, vector<4xf32>
// CHECK: %[[CAST:.*]] = vector.shape_cast %[[LOAD]] : vector<4xf32> to vector<1x4xf32>
// CHECK: return %[[CAST]] : vector<1x4xf32>
%c0 = arith.constant 0 : index
%0 = vector.load %arg0[%c0, %c0] : memref<2x8xf32>, vector<1x4xf32>
return %0 : vector<1x4xf32>
}

// CHECK-LABEL: linearize_vector_store
// CHECK-SAME: (%[[ARG0:.*]]: memref<2x8xf32>, %[[ARG1:.*]]: vector<1x4xf32>)
func.func @linearize_vector_store(%arg0: memref<2x8xf32>, %arg1: vector<1x4xf32>) {
// CHECK: %[[CAST:.*]] = vector.shape_cast %[[ARG1]] : vector<1x4xf32> to vector<4xf32>
// CHECK: %[[CST0:.*]] = arith.constant 0 : index
// CHECK: vector.store %[[CAST]], %[[ARG0]][%[[CST0]], %[[CST0]]] : memref<2x8xf32>, vector<4xf32>
%c0 = arith.constant 0 : index
vector.store %arg1, %arg0[%c0, %c0] : memref<2x8xf32>, vector<1x4xf32>
return
}

// CHECK-LABEL: linearize_vector_load_scalable
// CHECK-SAME: (%[[ARG0:.*]]: memref<2x8xf32>) -> vector<1x[4]xf32>
func.func @linearize_vector_load_scalable(%arg0: memref<2x8xf32>) -> vector<1x[4]xf32> {
// CHECK: %[[CST0:.*]] = arith.constant 0 : index
// CHECK: %[[LOAD:.*]] = vector.load %[[ARG0]][%[[CST0]], %[[CST0]]] : memref<2x8xf32>, vector<[4]xf32>
// CHECK: %[[CAST:.*]] = vector.shape_cast %[[LOAD]] : vector<[4]xf32> to vector<1x[4]xf32>
// CHECK: return %[[CAST]] : vector<1x[4]xf32>
%c0 = arith.constant 0 : index
%0 = vector.load %arg0[%c0, %c0] : memref<2x8xf32>, vector<1x[4]xf32>
return %0 : vector<1x[4]xf32>
}

// CHECK-LABEL: linearize_vector_store_scalable
// CHECK-SAME: (%[[ARG0:.*]]: memref<2x8xf32>, %[[ARG1:.*]]: vector<1x[4]xf32>)
func.func @linearize_vector_store_scalable(%arg0: memref<2x8xf32>, %arg1: vector<1x[4]xf32>) {
// CHECK: %[[CAST:.*]] = vector.shape_cast %[[ARG1]] : vector<1x[4]xf32> to vector<[4]xf32>
// CHECK: %[[CST0:.*]] = arith.constant 0 : index
// CHECK: vector.store %[[CAST]], %[[ARG0]][%[[CST0]], %[[CST0]]] : memref<2x8xf32>, vector<[4]xf32>
%c0 = arith.constant 0 : index
vector.store %arg1, %arg0[%c0, %c0] : memref<2x8xf32>, vector<1x[4]xf32>
return
}