Skip to content

[mlir][vector] Propagate alignment from vector to llvm dialects. #153482

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 6 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 36 additions & 13 deletions mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -87,21 +87,21 @@ class LLVM_TernarySameArgsIntrOpF<string func, list<Trait> traits = []> :
class LLVM_CountZerosIntrOp<string func, list<Trait> traits = []> :
LLVM_OneResultIntrOp<func, [], [0],
!listconcat([Pure, SameOperandsAndResultType], traits),
/*requiresFastmath=*/0,
/*requiresFastmath=*/0, /*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[1], /*immArgAttrNames=*/["is_zero_poison"]> {
let arguments = (ins LLVM_ScalarOrVectorOf<AnySignlessInteger>:$in,
I1Attr:$is_zero_poison);
}

def LLVM_AbsOp : LLVM_OneResultIntrOp<"abs", [], [0], [Pure],
/*requiresFastmath=*/0,
/*requiresFastmath=*/0, /*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[1], /*immArgAttrNames=*/["is_int_min_poison"]> {
let arguments = (ins LLVM_ScalarOrVectorOf<AnySignlessInteger>:$in,
I1Attr:$is_int_min_poison);
}

def LLVM_IsFPClass : LLVM_OneResultIntrOp<"is.fpclass", [], [0], [Pure],
/*requiresFastmath=*/0,
/*requiresFastmath=*/0, /*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[1], /*immArgAttrNames=*/["bit"]> {
let arguments = (ins LLVM_ScalarOrVectorOf<LLVM_AnyFloat>:$in, I32Attr:$bit);
}
Expand Down Expand Up @@ -360,8 +360,8 @@ def LLVM_LifetimeEndOp : LLVM_LifetimeBaseOp<"lifetime.end">;

def LLVM_InvariantStartOp : LLVM_OneResultIntrOp<"invariant.start", [], [1],
[DeclareOpInterfaceMethods<PromotableOpInterface>],
/*requiresFastmath=*/0, /*immArgPositions=*/[0],
/*immArgAttrNames=*/["size"]> {
/*requiresFastmath=*/0, /*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[0], /*immArgAttrNames=*/["size"]> {
let arguments = (ins I64Attr:$size, LLVM_AnyPointer:$ptr);
let results = (outs LLVM_DefaultPointer:$res);
let assemblyFormat = "$size `,` $ptr attr-dict `:` qualified(type($ptr))";
Expand Down Expand Up @@ -412,6 +412,7 @@ class LLVM_ConstrainedIntr<string mnem, int numArgs,
!gt(hasRoundingMode, 0) : [DeclareOpInterfaceMethods<RoundingModeOpInterface>],
true : []),
/*requiresFastmath=*/0,
/*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[],
/*immArgAttrNames=*/[]> {
dag regularArgs = !dag(ins, !listsplat(LLVM_Type, numArgs), !foreach(i, !range(numArgs), "arg_" #i));
Expand Down Expand Up @@ -589,7 +590,7 @@ def LLVM_ExpectOp
def LLVM_ExpectWithProbabilityOp
: LLVM_OneResultIntrOp<"expect.with.probability", [], [0],
[Pure, AllTypesMatch<["val", "expected", "res"]>],
/*requiresFastmath=*/0,
/*requiresFastmath=*/0, /*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[2], /*immArgAttrNames=*/["prob"]> {
let arguments = (ins AnySignlessInteger:$val,
AnySignlessInteger:$expected,
Expand Down Expand Up @@ -825,7 +826,7 @@ class LLVM_VecReductionAccBase<string mnem, Type element>
/*overloadedResults=*/[],
/*overloadedOperands=*/[1],
/*traits=*/[Pure, SameOperandsAndResultElementType],
/*equiresFastmath=*/1>,
/*requiresFastmath=*/1>,
Arguments<(ins element:$start_value,
LLVM_VectorOf<element>:$input,
DefaultValuedAttr<LLVM_FastmathFlagsAttr, "{}">:$fastmathFlags)>;
Expand Down Expand Up @@ -1069,14 +1070,36 @@ def LLVM_masked_scatter : LLVM_ZeroResultIntrOp<"masked.scatter"> {
}

/// Create a call to Masked Expand Load intrinsic.
def LLVM_masked_expandload : LLVM_IntrOp<"masked.expandload", [0], [], [], 1> {
let arguments = (ins LLVM_AnyPointer, LLVM_VectorOf<I1>, LLVM_AnyVector);
def LLVM_masked_expandload
: LLVM_OneResultIntrOp<"masked.expandload", [0], [],
/*traits=*/[], /*requiresFastMath=*/0, /*requiresArgAndResultAttrs=*/1,
/*immArgPositions=*/[], /*immArgAttrNames=*/[]> {
dag args = (ins LLVM_AnyPointer:$ptr,
LLVM_VectorOf<I1>:$mask,
LLVM_AnyVector:$passthru);

let arguments = !con(args, baseArgs);

let builders = [
OpBuilder<(ins "TypeRange":$resTy, "Value":$ptr, "Value":$mask, "Value":$passthru, CArg<"uint64_t", "1">:$align)>
];
}

/// Create a call to Masked Compress Store intrinsic.
def LLVM_masked_compressstore
: LLVM_IntrOp<"masked.compressstore", [], [0], [], 0> {
let arguments = (ins LLVM_AnyVector, LLVM_AnyPointer, LLVM_VectorOf<I1>);
: LLVM_ZeroResultIntrOp<"masked.compressstore", [0],
/*traits=*/[], /*requiresAccessGroup=*/0, /*requiresAliasAnalysis=*/0,
/*requiresArgAndResultAttrs=*/1, /*requiresOpBundles=*/0,
/*immArgPositions=*/[], /*immArgAttrNames=*/[]> {
dag args = (ins LLVM_AnyVector:$value,
LLVM_AnyPointer:$ptr,
LLVM_VectorOf<I1>:$mask);

let arguments = !con(args, baseArgs);

let builders = [
OpBuilder<(ins "Value":$value, "Value":$ptr, "Value":$mask, CArg<"uint64_t", "1">:$align)>
];
}

//
Expand Down Expand Up @@ -1155,7 +1178,7 @@ def LLVM_vector_insert
PredOpTrait<"it is not inserting scalable into fixed-length vectors.",
CPred<"!isScalableVectorType($srcvec.getType()) || "
"isScalableVectorType($dstvec.getType())">>],
/*requiresFastmath=*/0,
/*requiresFastmath=*/0, /*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[2], /*immArgAttrNames=*/["pos"]> {
let arguments = (ins LLVM_AnyVector:$dstvec, LLVM_AnyVector:$srcvec,
I64Attr:$pos);
Expand Down Expand Up @@ -1189,7 +1212,7 @@ def LLVM_vector_extract
PredOpTrait<"it is not extracting scalable from fixed-length vectors.",
CPred<"!isScalableVectorType($res.getType()) || "
"isScalableVectorType($srcvec.getType())">>],
/*requiresFastmath=*/0,
/*requiresFastmath=*/0, /*requiresArgAndResultAttrs=*/0,
/*immArgPositions=*/[1], /*immArgAttrNames=*/["pos"]> {
let arguments = (ins LLVM_AnyVector:$srcvec, I64Attr:$pos);
let results = (outs LLVM_AnyVector:$res);
Expand Down
3 changes: 2 additions & 1 deletion mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
Original file line number Diff line number Diff line change
Expand Up @@ -475,11 +475,12 @@ class LLVM_OneResultIntrOp<string mnem, list<int> overloadedResults = [],
list<int> overloadedOperands = [],
list<Trait> traits = [],
bit requiresFastmath = 0,
bit requiresArgAndResultAttrs = 0,
list<int> immArgPositions = [],
list<string> immArgAttrNames = []>
: LLVM_IntrOp<mnem, overloadedResults, overloadedOperands, traits, 1,
/*requiresAccessGroup=*/0, /*requiresAliasAnalysis=*/0,
requiresFastmath, /*requiresArgAndResultAttrs=*/0,
requiresFastmath, requiresArgAndResultAttrs,
/*requiresOpBundles=*/0, immArgPositions,
immArgAttrNames>;

Expand Down
91 changes: 87 additions & 4 deletions mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -2054,7 +2054,9 @@ def Vector_GatherOp :
Variadic<Index>:$indices,
VectorOfNonZeroRankOf<[AnyInteger, Index]>:$index_vec,
VectorOfNonZeroRankOf<[I1]>:$mask,
AnyVectorOfNonZeroRank:$pass_thru)>,
AnyVectorOfNonZeroRank:$pass_thru,
ConfinedAttr<OptionalAttr<I64Attr>,
[AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)>,
Results<(outs AnyVectorOfNonZeroRank:$result)> {

let summary = [{
Expand Down Expand Up @@ -2085,6 +2087,12 @@ def Vector_GatherOp :
during progressively lowering to bring other memory operations closer to
hardware ISA support for a gather.

An optional `alignment` attribute allows specifying the byte alignment of the
gather operation. It must be a positive power of 2. The operation must access
memory at an address aligned to this boundary. Violations may lead to
architecture-specific faults or performance penalties.
If the attribute is absent, no specific alignment is required.

Examples:

```mlir
Expand All @@ -2111,6 +2119,20 @@ def Vector_GatherOp :
"`into` type($result)";
let hasCanonicalizer = 1;
let hasVerifier = 1;

let builders = [
OpBuilder<(ins "VectorType":$resultType,
"Value":$base,
"ValueRange":$indices,
"Value":$index_vec,
"Value":$mask,
"Value":$passthrough,
CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
return build($_builder, $_state, resultType, base, indices, index_vec, mask, passthrough,
alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
nullptr);
}]>
];
}

def Vector_ScatterOp :
Expand All @@ -2119,7 +2141,9 @@ def Vector_ScatterOp :
Variadic<Index>:$indices,
VectorOfNonZeroRankOf<[AnyInteger, Index]>:$index_vec,
VectorOfNonZeroRankOf<[I1]>:$mask,
AnyVectorOfNonZeroRank:$valueToStore)> {
AnyVectorOfNonZeroRank:$valueToStore,
ConfinedAttr<OptionalAttr<I64Attr>,
[AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)> {

let summary = [{
scatters elements from a vector into memory as defined by an index vector
Expand Down Expand Up @@ -2153,6 +2177,12 @@ def Vector_ScatterOp :
correspond to those of the `llvm.masked.scatter`
[intrinsic](https://llvm.org/docs/LangRef.html#llvm-masked-scatter-intrinsics).

An optional `alignment` attribute allows specifying the byte alignment of the
scatter operation. It must be a positive power of 2. The operation must access
memory at an address aligned to this boundary. Violations may lead to
architecture-specific faults or performance penalties.
If the attribute is absent, no specific alignment is required.

Examples:

```mlir
Expand All @@ -2177,14 +2207,29 @@ def Vector_ScatterOp :
"type($index_vec) `,` type($mask) `,` type($valueToStore)";
let hasCanonicalizer = 1;
let hasVerifier = 1;

let builders = [
OpBuilder<(ins "Value":$base,
"ValueRange":$indices,
"Value":$index_vec,
"Value":$mask,
"Value":$valueToStore,
CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">: $alignment), [{
return build($_builder, $_state, base, indices, index_vec, mask, valueToStore,
alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
nullptr);
}]>
];
}

def Vector_ExpandLoadOp :
Vector_Op<"expandload">,
Arguments<(ins Arg<AnyMemRef, "", [MemRead]>:$base,
Variadic<Index>:$indices,
FixedVectorOfNonZeroRankOf<[I1]>:$mask,
AnyVectorOfNonZeroRank:$pass_thru)>,
AnyVectorOfNonZeroRank:$pass_thru,
ConfinedAttr<OptionalAttr<I64Attr>,
[AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)>,
Results<(outs AnyVectorOfNonZeroRank:$result)> {

let summary = "reads elements from memory and spreads them into a vector as defined by a mask";
Expand Down Expand Up @@ -2216,6 +2261,12 @@ def Vector_ExpandLoadOp :
correspond to those of the `llvm.masked.expandload`
[intrinsic](https://llvm.org/docs/LangRef.html#llvm-masked-expandload-intrinsics).

An optional `alignment` attribute allows specifying the byte alignment of the
load operation. It must be a positive power of 2. The operation must access
memory at an address aligned to this boundary. Violations may lead to
architecture-specific faults or performance penalties.
If the attribute is absent, no specific alignment is required.

Note, at the moment this Op is only available for fixed-width vectors.

Examples:
Expand Down Expand Up @@ -2246,14 +2297,29 @@ def Vector_ExpandLoadOp :
"type($base) `,` type($mask) `,` type($pass_thru) `into` type($result)";
let hasCanonicalizer = 1;
let hasVerifier = 1;

let builders = [
OpBuilder<(ins "VectorType":$resultType,
"Value":$base,
"ValueRange":$indices,
"Value":$mask,
"Value":$passthrough,
CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
return build($_builder, $_state, resultType, base, indices, mask, passthrough,
alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
nullptr);
}]>
];
}

def Vector_CompressStoreOp :
Vector_Op<"compressstore">,
Arguments<(ins Arg<AnyMemRef, "", [MemWrite]>:$base,
Variadic<Index>:$indices,
FixedVectorOfNonZeroRankOf<[I1]>:$mask,
AnyVectorOfNonZeroRank:$valueToStore)> {
AnyVectorOfNonZeroRank:$valueToStore,
ConfinedAttr<OptionalAttr<I64Attr>,
[AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)> {

let summary = "writes elements selectively from a vector as defined by a mask";

Expand Down Expand Up @@ -2284,6 +2350,12 @@ def Vector_CompressStoreOp :
correspond to those of the `llvm.masked.compressstore`
[intrinsic](https://llvm.org/docs/LangRef.html#llvm-masked-compressstore-intrinsics).

An optional `alignment` attribute allows specifying the byte alignment of the
store operation. It must be a positive power of 2. The operation must access
memory at an address aligned to this boundary. Violations may lead to
architecture-specific faults or performance penalties.
If the attribute is absent, no specific alignment is required.

Note, at the moment this Op is only available for fixed-width vectors.

Examples:
Expand Down Expand Up @@ -2312,6 +2384,17 @@ def Vector_CompressStoreOp :
"type($base) `,` type($mask) `,` type($valueToStore)";
let hasCanonicalizer = 1;
let hasVerifier = 1;
let builders = [
OpBuilder<(ins "Value":$base,
"ValueRange":$indices,
"Value":$mask,
"Value":$valueToStore,
CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
return build($_builder, $_state, base, indices, valueToStore, mask,
alignment.has_value() ? $_builder.getI64IntegerAttr(alignment->value()) :
nullptr);
}]>
];
}

def Vector_ShapeCastOp :
Expand Down
25 changes: 19 additions & 6 deletions mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -299,8 +299,9 @@ class VectorGatherOpConversion
}

// Resolve alignment.
unsigned align;
if (failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vType,
unsigned align = gather.getAlignment().value_or(0);
if (!align &&
failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vType,
memRefType, align, useVectorAlignment)))
return rewriter.notifyMatchFailure(gather, "could not resolve alignment");

Expand Down Expand Up @@ -354,8 +355,9 @@ class VectorScatterOpConversion
}

// Resolve alignment.
unsigned align;
if (failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vType,
unsigned align = scatter.getAlignment().value_or(0);
if (!align &&
failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vType,
memRefType, align, useVectorAlignment)))
return rewriter.notifyMatchFailure(scatter,
"could not resolve alignment");
Expand Down Expand Up @@ -399,8 +401,14 @@ class VectorExpandLoadOpConversion
Value ptr = getStridedElementPtr(rewriter, loc, memRefType,
adaptor.getBase(), adaptor.getIndices());

// From:
// https://llvm.org/docs/LangRef.html#llvm-masked-expandload-intrinsics
// The pointer alignment defaults to 1.
uint64_t alignment = expand.getAlignment().value_or(1);

rewriter.replaceOpWithNewOp<LLVM::masked_expandload>(
expand, vtype, ptr, adaptor.getMask(), adaptor.getPassThru());
expand, vtype, ptr, adaptor.getMask(), adaptor.getPassThru(),
alignment);
return success();
}
};
Expand All @@ -421,8 +429,13 @@ class VectorCompressStoreOpConversion
Value ptr = getStridedElementPtr(rewriter, loc, memRefType,
adaptor.getBase(), adaptor.getIndices());

// From:
// https://llvm.org/docs/LangRef.html#llvm-masked-compressstore-intrinsics
// The pointer alignment defaults to 1.
uint64_t alignment = compress.getAlignment().value_or(1);

rewriter.replaceOpWithNewOp<LLVM::masked_compressstore>(
compress, adaptor.getValueToStore(), ptr, adaptor.getMask());
compress, adaptor.getValueToStore(), ptr, adaptor.getMask(), alignment);
return success();
}
};
Expand Down
Loading