
Commit 1c4d2a5

SC llvm team authored and committed
Merged main:e072cffe3b63 into amd-gfx:c0b9c269979b
Local branch amd-gfx c0b9c26 Merged main:93c2577020fe into amd-gfx:fb65d18eb39e
Remote branch main e072cff [RISCV] Add immop_oneuse PatLeaf. Use it to replace some PatLeafs with ImmLeaf/IntImmLeaf. NFC (llvm#120804)
2 parents c0b9c26 + e072cff commit 1c4d2a5

File tree: 16 files changed, +99 −117 lines


clang/include/clang/Sema/SemaConcept.h

Lines changed: 6 additions & 17 deletions

@@ -135,31 +135,20 @@ struct NormalizedConstraint {
     return *this;
   }

-  bool isAtomic() const { return Constraint.is<AtomicConstraint *>(); }
+  bool isAtomic() const { return llvm::isa<AtomicConstraint *>(Constraint); }
   bool isFoldExpanded() const {
-    return Constraint.is<FoldExpandedConstraint *>();
+    return llvm::isa<FoldExpandedConstraint *>(Constraint);
   }
-  bool isCompound() const { return Constraint.is<CompoundConstraint>(); }
+  bool isCompound() const { return llvm::isa<CompoundConstraint>(Constraint); }

-  CompoundConstraintKind getCompoundKind() const {
-    assert(isCompound() && "getCompoundKind on a non-compound constraint..");
-    return Constraint.get<CompoundConstraint>().getInt();
-  }
+  CompoundConstraintKind getCompoundKind() const;

   NormalizedConstraint &getLHS() const;
   NormalizedConstraint &getRHS() const;

-  AtomicConstraint *getAtomicConstraint() const {
-    assert(isAtomic() &&
-           "getAtomicConstraint called on non-atomic constraint.");
-    return Constraint.get<AtomicConstraint *>();
-  }
+  AtomicConstraint *getAtomicConstraint() const;

-  FoldExpandedConstraint *getFoldExpandedConstraint() const {
-    assert(isFoldExpanded() &&
-           "getFoldExpandedConstraint called on non-fold-expanded constraint.");
-    return Constraint.get<FoldExpandedConstraint *>();
-  }
+  FoldExpandedConstraint *getFoldExpandedConstraint() const;

 private:
   static std::optional<NormalizedConstraint>

clang/lib/Sema/SemaConcept.cpp

Lines changed: 18 additions & 0 deletions

@@ -1958,3 +1958,21 @@ concepts::TypeRequirement::TypeRequirement(TypeSourceInfo *T) :
     Value(T),
     Status(T->getType()->isInstantiationDependentType() ? SS_Dependent
                                                         : SS_Satisfied) {}
+
+NormalizedConstraint::CompoundConstraintKind
+NormalizedConstraint::getCompoundKind() const {
+  assert(isCompound() && "getCompoundKind on a non-compound constraint..");
+  return cast<CompoundConstraint>(Constraint).getInt();
+}
+
+AtomicConstraint *NormalizedConstraint::getAtomicConstraint() const {
+  assert(isAtomic() && "getAtomicConstraint called on non-atomic constraint.");
+  return cast<AtomicConstraint *>(Constraint);
+}
+
+FoldExpandedConstraint *
+NormalizedConstraint::getFoldExpandedConstraint() const {
+  assert(isFoldExpanded() &&
+         "getFoldExpandedConstraint called on non-fold-expanded constraint.");
+  return cast<FoldExpandedConstraint *>(Constraint);
+}
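Both the SemaConcept change above and the MLIR changes further down are instances of the same LLVM-wide cleanup: the member-style PointerUnion accessors (PU.is<T>(), PU.get<T>(), PU.dyn_cast<T>()) are deprecated in favor of the free llvm::isa/cast/dyn_cast function templates. A minimal standalone sketch of the before/after, assuming hypothetical payload types A and B and a non-null union (cast and dyn_cast assert presence):

#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/Casting.h"

struct A { int x; };
struct B { int y; };

// Reads whichever member the union currently holds; PU must be non-null.
int read(llvm::PointerUnion<A *, B *> PU) {
  if (llvm::isa<A *>(PU))                    // was: PU.is<A *>()
    return llvm::cast<A *>(PU)->x;           // was: PU.get<A *>()
  if (auto *BP = llvm::dyn_cast<B *>(PU))    // was: PU.dyn_cast<B *>()
    return BP->y;
  return 0;
}

The same rewrite applies to the MLIR hunks below, since mlir::OpFoldResult and the data layout entry keys are PointerUnions under the hood.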

llvm/include/llvm/Config/llvm-config.h.cmake

Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@

 /* Indicate that this is LLVM compiled from the amd-gfx branch. */
 #define LLVM_HAVE_BRANCH_AMD_GFX
-#define LLVM_MAIN_REVISION 522209
+#define LLVM_MAIN_REVISION 522213

 /* Define if LLVM_ENABLE_DUMP is enabled */
 #cmakedefine LLVM_ENABLE_DUMP

llvm/lib/Target/RISCV/RISCVInstrInfo.td

Lines changed: 33 additions & 64 deletions

@@ -440,20 +440,10 @@ def GIImmSubFrom32 : GICustomOperandRenderer<"renderImmSubFrom32">,
 // in which imm = imm0 + imm1 and both imm0 and imm1 are simm12. We make imm0
 // as large as possible and imm1 as small as possible so that we might be able
 // to use c.addi for the small immediate.
-def AddiPair : PatLeaf<(imm), [{
-  if (!N->hasOneUse())
-    return false;
+def AddiPair : ImmLeaf<XLenVT, [{
   // The immediate operand must be in range [-4096,-2049] or [2048,4094].
-  int64_t Imm = N->getSExtValue();
   return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
-}]> {
-  let GISelPredicateCode = [{
-    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
-      return false;
-    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
-    return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
-  }];
-}
+}]>;

 // Return imm - (imm < 0 ? -2048 : 2047).
 def AddiPairImmSmall : SDNodeXForm<imm, [{

@@ -492,55 +482,23 @@ def GIXLenSubTrailingOnes : GICustomOperandRenderer<"renderXLenSubTrailingOnes">

 // Checks if this mask is a non-empty sequence of ones starting at the
 // most/least significant bit with the remainder zero and exceeds simm32/simm12.
-def LeadingOnesMask : PatLeaf<(imm), [{
-  if (!N->hasOneUse())
-    return false;
-  return !isInt<32>(N->getSExtValue()) && isMask_64(~N->getSExtValue());
-}], TrailingZeros> {
-  let GISelPredicateCode = [{
-    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
-      return false;
-    const auto &MO = MI.getOperand(1);
-    return !isInt<32>(MO.getCImm()->getSExtValue()) &&
-           isMask_64(~MO.getCImm()->getSExtValue());
-  }];
-}
+def LeadingOnesMask : ImmLeaf<XLenVT, [{
+  return !isInt<32>(Imm) && isMask_64(~Imm);
+}], TrailingZeros>;

-def TrailingOnesMask : PatLeaf<(imm), [{
-  if (!N->hasOneUse())
-    return false;
-  return !isInt<12>(N->getSExtValue()) && isMask_64(N->getZExtValue());
-}], XLenSubTrailingOnes> {
-  let GISelPredicateCode = [{
-    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
-      return false;
-    const auto &MO = MI.getOperand(1);
-    return !isInt<12>(MO.getCImm()->getSExtValue()) &&
-           isMask_64(MO.getCImm()->getZExtValue());
-  }];
-}
+def TrailingOnesMask : IntImmLeaf<XLenVT, [{
+  return !isInt<12>(Imm.getSExtValue()) && isMask_64(Imm.getZExtValue());
+}], XLenSubTrailingOnes>;

 // Similar to LeadingOnesMask, but only consider leading ones in the lower 32
 // bits.
-def LeadingOnesWMask : PatLeaf<(imm), [{
-  if (!N->hasOneUse())
-    return false;
+def LeadingOnesWMask : ImmLeaf<XLenVT, [{
   // If the value is a uint32 but not an int32, it must have bit 31 set and
   // bits 63:32 cleared. After that we're looking for a shifted mask but not
   // an all ones mask.
-  int64_t Imm = N->getSExtValue();
   return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
          Imm != UINT64_C(0xffffffff);
-}], TrailingZeros> {
-  let GISelPredicateCode = [{
-    if (!MRI.hasOneNonDBGUse(MI.getOperand(0).getReg()))
-      return false;
-    const auto &MO = MI.getOperand(1);
-    int64_t Imm = MO.getCImm()->getSExtValue();
-    return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
-           Imm != UINT64_C(0xffffffff);
-  }];
-}
+}], TrailingZeros>;

 //===----------------------------------------------------------------------===//
 // Instruction Formats

@@ -1350,6 +1308,14 @@ def 33signbits_node : PatLeaf<(i64 GPR:$src), [{
   return CurDAG->ComputeNumSignBits(SDValue(N, 0)) > 32;
 }]>;

+class immop_oneuse<ImmLeaf leaf> : PatLeaf<(leaf), [{
+  return N->hasOneUse();
+}]> {
+  let GISelPredicateCode = [{
+    return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
+  }];
+}
+
 /// Simple arithmetic operations

 def : PatGprGpr<add, ADD>;

@@ -1387,10 +1353,12 @@ def : Pat<(XLenVT (sub 0, (and_oneuse GPR:$rs, 1))),
           (ImmSubFromXLen (XLenVT 1)))>;

 // AND with leading/trailing ones mask exceeding simm32/simm12.
-def : Pat<(i64 (and GPR:$rs, LeadingOnesMask:$mask)),
-          (SLLI (i64 (SRLI $rs, LeadingOnesMask:$mask)), LeadingOnesMask:$mask)>;
-def : Pat<(XLenVT (and GPR:$rs, TrailingOnesMask:$mask)),
-          (SRLI (XLenVT (SLLI $rs, TrailingOnesMask:$mask)), TrailingOnesMask:$mask)>;
+def : Pat<(i64 (and GPR:$rs, immop_oneuse<LeadingOnesMask>:$mask)),
+          (SLLI (i64 (SRLI $rs, (TrailingZeros imm:$mask))),
+                (TrailingZeros imm:$mask))>;
+def : Pat<(XLenVT (and GPR:$rs, immop_oneuse<TrailingOnesMask>:$mask)),
+          (SRLI (XLenVT (SLLI $rs, (XLenSubTrailingOnes imm:$mask))),
+                (XLenSubTrailingOnes imm:$mask))>;

 // Match both a plain shift and one where the shift amount is masked (this is
 // typically introduced when the legalizer promotes the shift amount and

@@ -1981,8 +1949,9 @@ def u32simm12 : ImmLeaf<XLenVT, [{

 let Predicates = [IsRV64] in {

-def : Pat<(i64 (and GPR:$rs, LeadingOnesWMask:$mask)),
-          (SLLI (i64 (SRLIW $rs, LeadingOnesWMask:$mask)), LeadingOnesWMask:$mask)>;
+def : Pat<(i64 (and GPR:$rs, immop_oneuse<LeadingOnesWMask>:$mask)),
+          (SLLI (i64 (SRLIW $rs, (TrailingZeros imm:$mask))),
+                (TrailingZeros imm:$mask))>;

 /// sext and zext

@@ -2081,15 +2050,15 @@ def KCFI_CHECK
 }

 /// Simple optimization
-def : Pat<(XLenVT (add GPR:$rs1, (AddiPair:$rs2))),
-          (ADDI (XLenVT (ADDI GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2))),
-                (AddiPairImmSmall GPR:$rs2))>;
+def : Pat<(XLenVT (add GPR:$rs1, immop_oneuse<AddiPair>:$rs2)),
+          (ADDI (XLenVT (ADDI GPR:$rs1, (AddiPairImmLarge imm:$rs2))),
+                (AddiPairImmSmall imm:$rs2))>;

 let Predicates = [IsRV64] in {
 // Select W instructions if only the lower 32-bits of the result are used.
-def : Pat<(binop_allwusers<add> GPR:$rs1, (AddiPair:$rs2)),
-          (ADDIW (i64 (ADDIW GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2))),
-                 (AddiPairImmSmall AddiPair:$rs2))>;
+def : Pat<(binop_allwusers<add> GPR:$rs1, immop_oneuse<AddiPair>:$rs2),
+          (ADDIW (i64 (ADDIW GPR:$rs1, (AddiPairImmLarge imm:$rs2))),
+                 (AddiPairImmSmall imm:$rs2))>;
 }

 //===----------------------------------------------------------------------===//
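Two immediate tricks in the RISCVInstrInfo.td diff above are easy to sanity-check outside TableGen: the leading/trailing-ones patterns rewrite an AND with a wide mask as a shift pair, and the AddiPair patterns split an immediate that is out of simm12 range into two simm12 addends, with the first addend as large as possible so the residue may fit c.addi. A standalone C++ model of both, using hypothetical helper names that mirror the SDNodeXForms rather than any LLVM API:

#include <cassert>
#include <cstdint>

// (x & leading-ones mask) == SLLI(SRLI(x, tz), tz), where tz is the mask's
// trailing zero count; this models what the LeadingOnesMask pattern emits.
constexpr uint64_t andLeadingOnes(uint64_t x, unsigned tz) {
  return (x >> tz) << tz;
}

// AddiPair accepts imm in [-4096,-2049] or [2048,4094] and splits it as
// imm == addiPairImmLarge(imm) + addiPairImmSmall(imm), both simm12.
constexpr int64_t addiPairImmLarge(int64_t imm) { return imm < 0 ? -2048 : 2047; }
constexpr int64_t addiPairImmSmall(int64_t imm) { return imm - addiPairImmLarge(imm); }
constexpr bool isSImm12(int64_t v) { return -2048 <= v && v <= 2047; }

int main() {
  // Masking off the low 32 bits equals a right-then-left shift by 32.
  assert(andLeadingOnes(0x123456789abcdef0, 32) ==
         (0x123456789abcdef0 & 0xffffffff00000000));
  // Check the boundary immediates of both accepted ranges.
  for (int64_t imm : {INT64_C(-4096), INT64_C(-2049), INT64_C(2048), INT64_C(4094)}) {
    assert(isSImm12(addiPairImmLarge(imm)) && isSImm12(addiPairImmSmall(imm)) &&
           addiPairImmLarge(imm) + addiPairImmSmall(imm) == imm);
  }
}

The diff only states the AddiPairImmSmall formula ("imm - (imm < 0 ? -2048 : 2047)"); the complementary addiPairImmLarge above is inferred from it and labeled accordingly.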

llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp

Lines changed: 6 additions & 0 deletions

@@ -61,10 +61,16 @@ Type *VPTypeAnalysis::inferScalarTypeForRecipe(const VPInstruction *R) {
   case Instruction::ICmp:
   case VPInstruction::ActiveLaneMask:
     return inferScalarType(R->getOperand(1));
+  case VPInstruction::ComputeReductionResult: {
+    auto *PhiR = cast<VPReductionPHIRecipe>(R->getOperand(0));
+    auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
+    return OrigPhi->getType();
+  }
   case VPInstruction::ExplicitVectorLength:
     return Type::getIntNTy(Ctx, 32);
   case VPInstruction::FirstOrderRecurrenceSplice:
   case VPInstruction::Not:
+  case VPInstruction::ResumePhi:
     return SetResultTyFromOp();
   case VPInstruction::ExtractFromEnd: {
     Type *BaseTy = inferScalarType(R->getOperand(0));

llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

Lines changed: 1 addition & 1 deletion

@@ -629,7 +629,7 @@ Value *VPInstruction::generate(VPTransformState &State) {
     Value *IncomingFromOtherPreds =
         State.get(getOperand(1), /* IsScalar */ true);
     auto *NewPhi =
-        Builder.CreatePHI(IncomingFromOtherPreds->getType(), 2, Name);
+        Builder.CreatePHI(State.TypeAnalysis.inferScalarType(this), 2, Name);
     BasicBlock *VPlanPred =
         State.CFG
             .VPBB2IRBB[cast<VPBasicBlock>(getParent()->getPredecessors()[0])];

mlir/lib/Dialect/Arith/Utils/Utils.cpp

Lines changed: 1 addition & 1 deletion

@@ -66,7 +66,7 @@ mlir::inferExpandShapeOutputShape(OpBuilder &b, Location loc,
     int64_t inputIndex = it.index();
     // Call get<Value>() under the assumption that we're not casting
     // dynamism.
-    Value indexGroupSize = inputShape[inputIndex].get<Value>();
+    Value indexGroupSize = cast<Value>(inputShape[inputIndex]);
     Value indexGroupStaticSizesProduct =
         b.create<arith::ConstantIndexOp>(loc, indexGroupStaticSizesProductInt);
     Value dynamicDimSize = b.createOrFold<arith::DivUIOp>(

mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp

Lines changed: 1 addition & 1 deletion

@@ -174,7 +174,7 @@ FailureOr<Value> bufferization::allocateTensorForShapedValue(
         resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
     for (const auto &dim : enumerate(tensorType.getShape()))
       if (ShapedType::isDynamic(dim.value()))
-        dynamicSizes.push_back(shape[dim.index()].get<Value>());
+        dynamicSizes.push_back(cast<Value>(shape[dim.index()]));
   }
 }

mlir/lib/Dialect/DLTI/DLTI.cpp

Lines changed: 3 additions & 3 deletions

@@ -312,7 +312,7 @@ combineOneSpec(DataLayoutSpecInterface spec,
       continue;
     }

-    Type typeSample = kvp.second.front().getKey().get<Type>();
+    Type typeSample = cast<Type>(kvp.second.front().getKey());
     assert(&typeSample.getDialect() !=
                typeSample.getContext()->getLoadedDialect<BuiltinDialect>() &&
            "unexpected data layout entry for built-in type");

@@ -325,7 +325,7 @@
   }

   for (const auto &kvp : newEntriesForID) {
-    StringAttr id = kvp.second.getKey().get<StringAttr>();
+    StringAttr id = cast<StringAttr>(kvp.second.getKey());
     Dialect *dialect = id.getReferencedDialect();
     if (!entriesForID.count(id)) {
       entriesForID[id] = kvp.second;

@@ -574,7 +574,7 @@ class TargetDataLayoutInterface : public DataLayoutDialectInterface {

   LogicalResult verifyEntry(DataLayoutEntryInterface entry,
                             Location loc) const final {
-    StringRef entryName = entry.getKey().get<StringAttr>().strref();
+    StringRef entryName = cast<StringAttr>(entry.getKey()).strref();
     if (entryName == DLTIDialect::kDataLayoutEndiannessKey) {
       auto value = dyn_cast<StringAttr>(entry.getValue());
       if (value &&

mlir/lib/Dialect/GPU/TransformOps/Utils.cpp

Lines changed: 13 additions & 13 deletions

@@ -113,16 +113,17 @@ static GpuIdBuilderFnType commonLinearIdBuilderFn(int64_t multiplicity = 1) {
     // clang-format on

     // Return n-D ids for indexing and 1-D size + id for predicate generation.
-    return IdBuilderResult{
-        /*mappingIdOps=*/ids,
-        /*availableMappingSizes=*/
-        SmallVector<int64_t>{computeProduct(originalBasis)},
-        // `forallMappingSizes` iterate in the scaled basis, they need to be
-        // scaled back into the original basis to provide tight
-        // activeMappingSizes quantities for predication.
-        /*activeMappingSizes=*/
-        SmallVector<int64_t>{computeProduct(forallMappingSizes) * multiplicity},
-        /*activeIdOps=*/SmallVector<Value>{linearId.get<Value>()}};
+    return IdBuilderResult{
+        /*mappingIdOps=*/ids,
+        /*availableMappingSizes=*/
+        SmallVector<int64_t>{computeProduct(originalBasis)},
+        // `forallMappingSizes` iterate in the scaled basis, they need to be
+        // scaled back into the original basis to provide tight
+        // activeMappingSizes quantities for predication.
+        /*activeMappingSizes=*/
+        SmallVector<int64_t>{computeProduct(forallMappingSizes) *
+                             multiplicity},
+        /*activeIdOps=*/SmallVector<Value>{cast<Value>(linearId)}};
   };

   return res;

@@ -144,9 +145,8 @@ static GpuIdBuilderFnType common3DIdBuilderFn(int64_t multiplicity = 1) {
   // In the 3-D mapping case, scale the first dimension by the multiplicity.
   SmallVector<Value> scaledIds = ids;
   AffineExpr d0 = getAffineDimExpr(0, rewriter.getContext());
-  scaledIds[0] = affine::makeComposedFoldedAffineApply(
-                     rewriter, loc, d0.floorDiv(multiplicity), {scaledIds[0]})
-                     .get<Value>();
+  scaledIds[0] = cast<Value>(affine::makeComposedFoldedAffineApply(
+      rewriter, loc, d0.floorDiv(multiplicity), {scaledIds[0]}));
   // In the 3-D mapping case, unscale the first dimension by the multiplicity.
   SmallVector<int64_t> forallMappingSizeInOriginalBasis(forallMappingSizes);
   forallMappingSizeInOriginalBasis[0] *= multiplicity;
