27 changes: 18 additions & 9 deletions llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -475,14 +475,15 @@ void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
EVT EltVT = VT.getVectorElementType();
SDLoc DL(N);
SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
unsigned NumRegs = EltVT.getSizeInBits() / 32;
bool IsGCN = TM.getTargetTriple().isAMDGCN();

if (NumVectorElts == 1) {
CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
RegClass);
return;
}

bool IsGCN = CurDAG->getSubtarget().getTargetTriple().isAMDGCN();
if (IsGCN && Subtarget->has64BitLiterals() && VT.getSizeInBits() == 64 &&
CurDAG->isConstantValueOfAnyType(SDValue(N, 0))) {
uint64_t C = 0;
@@ -511,8 +512,10 @@ void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
}
}

assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
"supported yet");
assert(NumVectorElts <= 32 &&
"Vectors with more than 32 elements are not supported yet");
assert((IsGCN || (!IsGCN && NumRegs == 1)) &&
"R600 does not support 64-bit reg_seq elements");
// 32 = Max Num Vector Elements
// 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
// 1 = Vector Register Class
@@ -527,8 +530,9 @@ void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
IsRegSeq = false;
break;
}
unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
: R600RegisterInfo::getSubRegFromChannel(i);
unsigned Sub =
IsGCN ? SIRegisterInfo::getSubRegFromChannel(i * NumRegs, NumRegs)
: R600RegisterInfo::getSubRegFromChannel(i);
RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
}
@@ -538,8 +542,9 @@ void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
DL, EltVT);
for (unsigned i = NOps; i < NumVectorElts; ++i) {
unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
: R600RegisterInfo::getSubRegFromChannel(i);
unsigned Sub =
IsGCN ? SIRegisterInfo::getSubRegFromChannel(i * NumRegs, NumRegs)
: R600RegisterInfo::getSubRegFromChannel(i);
RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
RegSeqArgs[1 + (2 * i) + 1] =
CurDAG->getTargetConstant(Sub, DL, MVT::i32);
@@ -707,9 +712,13 @@ void AMDGPUDAGToDAGISel::Select(SDNode *N) {
break;
}

assert(VT.getVectorElementType().bitsEq(MVT::i32));
EVT VET = VT.getVectorElementType();
assert((VET.bitsEq(MVT::i32) || VET.bitsEq(MVT::i64)) &&
"Only 32-bit and 64-bit vector elements supported");
unsigned EltSize = VET.getSizeInBits();
unsigned RegClassID =
SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * EltSize)
->getID();
SelectBuildVector(N, RegClassID);
return;
}
160 changes: 145 additions & 15 deletions llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -360,9 +360,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
// Most operations are naturally 32-bit vector operations. We only support
// load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
for (MVT Vec64 : {MVT::v2i64, MVT::v2f64}) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

if (!STI.hasMovB64()) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);
}
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

@@ -374,9 +375,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}

for (MVT Vec64 : {MVT::v3i64, MVT::v3f64}) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32);

if (!STI.hasMovB64()) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v6i32);
}
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v6i32);

@@ -388,9 +390,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}

for (MVT Vec64 : {MVT::v4i64, MVT::v4f64}) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32);

if (!STI.hasMovB64()) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v8i32);
}
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v8i32);

@@ -402,9 +405,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}

for (MVT Vec64 : {MVT::v8i64, MVT::v8f64}) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32);

if (!STI.hasMovB64()) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v16i32);
}
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v16i32);

@@ -416,9 +420,10 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
}

for (MVT Vec64 : {MVT::v16i64, MVT::v16f64}) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32);

if (!STI.hasMovB64()) {
setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v32i32);
}
setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v32i32);

@@ -977,6 +982,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,

setTargetDAGCombine({ISD::ADD,
ISD::PTRADD,
ISD::BUILD_VECTOR,
ISD::UADDO_CARRY,
ISD::SUB,
ISD::USUBO_CARRY,
@@ -15103,6 +15109,18 @@ bool SITargetLowering::shouldExpandVectorDynExt(SDNode *N) const {
EltSize, NumElem, Idx->isDivergent(), getSubtarget());
}

static unsigned getMappedVectorIndex(unsigned Idx, EVT From, EVT To) {
assert(From.isVector() && To.isVector() &&
"Expected From and To types to be vector types.");
assert(From.getSizeInBits() == To.getSizeInBits() &&
"Expected From and To vector types require to have the same size.");

unsigned FromNumElts = From.getVectorNumElements();
unsigned ToNumElts = To.getVectorNumElements();

return (Idx * ToNumElts) / FromNumElts;
}
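
A minimal stand-alone illustration of the mapping this helper computes, assuming From = v4i32 and To = v2i64 (example types chosen here, not taken from the patch):

```cpp
// Illustration only (not part of the patch): check the mapping formula
// (Idx * ToNumElts) / FromNumElts for a v4i32 view of a v2i64 vector.
#include <cassert>

int main() {
  unsigned FromNumElts = 4; // v4i32
  unsigned ToNumElts = 2;   // v2i64
  auto Map = [&](unsigned Idx) { return (Idx * ToNumElts) / FromNumElts; };
  assert(Map(0) == 0 && Map(1) == 0); // both i32 halves map to i64 element 0
  assert(Map(2) == 1 && Map(3) == 1); // both i32 halves map to i64 element 1
  return 0;
}
```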

SDValue
SITargetLowering::performExtractVectorEltCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
@@ -15186,6 +15204,27 @@ SITargetLowering::performExtractVectorEltCombine(SDNode *N,
}
}

// if PeekThroughBitcast(Vec)[MapIdx(CIdx)] == undef &&
// VecEltSize < PeekThroughEltSize, then
// EXTRACT_VECTOR_ELT(bitcast(build_vector(..., undef, ...)), CIdx) => undef
auto *IndexC = dyn_cast<ConstantSDNode>(N->getOperand(1));
SDValue PeekThroughVec = peekThroughBitcasts(Vec);
EVT PeekThroughVecVT = PeekThroughVec.getValueType();
if (IndexC && PeekThroughVec.getOpcode() == ISD::BUILD_VECTOR &&
PeekThroughVecVT.isFixedLengthVector()) {
EVT PeekThroughVecEltVT = PeekThroughVecVT.getVectorElementType();
// Small elt size vectors to big elt size vectors are the cases covered for
// now (e.g., v4i32 bitcast(v2i64)) which may be conservative.
if (VecEltSize < PeekThroughVecEltVT.getSizeInBits()) {
unsigned IndexVal = IndexC->getZExtValue();
unsigned MappedIndexVal =
getMappedVectorIndex(IndexVal, VecVT, PeekThroughVecVT);
SDValue PeekThroughElt = PeekThroughVec.getOperand(MappedIndexVal);
if (PeekThroughElt.isUndef())
return DAG.getNode(PeekThroughElt.getOpcode(), SDLoc(), VecEltVT);
}
}

// EXTRACT_VECTOR_ELT (<n x e>, var-idx) => n x select (e, const-idx)
if (shouldExpandVectorDynExt(N)) {
SDLoc SL(N);
@@ -16792,6 +16831,95 @@ SDValue SITargetLowering::performSelectCombine(SDNode *N,
SelectLHS, SelectRHS);
}

SDValue
SITargetLowering::performBuildVectorCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
// TODO: legalize for all targets instead of just v_mov_b64 enabled ones,
// legalizing could still enable s_mov_b64 which is supported on all targets.
const GCNSubtarget *ST = getSubtarget();
if (DCI.Level < AfterLegalizeDAG || !ST->hasMovB64())
return SDValue();

SelectionDAG &DAG = DCI.DAG;
SDLoc SL(N);

EVT VT = N->getValueType(0);
EVT EltVT = VT.getVectorElementType();
unsigned SizeBits = VT.getSizeInBits();
unsigned EltSize = EltVT.getSizeInBits();

// Skip if:
// - Value type isn't multiple of 64 bit (e.g., v3i32), or
// - Element type has already been combined into 64b elements
if ((SizeBits % 64) != 0 || EltVT == MVT::i64 || EltVT == MVT::f64)
return SDValue();

// Construct the 64b values.
SmallVector<uint64_t, 8> ImmVals;
uint64_t ImmVal = 0;
uint64_t ImmSize = 0;
for (SDValue Opand : N->ops()) {
// Build_vector with constants only.
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Opand);
ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(Opand);
BuildVectorSDNode *BV =
dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Opand));

if (!C && !FPC && !BV)
return SDValue();

uint64_t Val = 0;
if (BV) {
if (!BV->isConstant())
return SDValue();
bool IsLE = DAG.getDataLayout().isLittleEndian();
BitVector UndefElements;
SmallVector<APInt> RawBits;
if (!BV->getConstantRawBits(IsLE, EltSize, RawBits, UndefElements))
return SDValue();

assert(RawBits.size() == 1 &&
"BuildVector constant value retrieval expected 1 element");

if (UndefElements.any())
return SDValue();

Val = RawBits[0].getZExtValue();
} else {
Val = C ? C->getZExtValue()
: FPC->getValueAPF().bitcastToAPInt().getZExtValue();
}
ImmVal |= Val << ImmSize;
ImmSize += EltSize;
Contributor:

I don't understand what ImmSize is for. All the sizes are exactly computable from the type and number of operands; you shouldn't need to sum anything?

Contributor Author:

I'd still have to keep track of some index to know how to shift the immediate value into ImmVal, or to know when we've reached a finished ImmVal. I just thought I could combine the iteration and the element size to deal with the shift value and the end-of-64-bit ImmVal more conveniently.
For now I'll leave it as ImmSize, but if you'd prefer the iteration index plus a computation from the vector type, let me know.
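A minimal sketch of the index-based variant the reviewer describes, reusing the patch's surrounding names (N, EltSize, ImmVal, ImmVals) and assuming Val holds the operand's constant bits as computed earlier in the loop; illustrative only, not the code as landed:

```cpp
// Sketch: derive the shift amount and the "finished 64-bit value" condition
// from the operand index instead of accumulating a running ImmSize.
const unsigned EltsPer64 = 64 / EltSize; // e.g. 2 for i32 elements
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
  uint64_t Val = 0; // constant bits of N->getOperand(i), as in the patch
  ImmVal |= Val << ((i % EltsPer64) * EltSize);
  if ((i % EltsPer64) == EltsPer64 - 1) { // one 64-bit value completed
    if (!isUInt<32>(ImmVal))
      return SDValue();
    ImmVals.push_back(ImmVal);
    ImmVal = 0;
  }
}
```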
if (ImmSize == 64) {
if (!isUInt<32>(ImmVal))
return SDValue();
ImmVals.push_back(ImmVal);
ImmVal = 0;
ImmSize = 0;
}
}

// Avoid emitting build_vector with 1 element and directly emit value.
if (ImmVals.size() == 1) {
SDValue Val = DAG.getConstant(ImmVals[0], SL, MVT::i64);
return DAG.getBitcast(VT, Val);
}

// Construct and return build_vector with 64b elements.
if (!ImmVals.empty()) {
SmallVector<SDValue, 8> VectorConsts(ImmVals.size());
for (unsigned i = 0; i < ImmVals.size(); ++i)
VectorConsts[i] = DAG.getConstant(ImmVals[i], SL, MVT::i64);
unsigned NewNumElts = SizeBits / 64;
LLVMContext &Ctx = *DAG.getContext();
EVT NewVT = EVT::getVectorVT(Ctx, MVT::i64, NewNumElts);
SDValue BV = DAG.getBuildVector(NewVT, SL, VectorConsts);
return DAG.getBitcast(VT, BV);
}
return SDValue();
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
switch (N->getOpcode()) {
@@ -16885,6 +17013,8 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
return performFCanonicalizeCombine(N, DCI);
case AMDGPUISD::RCP:
return performRcpCombine(N, DCI);
case ISD::BUILD_VECTOR:
return performBuildVectorCombine(N, DCI);
case ISD::FLDEXP:
case AMDGPUISD::FRACT:
case AMDGPUISD::RSQ:
1 change: 1 addition & 0 deletions llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -245,6 +245,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
SDValue performCvtF32UByteNCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performClampCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue performBuildVectorCombine(SDNode *N, DAGCombinerInfo &DCI) const;

bool isLegalMUBUFAddressingMode(const AddrMode &AM) const;

32 changes: 16 additions & 16 deletions llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -10257,48 +10257,48 @@ define void @flat_atomic_fsub_f64_ret_av_av(ptr %ptr) #0 {
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX950-NEXT: s_mov_b64 s[2:3], 0x50
; GFX950-NEXT: s_mov_b64 s[0:1], src_private_base
; GFX950-NEXT: v_lshl_add_u64 v[0:1], v[0:1], 0, s[2:3]
; GFX950-NEXT: v_cmp_ne_u32_e32 vcc, s1, v1
; GFX950-NEXT: v_lshl_add_u64 v[2:3], v[0:1], 0, s[2:3]
; GFX950-NEXT: v_cmp_ne_u32_e32 vcc, s1, v3
; GFX950-NEXT: ;;#ASMSTART
; GFX950-NEXT: ; def v[2:3]
; GFX950-NEXT: ; def v[4:5]
; GFX950-NEXT: ;;#ASMEND
; GFX950-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX950-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX950-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX950-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
; GFX950-NEXT: s_cbranch_execz .LBB130_4
; GFX950-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX950-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
; GFX950-NEXT: flat_load_dwordx2 v[0:1], v[2:3]
; GFX950-NEXT: s_mov_b64 s[2:3], 0
; GFX950-NEXT: .LBB130_2: ; %atomicrmw.start
; GFX950-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_mov_b64_e32 v[6:7], v[4:5]
; GFX950-NEXT: v_add_f64 v[4:5], v[6:7], -v[2:3]
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] sc0
; GFX950-NEXT: v_mov_b64_e32 v[8:9], v[0:1]
; GFX950-NEXT: v_add_f64 v[6:7], v[8:9], -v[4:5]
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; GFX950-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX950-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX950-NEXT: s_cbranch_execnz .LBB130_2
; GFX950-NEXT: ; %bb.3: ; %Flow
; GFX950-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX950-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX950-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX950-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX950-NEXT: .LBB130_4: ; %Flow3
; GFX950-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; GFX950-NEXT: s_cbranch_execz .LBB130_6
; GFX950-NEXT: ; %bb.5: ; %atomicrmw.private
; GFX950-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
; GFX950-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[2:3]
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc
; GFX950-NEXT: scratch_load_dwordx2 v[4:5], v6, off
; GFX950-NEXT: v_cndmask_b32_e32 v6, -1, v2, vcc
; GFX950-NEXT: scratch_load_dwordx2 v[0:1], v6, off
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_add_f64 v[0:1], v[4:5], -v[2:3]
; GFX950-NEXT: scratch_store_dwordx2 v6, v[0:1], off
; GFX950-NEXT: v_add_f64 v[2:3], v[0:1], -v[4:5]
; GFX950-NEXT: scratch_store_dwordx2 v6, v[2:3], off
; GFX950-NEXT: .LBB130_6: ; %atomicrmw.phi
; GFX950-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX950-NEXT: ;;#ASMSTART
; GFX950-NEXT: ; use v[4:5]
; GFX950-NEXT: ; use v[0:1]
; GFX950-NEXT: ;;#ASMEND
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: s_setpc_b64 s[30:31]