diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp index be38b22f70c58..f90cd4641fc91 100644 --- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp @@ -261,12 +261,13 @@ class SPIRVInstructionSelector : public InstructionSelector { bool selectUnmergeValues(MachineInstr &I) const; - void selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType, + bool selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const; // Utilities - Register buildI32Constant(uint32_t Val, MachineInstr &I, - const SPIRVType *ResType = nullptr) const; + std::pair<Register, bool> + buildI32Constant(uint32_t Val, MachineInstr &I, + const SPIRVType *ResType = nullptr) const; Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const; Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const; @@ -1018,6 +1019,7 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, MachineInstr &I) const { MachineBasicBlock &BB = *I.getParent(); Register SrcReg = I.getOperand(1).getReg(); + bool Result = true; if (I.getOpcode() == TargetOpcode::G_MEMSET) { assert(I.getOperand(1).isReg() && I.getOperand(2).isReg()); unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI); @@ -1038,12 +1040,13 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); GR.add(GV, GR.CurMF, VarReg); - BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) - .addDef(VarReg) - .addUse(GR.getSPIRVTypeID(VarTy)) - .addImm(SPIRV::StorageClass::UniformConstant) - .addUse(Const) - .constrainAllUses(TII, TRI, RBI); + Result &= + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable)) + .addDef(VarReg) + .addUse(GR.getSPIRVTypeID(VarTy)) + .addImm(SPIRV::StorageClass::UniformConstant) + .addUse(Const) + .constrainAllUses(TII, TRI, 
RBI); buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {}); SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType( ValTy, I, TII, SPIRV::StorageClass::UniformConstant); @@ -1056,10 +1059,12 @@ bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg, .addUse(I.getOperand(2).getReg()); if (I.getNumMemOperands()) addMemoryOperands(*I.memoperands_begin(), MIB); - bool Result = MIB.constrainAllUses(TII, TRI, RBI); + Result &= MIB.constrainAllUses(TII, TRI, RBI); if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg()) - BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg) - .addUse(MIB->getOperand(0).getReg()); + Result &= + BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg) + .addUse(MIB->getOperand(0).getReg()) + .constrainAllUses(TII, TRI, RBI); return Result; } @@ -1068,11 +1073,14 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg, MachineInstr &I, unsigned NewOpcode, unsigned NegateOpcode) const { + bool Result = true; assert(I.hasOneMemOperand()); const MachineMemOperand *MemOp = *I.memoperands_begin(); uint32_t Scope = static_cast(getMemScope( GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID())); - Register ScopeReg = buildI32Constant(Scope, I); + auto ScopeConstant = buildI32Constant(Scope, I); + Register ScopeReg = ScopeConstant.first; + Result &= ScopeConstant.second; Register Ptr = I.getOperand(1).getReg(); // TODO: Changed as it's implemented in the translator. 
See test/atomicrmw.ll @@ -1080,26 +1088,27 @@ bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg, // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)); AtomicOrdering AO = MemOp->getSuccessOrdering(); uint32_t MemSem = static_cast(getMemSemantics(AO)); - Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I); + auto MemSemConstant = buildI32Constant(MemSem /*| ScSem*/, I); + Register MemSemReg = MemSemConstant.first; + Result &= MemSemConstant.second; - bool Result = false; Register ValueReg = I.getOperand(2).getReg(); if (NegateOpcode != 0) { // Translation with negative value operand is requested Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass); - Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode); + Result &= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode); ValueReg = TmpReg; } - Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode)) - .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) - .addUse(Ptr) - .addUse(ScopeReg) - .addUse(MemSemReg) - .addUse(ValueReg) - .constrainAllUses(TII, TRI, RBI); - return Result; + return Result && + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(Ptr) + .addUse(ScopeReg) + .addUse(MemSemReg) + .addUse(ValueReg) + .constrainAllUses(TII, TRI, RBI); } bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const { @@ -1140,16 +1149,21 @@ bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const { bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const { AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm()); uint32_t MemSem = static_cast(getMemSemantics(AO)); - Register MemSemReg = buildI32Constant(MemSem, I); + auto MemSemConstant = buildI32Constant(MemSem, I); + Register MemSemReg = MemSemConstant.first; + bool Result = MemSemConstant.second; SyncScope::ID Ord = 
SyncScope::ID(I.getOperand(1).getImm()); uint32_t Scope = static_cast( getMemScope(GR.CurMF->getFunction().getContext(), Ord)); - Register ScopeReg = buildI32Constant(Scope, I); + auto ScopeConstant = buildI32Constant(Scope, I); + Register ScopeReg = ScopeConstant.first; + Result &= ScopeConstant.second; MachineBasicBlock &BB = *I.getParent(); - return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier)) - .addUse(ScopeReg) - .addUse(MemSemReg) - .constrainAllUses(TII, TRI, RBI); + return Result && + BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier)) + .addUse(ScopeReg) + .addUse(MemSemReg) + .constrainAllUses(TII, TRI, RBI); } bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg, @@ -1193,7 +1207,7 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg, .addUse(GR.getSPIRVTypeID(StructType)); for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i) MIB.addUse(I.getOperand(i).getReg()); - bool Status = MIB.constrainAllUses(TII, TRI, RBI); + bool Result = MIB.constrainAllUses(TII, TRI, RBI); // Build instructions to extract fields of the instruction's result. // A new virtual register to store the higher part of the result struct. Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); @@ -1205,21 +1219,21 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg, .addUse(GR.getSPIRVTypeID(ResType)) .addUse(StructVReg) .addImm(i); - Status &= MIB.constrainAllUses(TII, TRI, RBI); + Result &= MIB.constrainAllUses(TII, TRI, RBI); } // Build boolean value from the higher part. 
- Status &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual)) - .addDef(I.getOperand(1).getReg()) - .addUse(BoolTypeReg) - .addUse(HigherVReg) - .addUse(ZeroReg) - .constrainAllUses(TII, TRI, RBI); - return Status; + return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual)) + .addDef(I.getOperand(1).getReg()) + .addUse(BoolTypeReg) + .addUse(HigherVReg) + .addUse(ZeroReg) + .constrainAllUses(TII, TRI, RBI); } bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const { + bool Result = true; Register ScopeReg; Register MemSemEqReg; Register MemSemNeqReg; @@ -1229,17 +1243,26 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg, const MachineMemOperand *MemOp = *I.memoperands_begin(); unsigned Scope = static_cast(getMemScope( GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID())); - ScopeReg = buildI32Constant(Scope, I); + auto ScopeConstant = buildI32Constant(Scope, I); + ScopeReg = ScopeConstant.first; + Result &= ScopeConstant.second; unsigned ScSem = static_cast( getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr))); AtomicOrdering AO = MemOp->getSuccessOrdering(); unsigned MemSemEq = static_cast(getMemSemantics(AO)) | ScSem; - MemSemEqReg = buildI32Constant(MemSemEq, I); + auto MemSemEqConstant = buildI32Constant(MemSemEq, I); + MemSemEqReg = MemSemEqConstant.first; + Result &= MemSemEqConstant.second; AtomicOrdering FO = MemOp->getFailureOrdering(); unsigned MemSemNeq = static_cast(getMemSemantics(FO)) | ScSem; - MemSemNeqReg = - MemSemEq == MemSemNeq ? 
MemSemEqReg : buildI32Constant(MemSemNeq, I); + if (MemSemEq == MemSemNeq) + MemSemNeqReg = MemSemEqReg; + else { + auto MemSemNeqConstant = buildI32Constant(MemSemNeq, I); + MemSemNeqReg = MemSemNeqConstant.first; + Result &= MemSemNeqConstant.second; + } } else { ScopeReg = I.getOperand(5).getReg(); MemSemEqReg = I.getOperand(6).getReg(); @@ -1251,7 +1274,7 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg, SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val); Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass); const DebugLoc &DL = I.getDebugLoc(); - bool Result = + Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange)) .addDef(ACmpRes) .addUse(GR.getSPIRVTypeID(SpvValTy)) @@ -1264,28 +1287,28 @@ bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg, .constrainAllUses(TII, TRI, RBI); Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass); SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII); - Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual)) + Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual)) .addDef(CmpSuccReg) .addUse(GR.getSPIRVTypeID(BoolTy)) .addUse(ACmpRes) .addUse(Cmp) .constrainAllUses(TII, TRI, RBI); Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass); - Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert)) + Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert)) .addDef(TmpReg) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(ACmpRes) .addUse(GR.getOrCreateUndef(I, ResType, TII)) .addImm(0) .constrainAllUses(TII, TRI, RBI); - Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert)) - .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) - .addUse(CmpSuccReg) - .addUse(TmpReg) - .addImm(1) - .constrainAllUses(TII, TRI, RBI); - return Result; + return Result && + BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert)) + .addDef(ResVReg) + 
.addUse(GR.getSPIRVTypeID(ResType)) + .addUse(CmpSuccReg) + .addUse(TmpReg) + .addImm(1) + .constrainAllUses(TII, TRI, RBI); } static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) { @@ -1435,16 +1458,16 @@ bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg, Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass); SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType( GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic); - bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric)) - .addDef(Tmp) - .addUse(GR.getSPIRVTypeID(GenericPtrTy)) - .addUse(SrcPtr) - .constrainAllUses(TII, TRI, RBI); - return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr)) - .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) - .addUse(Tmp) - .constrainAllUses(TII, TRI, RBI); + bool Result = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric)) + .addDef(Tmp) + .addUse(GR.getSPIRVTypeID(GenericPtrTy)) + .addUse(SrcPtr) + .constrainAllUses(TII, TRI, RBI); + return Result && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(Tmp) + .constrainAllUses(TII, TRI, RBI); } // Check if instructions from the SPV_INTEL_usm_storage_classes extension may @@ -1618,26 +1641,27 @@ bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg, SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII); } + bool Result = true; if (!IsBoolTy) { Register ConstZeroReg = IsFloatTy ? 
buildZerosValF(InputType, I) : buildZerosVal(InputType, I); - BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId)) - .addDef(NotEqualReg) - .addUse(GR.getSPIRVTypeID(SpvBoolTy)) - .addUse(InputRegister) - .addUse(ConstZeroReg) - .constrainAllUses(TII, TRI, RBI); + Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId)) + .addDef(NotEqualReg) + .addUse(GR.getSPIRVTypeID(SpvBoolTy)) + .addUse(InputRegister) + .addUse(ConstZeroReg) + .constrainAllUses(TII, TRI, RBI); } if (!IsVectorTy) - return true; + return Result; - return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll)) - .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy)) - .addUse(NotEqualReg) - .constrainAllUses(TII, TRI, RBI); + return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy)) + .addUse(NotEqualReg) + .constrainAllUses(TII, TRI, RBI); } bool SPIRVInstructionSelector::selectAll(Register ResVReg, @@ -1762,14 +1786,12 @@ bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg, .addUse(I.getOperand(3).getReg()) .constrainAllUses(TII, TRI, RBI); - Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS)) - .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) - .addUse(Dot) - .addUse(I.getOperand(4).getReg()) - .constrainAllUses(TII, TRI, RBI); - - return Result; + return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(Dot) + .addUse(I.getOperand(4).getReg()) + .constrainAllUses(TII, TRI, RBI); } // Since pre-1.6 SPIRV has no DotProductInput4x8BitPacked implementation, @@ -1784,7 +1806,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion( assert(I.getOperand(4).isReg()); MachineBasicBlock &BB = *I.getParent(); - bool Result = false; + bool Result = true; // Acc = C Register Acc = I.getOperand(4).getReg(); @@ -1796,7 +1818,7 @@ bool 
SPIRVInstructionSelector::selectDot4AddPackedExpansion( for (unsigned i = 0; i < 4; i++) { // A[i] Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass); - Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) + Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) .addDef(AElt) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(2).getReg()) @@ -1806,7 +1828,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion( // B[i] Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass); - Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) + Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) .addDef(BElt) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(3).getReg()) @@ -1816,7 +1838,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion( // A[i] * B[i] Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass); - Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS)) + Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS)) .addDef(Mul) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(AElt) @@ -1825,7 +1847,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion( // Discard 24 highest-bits so that stored i32 register is i8 equivalent Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass); - Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) + Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp)) .addDef(MaskMul) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(Mul) @@ -1836,7 +1858,7 @@ bool SPIRVInstructionSelector::selectDot4AddPackedExpansion( // Acc = Acc + A[i] * B[i] Register Sum = i < 3 ? 
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg; - Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS)) + Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS)) .addDef(Sum) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(Acc) @@ -1907,7 +1929,7 @@ bool SPIRVInstructionSelector::selectSign(Register ResVReg, if (NeedsConversion) { auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert; - Result |= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode)) + Result &= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode)) .addDef(ResVReg) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(SignReg) @@ -2170,7 +2192,7 @@ void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB, addNumImm(I.getOperand(1).getCImm()->getValue(), MIB); } -Register +std::pair<Register, bool> SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I, const SPIRVType *ResType) const { Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32); @@ -2179,6 +2201,7 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I, // Find a constant in DT or build a new one. 
auto ConstInt = ConstantInt::get(LLVMTy, Val); Register NewReg = GR.find(ConstInt, GR.CurMF); + bool Result = true; if (!NewReg.isValid()) { NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64)); GR.add(ConstInt, GR.CurMF, NewReg); @@ -2194,9 +2217,9 @@ SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I, .addUse(GR.getSPIRVTypeID(SpvI32Ty)) .addImm(APInt(32, Val).getZExtValue()); } - constrainSelectedInstRegOperands(*MI, TII, TRI, RBI); + Result &= constrainSelectedInstRegOperands(*MI, TII, TRI, RBI); } - return NewReg; + return {NewReg, Result}; } bool SPIRVInstructionSelector::selectFCmp(Register ResVReg, @@ -2322,18 +2345,18 @@ bool SPIRVInstructionSelector::selectIntToBool(Register IntReg, Register Zero = buildZerosVal(IntTy, I); Register One = buildOnesVal(false, IntTy, I); MachineBasicBlock &BB = *I.getParent(); - BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) - .addDef(BitIntReg) - .addUse(GR.getSPIRVTypeID(IntTy)) - .addUse(IntReg) - .addUse(One) - .constrainAllUses(TII, TRI, RBI); - return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual)) - .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(BoolTy)) - .addUse(BitIntReg) - .addUse(Zero) - .constrainAllUses(TII, TRI, RBI); + bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) + .addDef(BitIntReg) + .addUse(GR.getSPIRVTypeID(IntTy)) + .addUse(IntReg) + .addUse(One) + .constrainAllUses(TII, TRI, RBI); + return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(BoolTy)) + .addUse(BitIntReg) + .addUse(Zero) + .constrainAllUses(TII, TRI, RBI); } bool SPIRVInstructionSelector::selectTrunc(Register ResVReg, @@ -2646,34 +2669,40 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, case Intrinsic::spv_cmpxchg: return selectAtomicCmpXchg(ResVReg, ResType, I); case Intrinsic::spv_unreachable: - BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable)); - break; + return BuildMI(BB, I, 
I.getDebugLoc(), TII.get(SPIRV::OpUnreachable)) + .constrainAllUses(TII, TRI, RBI); case Intrinsic::spv_alloca: return selectFrameIndex(ResVReg, ResType, I); case Intrinsic::spv_alloca_array: return selectAllocaArray(ResVReg, ResType, I); case Intrinsic::spv_assume: if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) - BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR)) - .addUse(I.getOperand(1).getReg()); + return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR)) + .addUse(I.getOperand(1).getReg()) + .constrainAllUses(TII, TRI, RBI); break; case Intrinsic::spv_expect: if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) - BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR)) + return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR)) .addDef(ResVReg) .addUse(GR.getSPIRVTypeID(ResType)) .addUse(I.getOperand(2).getReg()) - .addUse(I.getOperand(3).getReg()); + .addUse(I.getOperand(3).getReg()) + .constrainAllUses(TII, TRI, RBI); break; case Intrinsic::arithmetic_fence: if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence)) - BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpArithmeticFenceEXT)) + return BuildMI(BB, I, I.getDebugLoc(), + TII.get(SPIRV::OpArithmeticFenceEXT)) .addDef(ResVReg) .addUse(GR.getSPIRVTypeID(ResType)) - .addUse(I.getOperand(2).getReg()); + .addUse(I.getOperand(2).getReg()) + .constrainAllUses(TII, TRI, RBI); else - BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg) - .addUse(I.getOperand(2).getReg()); + return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), + ResVReg) + .addUse(I.getOperand(2).getReg()) + .constrainAllUses(TII, TRI, RBI); break; case Intrinsic::spv_thread_id: return selectSpvThreadId(ResVReg, ResType, I); @@ -2712,16 +2741,22 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb return selectFirstBitHigh(ResVReg, ResType, 
I, /*IsSigned=*/true); case Intrinsic::spv_group_memory_barrier_with_group_sync: { - Register MemSemReg = + bool Result = true; + auto MemSemConstant = buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I); - Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup, I); + Register MemSemReg = MemSemConstant.first; + Result &= MemSemConstant.second; + auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I); + Register ScopeReg = ScopeConstant.first; + Result &= ScopeConstant.second; MachineBasicBlock &BB = *I.getParent(); - return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier)) - .addUse(ScopeReg) - .addUse(ScopeReg) - .addUse(MemSemReg) - .constrainAllUses(TII, TRI, RBI); - } break; + return Result && + BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier)) + .addUse(ScopeReg) + .addUse(ScopeReg) + .addUse(MemSemReg) + .constrainAllUses(TII, TRI, RBI); + } case Intrinsic::spv_lifetime_start: case Intrinsic::spv_lifetime_end: { unsigned Op = IID == Intrinsic::spv_lifetime_start ? 
SPIRV::OpLifetimeStart @@ -2730,8 +2765,11 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg(); if (Size == -1) Size = 0; - BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size); - } break; + return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)) + .addUse(PtrReg) + .addImm(Size) + .constrainAllUses(TII, TRI, RBI); + } case Intrinsic::spv_saturate: return selectSaturate(ResVReg, ResType, I); case Intrinsic::spv_wave_is_first_lane: { @@ -2759,8 +2797,7 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, case Intrinsic::spv_value_md: break; case Intrinsic::spv_handle_fromBinding: { - selectHandleFromBinding(ResVReg, ResType, I); - return true; + return selectHandleFromBinding(ResVReg, ResType, I); } default: { std::string DiagMsg; @@ -2773,7 +2810,7 @@ bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg, return true; } -void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg, +bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const { @@ -2792,10 +2829,11 @@ void SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg, // TODO: For now we assume the resource is an image, which needs to be // loaded to get the handle. That will not be true for storage buffers. - BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) + return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) .addDef(ResVReg) .addUse(GR.getSPIRVTypeID(ResType)) - .addUse(VarReg); + .addUse(VarReg) + .constrainAllUses(TII, TRI, RBI); } Register SPIRVInstructionSelector::buildPointerToResource( @@ -3236,14 +3274,12 @@ bool SPIRVInstructionSelector::selectLog10(Register ResVReg, auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector ? 
SPIRV::OpVectorTimesScalar : SPIRV::OpFMulS; - Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) - .addDef(ResVReg) - .addUse(GR.getSPIRVTypeID(ResType)) - .addUse(VarReg) - .addUse(ScaleReg) - .constrainAllUses(TII, TRI, RBI); - - return Result; + return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode)) + .addDef(ResVReg) + .addUse(GR.getSPIRVTypeID(ResType)) + .addUse(VarReg) + .addUse(ScaleReg) + .constrainAllUses(TII, TRI, RBI); } bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg, @@ -3280,10 +3316,12 @@ bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg, GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF()); // Load v3uint value from the global variable. - BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) - .addDef(LoadedRegister) - .addUse(GR.getSPIRVTypeID(Vec3Ty)) - .addUse(Variable); + bool Result = + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad)) + .addDef(LoadedRegister) + .addUse(GR.getSPIRVTypeID(Vec3Ty)) + .addUse(Variable) + .constrainAllUses(TII, TRI, RBI); // Get Thread ID index. Expecting operand is a constant immediate value, // wrapped in a type assignment. @@ -3297,7 +3335,7 @@ .addUse(GR.getSPIRVTypeID(ResType)) .addUse(LoadedRegister) .addImm(ThreadId); - return MIB.constrainAllUses(TII, TRI, RBI); + return Result && MIB.constrainAllUses(TII, TRI, RBI); } namespace llvm {