
Commit 7f35055

FEAT:
- Lowered the sadd.with.overflow intrinsic to a series of SPIR-V instructions that check for overflow.
- Used the logic that if both operands are positive and the sum is less than either operand, an overflow occurred.
- Similarly, if both operands are negative and the sum is greater than either operand, an overflow occurred.
- Added a test to ensure all expected instructions are present.
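For reference, the overflow check described above is roughly equivalent to the following standalone C++ sketch. This is illustrative only, not code from this commit; the function name saddOverflows and the fixed 32-bit width are assumptions made for the example.

#include <cstdint>

// Hedged sketch: the per-lane predicate the emitted SPIR-V ops compute for
// llvm.sadd.with.overflow, written here for a single 32-bit value.
bool saddOverflows(int32_t A, int32_t B) {
  // OpIAdd wraps on overflow, so model the sum with unsigned (wrapping) math.
  int32_t Sum = static_cast<int32_t>(static_cast<uint32_t>(A) +
                                     static_cast<uint32_t>(B));
  bool PosOverflow = A > 0 && B > 0 && Sum < B; // both positive, sum wrapped down
  bool NegOverflow = A < 0 && B < 0 && Sum > B; // both negative, sum wrapped up
  return PosOverflow || NegOverflow;
}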
1 parent 70fdd9f commit 7f35055

2 files changed: +342, -1 lines

2 files changed

+342
-1
lines changed

llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp

Lines changed: 181 additions & 1 deletion
@@ -202,6 +202,9 @@ class SPIRVInstructionSelector : public InstructionSelector {
   bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType,
                            MachineInstr &I, unsigned Opcode) const;

+  bool selectOverflowArithSigned(Register ResVReg, const SPIRVType *ResType,
+                                 MachineInstr &I, bool isVectorType) const;
+
   bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, bool Signed) const;

@@ -511,7 +514,6 @@ static bool mayApplyGenericSelection(unsigned Opcode) {
   switch (Opcode) {
   case TargetOpcode::G_CONSTANT:
     return false;
-  case TargetOpcode::G_SADDO:
   case TargetOpcode::G_SSUBO:
     return true;
   }
@@ -730,6 +732,11 @@ bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                ResType->getOpcode() == SPIRV::OpTypeVector
                                    ? SPIRV::OpIAddCarryV
                                    : SPIRV::OpIAddCarryS);
+  case TargetOpcode::G_SADDO:
+    return selectOverflowArithSigned(ResVReg, ResType, I,
+                                     ResType->getOpcode() == SPIRV::OpTypeVector
+                                         ? true
+                                         : false);
   case TargetOpcode::G_USUBO:
     return selectOverflowArith(ResVReg, ResType, I,
                                ResType->getOpcode() == SPIRV::OpTypeVector
@@ -1370,6 +1377,179 @@ bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
       .constrainAllUses(TII, TRI, RBI);
 }

+bool SPIRVInstructionSelector::selectOverflowArithSigned(Register ResVReg,
+                                                         const SPIRVType *ResType,
+                                                         MachineInstr &I,
+                                                         bool isVector) const {
+
+  // Check for overflow using the logic that if both operands are positive and
+  // the sum is less than one of the operands, an overflow occurred. Likewise,
+  // if both operands are negative and the sum is greater than one of the
+  // operands, an overflow also occurred.
+
+  Type *ResTy = nullptr;
+  StringRef ResName;
+  MachineIRBuilder MIRBuilder(I);
+  if (!GR.findValueAttrs(&I, ResTy, ResName))
+    report_fatal_error(
+        "Not enough info to select the signed arithmetic instruction");
+  if (!ResTy || !ResTy->isStructTy())
+    report_fatal_error(
+        "Expect struct type result for the signed arithmetic instruction");
+
+  StructType *ResStructTy = cast<StructType>(ResTy);
+  Type *ResElemTy = ResStructTy->getElementType(0);
+  Type *OverflowTy = ResStructTy->getElementType(1);
+  ResTy = StructType::get(ResElemTy, OverflowTy);
+  SPIRVType *StructType = GR.getOrCreateSPIRVType(
+      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
+  if (!StructType) {
+    report_fatal_error("Failed to create SPIR-V type for struct");
+  }
+  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
+  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
+  if (N > 1)
+    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
+  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
+  Register ZeroReg = buildZerosVal(ResType, I);
+  Register StructVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
+  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
+
+  if (ResName.size() > 0)
+    buildOpName(StructVReg, ResName, MIRBuilder);
+
+  MachineBasicBlock &BB = *I.getParent();
+  Register SumVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
+  MRI->setRegClass(SumVReg, &SPIRV::IDRegClass);
+  SPIRVType *IntType = GR.getOrCreateSPIRVType(ResElemTy, MIRBuilder);
+
+  // Emit the wrapping addition (OpIAdd) on the scalar or vector operands.
+  auto SumMIB = BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(),
+                        TII.get(isVector ? SPIRV::OpIAddV : SPIRV::OpIAddS))
+                    .addDef(SumVReg)
+                    .addUse(GR.getSPIRVTypeID(IntType));
+  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
+    SumMIB.addUse(I.getOperand(i).getReg());
+  bool Result = SumMIB.constrainAllUses(TII, TRI, RBI);
+
+  Register OverflowVReg = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(OverflowVReg, &SPIRV::IDRegClass);
+  unsigned i = I.getNumDefs();
+
+  // Positive-overflow path: both operands > 0 and the sum < the second operand.
+  Register posCheck1 = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(posCheck1, &SPIRV::IDRegClass);
+  Register posCheck2 = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(posCheck2, &SPIRV::IDRegClass);
+  Register posCheck3 = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(posCheck3, &SPIRV::IDRegClass);
+  Register posOverflow = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(posOverflow, &SPIRV::IDRegClass);
+  Register posOverflowCheck = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(posOverflowCheck, &SPIRV::IDRegClass);
+
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSGreaterThan))
+      .addDef(posCheck1)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(I.getOperand(i).getReg())
+      .addUse(ZeroReg);
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSGreaterThan))
+      .addDef(posCheck2)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(I.getOperand(i + 1).getReg())
+      .addUse(ZeroReg);
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSLessThan))
+      .addDef(posCheck3)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(SumVReg)
+      .addUse(I.getOperand(i + 1).getReg());
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLogicalAnd))
+      .addDef(posOverflow)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(posCheck1)
+      .addUse(posCheck2);
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLogicalAnd))
+      .addDef(posOverflowCheck)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(posOverflow)
+      .addUse(posCheck3);
+
+  // Negative-overflow path: both operands < 0 and the sum > the second operand.
+  Register negCheck1 = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(negCheck1, &SPIRV::IDRegClass);
+  Register negCheck2 = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(negCheck2, &SPIRV::IDRegClass);
+  Register negCheck3 = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(negCheck3, &SPIRV::IDRegClass);
+  Register negOverflow = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(negOverflow, &SPIRV::IDRegClass);
+  Register negOverflowCheck = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(negOverflowCheck, &SPIRV::IDRegClass);
+
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSLessThan))
+      .addDef(negCheck1)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(I.getOperand(i).getReg())
+      .addUse(ZeroReg);
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSLessThan))
+      .addDef(negCheck2)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(I.getOperand(i + 1).getReg())
+      .addUse(ZeroReg);
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSGreaterThan))
+      .addDef(negCheck3)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(SumVReg)
+      .addUse(I.getOperand(i + 1).getReg());
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLogicalAnd))
+      .addDef(negOverflow)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(negCheck1)
+      .addUse(negCheck2);
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLogicalAnd))
+      .addDef(negOverflowCheck)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(negOverflow)
+      .addUse(negCheck3);
+
+  // Overflow occurred if either the positive or the negative condition holds.
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLogicalOr))
+      .addDef(OverflowVReg)
+      .addUse(GR.getSPIRVTypeID(BoolType))
+      .addUse(negOverflowCheck)
+      .addUse(posOverflowCheck);
+
+  // Construct the result struct containing the sum and the overflow flag.
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeConstruct))
+      .addDef(StructVReg)
+      .addUse(GR.getSPIRVTypeID(StructType))
+      .addUse(SumVReg)
+      .addUse(OverflowVReg);
+
+  Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
+  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
+
+  // Extract the sum into the first def and the overflow flag into HigherVReg.
+  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
+    auto MIB =
+        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
+            .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
+            .addUse(i == 1 ? GR.getSPIRVTypeID(BoolType)
+                           : GR.getSPIRVTypeID(ResType))
+            .addUse(StructVReg)
+            .addImm(i);
+    Result &= MIB.constrainAllUses(TII, TRI, RBI);
+  }
+  Register FalseReg = MRI->createGenericVirtualRegister(LLT::scalar(1));
+  MRI->setRegClass(FalseReg, &SPIRV::IDRegClass);
+
+  // Initialize FalseReg to boolean false with OpConstantNull.
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
+      .addDef(FalseReg)
+      .addUse(GR.getSPIRVTypeID(BoolType));
+
+  // Materialize the overflow def as (HigherVReg != false).
+  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLogicalNotEqual))
+      .addDef(I.getOperand(1).getReg())
+      .addUse(BoolTypeReg)
+      .addUse(HigherVReg)
+      .addUse(FalseReg)
+      .constrainAllUses(TII, TRI, RBI);
+  return true;
+}
+
 bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                    const SPIRVType *ResType,
                                                    MachineInstr &I) const {
New test file, lines changed: 161 additions & 0 deletions
@@ -0,0 +1,161 @@
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}

; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}

;===---------------------------------------------------------------------===//
; Type definitions.
; CHECK-DAG: %[[I16:.*]] = OpTypeInt 16 0
; CHECK-DAG: %[[Bool:.*]] = OpTypeBool
; CHECK-DAG: %[[I32:.*]] = OpTypeInt 32 0
; CHECK-DAG: %[[I64:.*]] = OpTypeInt 64 0
; CHECK-DAG: %[[PtrI16:.*]] = OpTypePointer Function %[[I16]]
; CHECK-DAG: %[[PtrI32:.*]] = OpTypePointer Function %[[I32]]
; CHECK-DAG: %[[PtrI64:.*]] = OpTypePointer Function %[[I64]]
; CHECK-DAG: %[[StructI16:.*]] = OpTypeStruct %[[I16]] %[[Bool]]
; CHECK-DAG: %[[StructI32:.*]] = OpTypeStruct %[[I32]] %[[Bool]]
; CHECK-DAG: %[[StructI64:.*]] = OpTypeStruct %[[I64]] %[[Bool]]
; CHECK-DAG: %[[ZeroI16:.*]] = OpConstant %[[I16]] 0
; CHECK-DAG: %[[ZeroI32:.*]] = OpConstant %[[I32]] 0
; CHECK-DAG: %[[ZeroI64:.*]] = OpConstantNull %[[I64]]
; CHECK-DAG: %[[V4I32:.*]] = OpTypeVector %[[I32]] 4
; CHECK-DAG: %[[V4Bool:.*]] = OpTypeVector %[[Bool]] 4
; CHECK-DAG: %[[PtrV4I32:.*]] = OpTypePointer Function %[[V4I32]]
; CHECK-DAG: %[[StructV4I32:.*]] = OpTypeStruct %[[V4I32]] %[[V4Bool]]
; CHECK-DAG: %[[ZeroV4I32:.*]] = OpConstantNull %[[V4I32]]
;===---------------------------------------------------------------------===//
; Function for i16 sadd.with.overflow.
; CHECK: OpFunction
; CHECK: %[[A16:.*]] = OpFunctionParameter %[[I16]]
; CHECK: %[[B16:.*]] = OpFunctionParameter %[[I16]]
; CHECK: %[[Ptr16:.*]] = OpFunctionParameter %[[PtrI16]]
; CHECK: %[[Sum16:.*]] = OpIAdd %[[I16]] %[[A16]] %[[B16]]
; CHECK: %[[PosCmp16_1:.*]] = OpSGreaterThan %[[Bool]] %[[A16]] %[[ZeroI16]]
; CHECK: %[[PosCmp16_2:.*]] = OpSGreaterThan %[[Bool]] %[[B16]] %[[ZeroI16]]
; CHECK: %[[PosCmp16_3:.*]] = OpSLessThan %[[Bool]] %[[Sum16]] %[[B16]]
; CHECK: %[[PosCond16:.*]] = OpLogicalAnd %[[Bool]] %[[PosCmp16_1]] %[[PosCmp16_2]]
; CHECK: %[[PosOverflow16:.*]] = OpLogicalAnd %[[Bool]] %[[PosCond16]] %[[PosCmp16_3]]
; CHECK: %[[NegCmp16_1:.*]] = OpSLessThan %[[Bool]] %[[A16]] %[[ZeroI16]]
; CHECK: %[[NegCmp16_2:.*]] = OpSLessThan %[[Bool]] %[[B16]] %[[ZeroI16]]
; CHECK: %[[NegCmp16_3:.*]] = OpSGreaterThan %[[Bool]] %[[Sum16]] %[[B16]]
; CHECK: %[[NegCond16:.*]] = OpLogicalAnd %[[Bool]] %[[NegCmp16_1]] %[[NegCmp16_2]]
; CHECK: %[[NegOverflow16:.*]] = OpLogicalAnd %[[Bool]] %[[NegCond16]] %[[NegCmp16_3]]
; CHECK: %[[Overflow16:.*]] = OpLogicalOr %[[Bool]] %[[NegOverflow16]] %[[PosOverflow16]]
; CHECK: %[[Comp16:.*]] = OpCompositeConstruct %[[StructI16]] %[[Sum16]] %[[Overflow16]]
; CHECK: %[[ExtOver16:.*]] = OpCompositeExtract %[[Bool]] %[[Comp16]] 1
; CHECK: %[[Final16:.*]] = OpLogicalNotEqual %[[Bool]] %[[ExtOver16]] %[[#]]
; CHECK: OpReturn
define spir_func void @smulo_i16(i16 %a, i16 %b, ptr nocapture %c) {
entry:
  %umul = tail call { i16, i1 } @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
  %cmp = extractvalue { i16, i1 } %umul, 1
  %umul.value = extractvalue { i16, i1 } %umul, 0
  %storemerge = select i1 %cmp, i16 0, i16 %umul.value
  store i16 %storemerge, ptr %c, align 1
  ret void
}

;===---------------------------------------------------------------------===//
; Function for i32 sadd.with.overflow.
; CHECK: OpFunction
; CHECK: %[[A32:.*]] = OpFunctionParameter %[[I32]]
; CHECK: %[[B32:.*]] = OpFunctionParameter %[[I32]]
; CHECK: %[[Ptr32:.*]] = OpFunctionParameter %[[PtrI32]]
; CHECK: %[[Sum32:.*]] = OpIAdd %[[I32]] %[[A32]] %[[B32]]
; CHECK: %[[PosCmp32_1:.*]] = OpSGreaterThan %[[Bool]] %[[A32]] %[[ZeroI32]]
; CHECK: %[[PosCmp32_2:.*]] = OpSGreaterThan %[[Bool]] %[[B32]] %[[ZeroI32]]
; CHECK: %[[PosCmp32_3:.*]] = OpSLessThan %[[Bool]] %[[Sum32]] %[[B32]]
; CHECK: %[[PosCond32:.*]] = OpLogicalAnd %[[Bool]] %[[PosCmp32_1]] %[[PosCmp32_2]]
; CHECK: %[[PosOverflow32:.*]] = OpLogicalAnd %[[Bool]] %[[PosCond32]] %[[PosCmp32_3]]
; CHECK: %[[NegCmp32_1:.*]] = OpSLessThan %[[Bool]] %[[A32]] %[[ZeroI32]]
; CHECK: %[[NegCmp32_2:.*]] = OpSLessThan %[[Bool]] %[[B32]] %[[ZeroI32]]
; CHECK: %[[NegCmp32_3:.*]] = OpSGreaterThan %[[Bool]] %[[Sum32]] %[[B32]]
; CHECK: %[[NegCond32:.*]] = OpLogicalAnd %[[Bool]] %[[NegCmp32_1]] %[[NegCmp32_2]]
; CHECK: %[[NegOverflow32:.*]] = OpLogicalAnd %[[Bool]] %[[NegCond32]] %[[NegCmp32_3]]
; CHECK: %[[Overflow32:.*]] = OpLogicalOr %[[Bool]] %[[NegOverflow32]] %[[PosOverflow32]]
; CHECK: %[[Comp32:.*]] = OpCompositeConstruct %[[StructI32]] %[[Sum32]] %[[Overflow32]]
; CHECK: %[[ExtOver32:.*]] = OpCompositeExtract %[[Bool]] %[[Comp32]] 1
; CHECK: %[[Final32:.*]] = OpLogicalNotEqual %[[Bool]] %[[ExtOver32]] %[[#]]
; CHECK: OpReturn
define spir_func void @smulo_i32(i32 %a, i32 %b, ptr nocapture %c) {
entry:
  %umul = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  %cmp = extractvalue { i32, i1 } %umul, 1
  %umul.value = extractvalue { i32, i1 } %umul, 0
  %storemerge = select i1 %cmp, i32 0, i32 %umul.value
  store i32 %storemerge, ptr %c, align 4
  ret void
}

;===---------------------------------------------------------------------===//
; Function for i64 sadd.with.overflow.
; CHECK: OpFunction
; CHECK: %[[A64:.*]] = OpFunctionParameter %[[I64]]
; CHECK: %[[B64:.*]] = OpFunctionParameter %[[I64]]
; CHECK: %[[Ptr64:.*]] = OpFunctionParameter %[[PtrI64]]
; CHECK: %[[Sum64:.*]] = OpIAdd %[[I64]] %[[A64]] %[[B64]]
; CHECK: %[[PosCmp64_1:.*]] = OpSGreaterThan %[[Bool]] %[[A64]] %[[ZeroI64]]
; CHECK: %[[PosCmp64_2:.*]] = OpSGreaterThan %[[Bool]] %[[B64]] %[[ZeroI64]]
; CHECK: %[[PosCmp64_3:.*]] = OpSLessThan %[[Bool]] %[[Sum64]] %[[B64]]
; CHECK: %[[PosCond64:.*]] = OpLogicalAnd %[[Bool]] %[[PosCmp64_1]] %[[PosCmp64_2]]
; CHECK: %[[PosOverflow64:.*]] = OpLogicalAnd %[[Bool]] %[[PosCond64]] %[[PosCmp64_3]]
; CHECK: %[[NegCmp64_1:.*]] = OpSLessThan %[[Bool]] %[[A64]] %[[ZeroI64]]
; CHECK: %[[NegCmp64_2:.*]] = OpSLessThan %[[Bool]] %[[B64]] %[[ZeroI64]]
; CHECK: %[[NegCmp64_3:.*]] = OpSGreaterThan %[[Bool]] %[[Sum64]] %[[B64]]
; CHECK: %[[NegCond64:.*]] = OpLogicalAnd %[[Bool]] %[[NegCmp64_1]] %[[NegCmp64_2]]
; CHECK: %[[NegOverflow64:.*]] = OpLogicalAnd %[[Bool]] %[[NegCond64]] %[[NegCmp64_3]]
; CHECK: %[[Overflow64:.*]] = OpLogicalOr %[[Bool]] %[[NegOverflow64]] %[[PosOverflow64]]
; CHECK: %[[Comp64:.*]] = OpCompositeConstruct %[[StructI64]] %[[Sum64]] %[[Overflow64]]
; CHECK: %[[ExtOver64:.*]] = OpCompositeExtract %[[Bool]] %[[Comp64]] 1
; CHECK: %[[Final64:.*]] = OpLogicalNotEqual %[[Bool]] %[[ExtOver64]] %[[#]]
; CHECK: OpReturn
define spir_func void @smulo_i64(i64 %a, i64 %b, ptr nocapture %c) {
entry:
  %umul = tail call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
  %cmp = extractvalue { i64, i1 } %umul, 1
  %umul.value = extractvalue { i64, i1 } %umul, 0
  %storemerge = select i1 %cmp, i64 0, i64 %umul.value
  store i64 %storemerge, ptr %c, align 8
  ret void
}

;===---------------------------------------------------------------------===//
; Function for vector (4 x i32) sadd.with.overflow.

; CHECK: OpFunction
; CHECK: %[[A4:.*]] = OpFunctionParameter %[[V4I32]]
; CHECK: %[[B4:.*]] = OpFunctionParameter %[[V4I32]]
; CHECK: %[[Ptr4:.*]] = OpFunctionParameter %[[PtrV4I32]]
; CHECK: %[[Sum4:.*]] = OpIAdd %[[V4I32]] %[[A4]] %[[B4]]
; CHECK: %[[PosCmp4_1:.*]] = OpSGreaterThan %[[V4Bool]] %[[A4]] %[[ZeroV4I32]]
; CHECK: %[[PosCmp4_2:.*]] = OpSGreaterThan %[[V4Bool]] %[[B4]] %[[ZeroV4I32]]
; CHECK: %[[PosCmp4_3:.*]] = OpSLessThan %[[V4Bool]] %[[Sum4]] %[[B4]]
; CHECK: %[[PosCond4:.*]] = OpLogicalAnd %[[V4Bool]] %[[PosCmp4_1]] %[[PosCmp4_2]]
; CHECK: %[[PosOverflow4:.*]] = OpLogicalAnd %[[V4Bool]] %[[PosCond4]] %[[PosCmp4_3]]
; CHECK: %[[NegCmp4_1:.*]] = OpSLessThan %[[V4Bool]] %[[A4]] %[[ZeroV4I32]]
; CHECK: %[[NegCmp4_2:.*]] = OpSLessThan %[[V4Bool]] %[[B4]] %[[ZeroV4I32]]
; CHECK: %[[NegCmp4_3:.*]] = OpSGreaterThan %[[V4Bool]] %[[Sum4]] %[[B4]]
; CHECK: %[[NegCond4:.*]] = OpLogicalAnd %[[V4Bool]] %[[NegCmp4_1]] %[[NegCmp4_2]]
; CHECK: %[[NegOverflow4:.*]] = OpLogicalAnd %[[V4Bool]] %[[NegCond4]] %[[NegCmp4_3]]
; CHECK: %[[Overflow4:.*]] = OpLogicalOr %[[V4Bool]] %[[NegOverflow4]] %[[PosOverflow4]]
; CHECK: %[[Comp4:.*]] = OpCompositeConstruct %[[StructV4I32]] %[[Sum4]] %[[Overflow4]]
; CHECK: %[[ExtOver4:.*]] = OpCompositeExtract %[[V4Bool]] %[[Comp4]] 1
; CHECK: %[[Final4:.*]] = OpLogicalNotEqual %[[V4Bool]] %[[ExtOver4]] %[[#]]
; CHECK: OpReturn
define spir_func void @smulo_v4i32(<4 x i32> %a, <4 x i32> %b, ptr nocapture %c) {
entry:
  %umul = tail call { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
  %cmp = extractvalue { <4 x i32>, <4 x i1> } %umul, 1
  %umul.value = extractvalue { <4 x i32>, <4 x i1> } %umul, 0
  %storemerge = select <4 x i1> %cmp, <4 x i32> zeroinitializer, <4 x i32> %umul.value
  store <4 x i32> %storemerge, ptr %c, align 16
  ret void
}

;===---------------------------------------------------------------------===//
; Declarations of the intrinsics.
declare { i16, i1 } @llvm.sadd.with.overflow.i16(i16, i16)
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64)
declare { <4 x i32>, <4 x i1> } @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>)
