Commit 6e2ec24 (1 parent: 082c256)

FPInfo: IRTranslator and CallLowering

5 files changed: +142 additions, -51 deletions

llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h (2 additions, 0 deletions)

@@ -802,6 +802,8 @@ class MachineIRBuilder {
   MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res,
                                       const SrcOp &Op);
 
+  MachineInstrBuilder buildTruncLike(const DstOp &Res, const SrcOp &Op);
+
   /// Build and inserts \p Res = \p G_AND \p Op, \p LowBitsSet(ImmOp)
   /// Since there is no G_ZEXT_INREG like G_SEXT_INREG, the instruction is
   /// emulated using G_AND.
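
Only the declaration of buildTruncLike lands in this header. A minimal sketch of a plausible body, inferred from the call sites in CallLowering.cpp below — this is an assumption, not the committed implementation:

    // Hypothetical body for buildTruncLike (assumed, not shown by the commit):
    // behaves like buildTrunc, but tolerates FPInfo-carrying types.
    MachineInstrBuilder MachineIRBuilder::buildTruncLike(const DstOp &Res,
                                                         const SrcOp &Op) {
      LLT DstTy = Res.getLLTTy(*getMRI());
      LLT SrcTy = Op.getLLTTy(*getMRI());

      // Non-float destination: an ordinary integer truncation.
      if (!DstTy.getScalarType().isFloat())
        return buildTrunc(Res, Op);

      // Float destination fed from an integer-typed source (the common case
      // at the call sites, where parts were merged in the integer domain):
      // truncate the bits, then reinterpret them as the float type.
      if (!SrcTy.getScalarType().isFloat()) {
        auto IntTrunc = buildTrunc(DstTy.changeToInteger(), Op);
        return buildBitcast(Res, IntTrunc);
      }

      // Both sides float: a genuine floating-point narrowing.
      return buildFPTrunc(Res, Op);
    }

Whatever the committed body looks like, the point of the helper is that callers no longer need to care whether the destination LLT is an integer or a float type.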

llvm/lib/CodeGen/GlobalISel/CallLowering.cpp (65 additions, 22 deletions)

@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
@@ -20,9 +21,11 @@
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Target/TargetMachine.h"
 
 #define DEBUG_TYPE "call-lowering"
@@ -409,12 +412,12 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     // Sometimes pointers are passed zero extended.
     LLT OrigTy = MRI.getType(OrigRegs[0]);
     if (OrigTy.isPointer()) {
-      LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
+      LLT IntPtrTy = LLT::integer(OrigTy.getSizeInBits());
       B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
       return;
     }
 
-    B.buildTrunc(OrigRegs[0], SrcReg);
+    B.buildTruncLike(OrigRegs[0], SrcReg);
     return;
   }
 
@@ -423,11 +426,22 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     LLT OrigTy = MRI.getType(OrigRegs[0]);
 
     unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
-    if (SrcSize == OrigTy.getSizeInBits())
-      B.buildMergeValues(OrigRegs[0], Regs);
-    else {
-      auto Widened = B.buildMergeLikeInstr(LLT::scalar(SrcSize), Regs);
-      B.buildTrunc(OrigRegs[0], Widened);
+    if (SrcSize == OrigTy.getSizeInBits()) {
+      if (OrigTy.isFloat() && !PartLLT.isFloat()) {
+        auto Merge = B.buildMergeValues(OrigTy.changeToInteger(), Regs);
+        B.buildBitcast(OrigRegs[0], Merge);
+      } else if (!OrigTy.isFloat() && PartLLT.isFloat()) {
+        SmallVector<Register> CastRegs(Regs.size());
+        for (auto&& [Idx, Reg]: enumerate(Regs))
+          CastRegs[Idx] = B.buildBitcast(PartLLT.changeToInteger(), Reg).getReg(0);
+
+        B.buildMergeValues(OrigRegs[0], CastRegs);
+      } else {
+        B.buildMergeValues(OrigRegs[0], Regs);
+      }
+    } else {
+      auto Widened = B.buildMergeLikeInstr(LLT::integer(SrcSize), Regs);
+      B.buildTruncLike(OrigRegs[0], Widened);
     }
 
     return;
@@ -492,19 +506,25 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     SmallVector<Register, 8> EltMerges;
     int PartsPerElt =
         divideCeil(DstEltTy.getSizeInBits(), PartLLT.getSizeInBits());
-    LLT ExtendedPartTy = LLT::scalar(PartLLT.getSizeInBits() * PartsPerElt);
+    LLT ExtendedPartTy = LLT::integer(PartLLT.getSizeInBits() * PartsPerElt);
 
     for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
       auto Merge =
           B.buildMergeLikeInstr(ExtendedPartTy, Regs.take_front(PartsPerElt));
       if (ExtendedPartTy.getSizeInBits() > RealDstEltTy.getSizeInBits())
-        Merge = B.buildTrunc(RealDstEltTy, Merge);
+        Merge = B.buildTruncLike(RealDstEltTy, Merge);
       // Fix the type in case this is really a vector of pointers.
-      MRI.setType(Merge.getReg(0), RealDstEltTy);
-      EltMerges.push_back(Merge.getReg(0));
+      Register MergeReg = Merge.getReg(0);
+
+      if (RealDstEltTy.isPointer()) {
+        MRI.setType(MergeReg, RealDstEltTy);
+      } else if (RealDstEltTy.isFloat() &&
+                 !MRI.getType(MergeReg).getScalarType().isFloat()) {
+        MergeReg = B.buildBitcast(RealDstEltTy, MergeReg).getReg(0);
+      }
+      EltMerges.push_back(MergeReg);
       Regs = Regs.drop_front(PartsPerElt);
     }
-
     B.buildBuildVector(OrigRegs[0], EltMerges);
   } else {
     // Vector was split, and elements promoted to a wider type.
@@ -532,9 +552,12 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     SmallVector<Register, 0> BVRegs;
     BVRegs.reserve(Regs.size() * EltPerReg);
     for (Register R : Regs) {
-      auto Unmerge = B.buildUnmerge(OriginalEltTy, R);
-      for (unsigned K = 0; K < EltPerReg; ++K)
-        BVRegs.push_back(B.buildAnyExt(PartLLT, Unmerge.getReg(K)).getReg(0));
+      auto Unmerge = B.buildUnmerge(OriginalEltTy.changeToInteger(), R);
+      for (unsigned K = 0; K < EltPerReg; ++K) {
+        Register BVreg;
+        BVreg = B.buildAnyExt(PartLLT, Unmerge.getReg(K)).getReg(0);
+        BVRegs.push_back(BVreg);
+      }
     }
 
     // We may have some more elements in BVRegs, e.g. if we have 2 s32 pieces
@@ -545,7 +568,8 @@ static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
     }
     BuildVec = B.buildBuildVector(BVType, BVRegs).getReg(0);
   }
-  B.buildTrunc(OrigRegs[0], BuildVec);
+
+  B.buildTruncLike(OrigRegs[0], BuildVec);
 }
 }
 
@@ -565,6 +589,8 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
565589
if (PartTy.isVector() == SrcTy.isVector() &&
566590
PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
567591
assert(DstRegs.size() == 1);
592+
if (PartTy.getScalarType().isFloat() && SrcTy.getScalarType().isFloat())
593+
ExtendOp = TargetOpcode::G_FPEXT;
568594
B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
569595
return;
570596
}
@@ -573,8 +599,18 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
573599
TypeSize::isKnownGT(PartSize, SrcTy.getElementType().getSizeInBits())) {
574600
// Vector was scalarized, and the elements extended.
575601
auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
576-
for (int i = 0, e = DstRegs.size(); i != e; ++i)
577-
B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
602+
for (int i = 0, e = DstRegs.size(); i != e; ++i) {
603+
Register Unmerge = UnmergeToEltTy.getReg(i);
604+
if (SrcTy.isFloatVector() && PartTy.isFloat()) {
605+
B.buildFPExt(DstRegs[i], Unmerge);
606+
continue;
607+
}
608+
609+
if (SrcTy.isFloatVector() && !PartTy.isFloat())
610+
Unmerge = B.buildBitcast(SrcTy.getElementType().changeToInteger(), Unmerge).getReg(0);
611+
612+
B.buildAnyExt(DstRegs[i], Unmerge);
613+
}
578614
return;
579615
}
580616

@@ -590,6 +626,9 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
590626

591627
LLT GCDTy = getGCDType(SrcTy, PartTy);
592628
if (GCDTy == PartTy) {
629+
if (SrcTy.getScalarType().isFloat() && !PartTy.getScalarType().isFloat())
630+
SrcReg = B.buildBitcast(SrcTy.changeToInteger(), SrcReg).getReg(0);
631+
593632
// If this already evenly divisible, we can create a simple unmerge.
594633
B.buildUnmerge(DstRegs, SrcReg);
595634
return;
@@ -599,8 +638,11 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
599638
SrcTy.getScalarSizeInBits() > PartTy.getSizeInBits()) {
600639
LLT ExtTy =
601640
LLT::vector(SrcTy.getElementCount(),
602-
LLT::scalar(PartTy.getScalarSizeInBits() * DstRegs.size() /
603-
SrcTy.getNumElements()));
641+
LLT::integer(PartTy.getScalarSizeInBits() * DstRegs.size() /
642+
SrcTy.getNumElements()));
643+
if (SrcTy.isFloatVector())
644+
SrcReg = B.buildBitcast(SrcTy.changeToInteger(), SrcReg).getReg(0);
645+
604646
auto Ext = B.buildAnyExt(ExtTy, SrcReg);
605647
B.buildUnmerge(DstRegs, Ext);
606648
return;
@@ -626,7 +668,7 @@ static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
626668
// For scalars, it's common to be able to use a simple extension.
627669
if (SrcTy.isScalar() && DstTy.isScalar()) {
628670
CoveringSize = alignTo(SrcSize, DstSize);
629-
LLT CoverTy = LLT::scalar(CoveringSize);
671+
LLT CoverTy = LLT::integer(CoveringSize);
630672
UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
631673
} else {
632674
// Widen to the common type.
@@ -822,8 +864,9 @@ bool CallLowering::handleAssignments(ValueHandler &Handler,
822864
if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy &&
823865
VA.getLocInfo() != CCValAssign::Indirect) {
824866
assert(Args[i].OrigRegs.size() == 1);
867+
unsigned ExtendOp = extendOpFromFlags(Args[i].Flags[0]);
825868
buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
826-
ValTy, extendOpFromFlags(Args[i].Flags[0]));
869+
ValTy, ExtendOp);
827870
}
828871

829872
bool IndirectParameterPassingHandled = false;
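
The outgoing direction in buildCopyToRegs applies the same discipline: a floating-point promotion (say, f16 widened to f32 for the ABI) must use G_FPEXT, since any-extending float bits has no defined meaning, and a float vector split into integer parts must be bitcast before the unmerge. A hedged sketch of the split case with illustrative types (the variable names match buildCopyToRegs; the <2 x f32> scenario is not from the commit):

    // Splitting <2 x f32> into two s32 ABI parts. Emits roughly:
    //   %c:_(<2 x s32>) = G_BITCAST %v:_(<2 x f32>)
    //   %p0:_(s32), %p1:_(s32) = G_UNMERGE_VALUES %c:_(<2 x s32>)
    if (SrcTy.getScalarType().isFloat() && !PartTy.getScalarType().isFloat())
      SrcReg = B.buildBitcast(SrcTy.changeToInteger(), SrcReg).getReg(0);
    B.buildUnmerge(DstRegs, SrcReg);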

llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp (23 additions, 23 deletions)

@@ -862,7 +862,7 @@ bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
   // This value may be smaller or larger than the target's pointer type, and
   // therefore require extension or truncating.
   auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
-  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
+  const LLT PtrScalarTy = LLT::integer(DL->getTypeSizeInBits(PtrIRTy));
   Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
 
   JT.Reg = Sub.getReg(0);
@@ -879,7 +879,8 @@ bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
   auto Cst = getOrCreateVReg(
       *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
   Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
-  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
+  LLT CmpTy = LLT::integer(1);
+  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, CmpTy, Sub, Cst);
 
   auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
 
@@ -910,7 +911,7 @@ void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
     return;
   }
 
-  const LLT i1Ty = LLT::scalar(1);
+  const LLT i1Ty = LLT::integer(1);
   // Build the compare.
   if (!CB.CmpMHS) {
     const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
@@ -1092,14 +1093,14 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
   LLT MaskTy = SwitchOpTy;
   if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
       !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
-    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
+    MaskTy = LLT::integer(PtrTy.getSizeInBits());
   else {
     // Ensure that the type will fit the mask value.
     for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
       if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
         // Switch table case range are encoded into series of masks.
         // Just use pointer type, it's guaranteed to fit.
-        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
+        MaskTy = LLT::integer(PtrTy.getSizeInBits());
         break;
       }
     }
@@ -1122,8 +1123,9 @@ void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
   if (!B.FallthroughUnreachable) {
     // Conditional branch to the default block.
     auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
-    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
-                                  RangeSub, RangeCst);
+    LLT CmpTy = LLT::integer(1);
+    auto RangeCmp =
+        MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, CmpTy, RangeSub, RangeCst);
     MIB.buildBrCond(RangeCmp, *B.Default);
   }
 
@@ -1141,6 +1143,7 @@ void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
   MIB.setMBB(*SwitchBB);
 
   LLT SwitchTy = getLLTForMVT(BB.RegVT);
+  LLT I1 = LLT::integer(1);
   Register Cmp;
   unsigned PopCount = llvm::popcount(B.Mask);
   if (PopCount == 1) {
@@ -1149,14 +1152,12 @@ void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
     auto MaskTrailingZeros =
         MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
     Cmp =
-        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
-            .getReg(0);
+        MIB.buildICmp(ICmpInst::ICMP_EQ, I1, Reg, MaskTrailingZeros).getReg(0);
   } else if (PopCount == BB.Range) {
     // There is only one zero bit in the range, test for it directly.
     auto MaskTrailingOnes =
         MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
-    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
-              .getReg(0);
+    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, I1, Reg, MaskTrailingOnes).getReg(0);
   } else {
     // Make desired shift.
     auto CstOne = MIB.buildConstant(SwitchTy, 1);
@@ -1166,8 +1167,7 @@ void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
     auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
     auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
     auto CstZero = MIB.buildConstant(SwitchTy, 0);
-    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
-              .getReg(0);
+    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, I1, AndOp, CstZero).getReg(0);
   }
 
   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
@@ -1691,7 +1691,7 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
     SrcRegs.push_back(SrcReg);
   }
 
-  LLT SizeTy = LLT::scalar(MinPtrSize);
+  LLT SizeTy = LLT::integer(MinPtrSize);
 
   // The size operand should be the minimum of the pointer sizes.
   Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
@@ -2812,7 +2812,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
     LLT MemTy = Info.memVT.isSimple()
                     ? getLLTForMVT(Info.memVT.getSimpleVT())
-                    : LLT::scalar(Info.memVT.getStoreSizeInBits());
+                    : LLT::integer(Info.memVT.getStoreSizeInBits());
 
     // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
     // didn't yield anything useful.
@@ -3158,7 +3158,7 @@ bool IRTranslator::translateInsertElement(const User &U,
   if (!Idx)
     Idx = getOrCreateVReg(*U.getOperand(2));
   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
-    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
+    const LLT VecIdxTy = LLT::integer(PreferredVecIdxWidth);
     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
   }
   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
@@ -3201,7 +3201,7 @@ bool IRTranslator::translateInsertVector(const User &U,
   if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
     // We are inserting an illegal fixed vector into a scalable
     // vector, use a scalar element insert.
-    LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
+    LLT VecIdxTy = LLT::integer(PreferredVecIdxWidth);
     Register Idx = getOrCreateVReg(*CI);
     auto ScaledIndex = MIRBuilder.buildMul(
         VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
@@ -3239,7 +3239,7 @@ bool IRTranslator::translateExtractElement(const User &U,
   if (!Idx)
     Idx = getOrCreateVReg(*U.getOperand(1));
   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
-    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
+    const LLT VecIdxTy = LLT::integer(PreferredVecIdxWidth);
     Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
   }
   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
@@ -3279,7 +3279,7 @@ bool IRTranslator::translateExtractVector(const User &U,
   if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
     // We are extracting an illegal fixed vector from a scalable
    // vector, use a scalar element extract.
-    LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
+    LLT VecIdxTy = LLT::integer(PreferredVecIdxWidth);
     Register Idx = getOrCreateVReg(*CI);
     auto ScaledIndex = MIRBuilder.buildMul(
         VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
@@ -3868,8 +3868,8 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
   // Otherwise, emit a volatile load to retrieve the stack guard value.
   if (TLI->useLoadStackGuardNode(*ParentBB->getBasicBlock()->getModule())) {
-    Guard =
-        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
+    LLT RegTy = LLT::integer(PtrTy.getSizeInBits());
+    Guard = MRI->createGenericVirtualRegister(RegTy);
     getStackGuard(Guard, *CurBuilder);
   } else {
     // TODO: test using android subtarget when we support @llvm.thread.pointer.
@@ -3885,8 +3885,8 @@ bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
   }
 
   // Perform the comparison.
-  auto Cmp =
-      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
+  LLT I1 = LLT::integer(1);
+  auto Cmp = CurBuilder->buildICmp(CmpInst::ICMP_NE, I1, Guard, GuardVal);
   // If the guard/stackslot do not equal, branch to failure MBB.
   CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
   // Otherwise branch to success MBB.
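
Every IRTranslator change in this commit is the same mechanical substitution: values that are integers by construction (i1 compare results, jump-table offsets, bit-test masks, vector indices, memop sizes, stack-guard words) are now typed with LLT::integer instead of the kind-agnostic LLT::scalar. Assuming LLT::integer takes a bit width exactly as LLT::scalar does (which the diff strongly suggests), the pattern looks like this; LHS and RHS are illustrative registers:

    // Before FPInfo: a 1-bit scalar of unspecified kind.
    //   auto Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), LHS, RHS);
    // After: the compare result is explicitly an integer i1.
    LLT I1 = LLT::integer(1);
    auto Cmp = MIB.buildICmp(CmpInst::ICMP_NE, I1, LHS, RHS);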
