Skip to content

Commit 881b001

Browse files
[ADT] Make internal methods of DenseMap/SmallDenseMap private (NFC) (llvm#165079)
This patch moves the init, copyFrom, and grow methods of DenseMap and SmallDenseMap from public to private to hide implementation details. The only caller affected is PhysicalRegisterUsageInfo, which called DenseMap::grow directly rather than DenseMap::reserve — most likely unintentionally — so this patch also updates that call site to use reserve.
1 parent b97835d commit 881b001

File tree

2 files changed: +91 additions, −91 deletions

llvm/include/llvm/ADT/DenseMap.h

Lines changed: 90 additions & 90 deletions
Original file line numberDiff line numberDiff line change
@@ -767,37 +767,6 @@ class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
767767
return *this;
768768
}
769769

770-
void copyFrom(const DenseMap &other) {
771-
this->destroyAll();
772-
deallocateBuckets();
773-
if (allocateBuckets(other.NumBuckets)) {
774-
this->BaseT::copyFrom(other);
775-
} else {
776-
NumEntries = 0;
777-
NumTombstones = 0;
778-
}
779-
}
780-
781-
void grow(unsigned AtLeast) {
782-
unsigned OldNumBuckets = NumBuckets;
783-
BucketT *OldBuckets = Buckets;
784-
785-
allocateBuckets(std::max<unsigned>(
786-
64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
787-
assert(Buckets);
788-
if (!OldBuckets) {
789-
this->BaseT::initEmpty();
790-
return;
791-
}
792-
793-
this->moveFromOldBuckets(
794-
llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));
795-
796-
// Free the old table.
797-
deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
798-
alignof(BucketT));
799-
}
800-
801770
void shrink_and_clear() {
802771
unsigned OldNumBuckets = NumBuckets;
803772
unsigned OldNumEntries = NumEntries;
@@ -855,6 +824,37 @@ class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
855824
NumTombstones = 0;
856825
}
857826
}
827+
828+
void copyFrom(const DenseMap &other) {
829+
this->destroyAll();
830+
deallocateBuckets();
831+
if (allocateBuckets(other.NumBuckets)) {
832+
this->BaseT::copyFrom(other);
833+
} else {
834+
NumEntries = 0;
835+
NumTombstones = 0;
836+
}
837+
}
838+
839+
void grow(unsigned AtLeast) {
840+
unsigned OldNumBuckets = NumBuckets;
841+
BucketT *OldBuckets = Buckets;
842+
843+
allocateBuckets(std::max<unsigned>(
844+
64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
845+
assert(Buckets);
846+
if (!OldBuckets) {
847+
this->BaseT::initEmpty();
848+
return;
849+
}
850+
851+
this->moveFromOldBuckets(
852+
llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));
853+
854+
// Free the old table.
855+
deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
856+
alignof(BucketT));
857+
}
858858
};
859859

860860
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
@@ -1007,65 +1007,6 @@ class SmallDenseMap
10071007
return *this;
10081008
}
10091009

1010-
void copyFrom(const SmallDenseMap &other) {
1011-
this->destroyAll();
1012-
deallocateBuckets();
1013-
allocateBuckets(other.getNumBuckets());
1014-
this->BaseT::copyFrom(other);
1015-
}
1016-
1017-
void init(unsigned InitNumEntries) {
1018-
auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
1019-
allocateBuckets(InitBuckets);
1020-
this->BaseT::initEmpty();
1021-
}
1022-
1023-
void grow(unsigned AtLeast) {
1024-
if (AtLeast > InlineBuckets)
1025-
AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
1026-
1027-
if (Small) {
1028-
// First move the inline buckets into a temporary storage.
1029-
AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1030-
BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1031-
BucketT *TmpEnd = TmpBegin;
1032-
1033-
// Loop over the buckets, moving non-empty, non-tombstones into the
1034-
// temporary storage. Have the loop move the TmpEnd forward as it goes.
1035-
const KeyT EmptyKey = this->getEmptyKey();
1036-
const KeyT TombstoneKey = this->getTombstoneKey();
1037-
for (BucketT &B : inlineBuckets()) {
1038-
if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
1039-
!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
1040-
assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1041-
"Too many inline buckets!");
1042-
::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
1043-
::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
1044-
++TmpEnd;
1045-
B.getSecond().~ValueT();
1046-
}
1047-
B.getFirst().~KeyT();
1048-
}
1049-
1050-
// AtLeast == InlineBuckets can happen if there are many tombstones,
1051-
// and grow() is used to remove them. Usually we always switch to the
1052-
// large rep here.
1053-
allocateBuckets(AtLeast);
1054-
this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
1055-
return;
1056-
}
1057-
1058-
LargeRep OldRep = std::move(*getLargeRep());
1059-
getLargeRep()->~LargeRep();
1060-
allocateBuckets(AtLeast);
1061-
1062-
this->moveFromOldBuckets(OldRep.buckets());
1063-
1064-
// Free the old table.
1065-
deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1066-
alignof(BucketT));
1067-
}
1068-
10691010
void shrink_and_clear() {
10701011
unsigned OldSize = this->size();
10711012
this->destroyAll();
@@ -1162,6 +1103,65 @@ class SmallDenseMap
11621103
new (getLargeRep()) LargeRep{NewBuckets, Num};
11631104
}
11641105
}
1106+
1107+
void init(unsigned InitNumEntries) {
1108+
auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
1109+
allocateBuckets(InitBuckets);
1110+
this->BaseT::initEmpty();
1111+
}
1112+
1113+
void copyFrom(const SmallDenseMap &other) {
1114+
this->destroyAll();
1115+
deallocateBuckets();
1116+
allocateBuckets(other.getNumBuckets());
1117+
this->BaseT::copyFrom(other);
1118+
}
1119+
1120+
void grow(unsigned AtLeast) {
1121+
if (AtLeast > InlineBuckets)
1122+
AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
1123+
1124+
if (Small) {
1125+
// First move the inline buckets into a temporary storage.
1126+
AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1127+
BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1128+
BucketT *TmpEnd = TmpBegin;
1129+
1130+
// Loop over the buckets, moving non-empty, non-tombstones into the
1131+
// temporary storage. Have the loop move the TmpEnd forward as it goes.
1132+
const KeyT EmptyKey = this->getEmptyKey();
1133+
const KeyT TombstoneKey = this->getTombstoneKey();
1134+
for (BucketT &B : inlineBuckets()) {
1135+
if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
1136+
!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
1137+
assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1138+
"Too many inline buckets!");
1139+
::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
1140+
::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
1141+
++TmpEnd;
1142+
B.getSecond().~ValueT();
1143+
}
1144+
B.getFirst().~KeyT();
1145+
}
1146+
1147+
// AtLeast == InlineBuckets can happen if there are many tombstones,
1148+
// and grow() is used to remove them. Usually we always switch to the
1149+
// large rep here.
1150+
allocateBuckets(AtLeast);
1151+
this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
1152+
return;
1153+
}
1154+
1155+
LargeRep OldRep = std::move(*getLargeRep());
1156+
getLargeRep()->~LargeRep();
1157+
allocateBuckets(AtLeast);
1158+
1159+
this->moveFromOldBuckets(OldRep.buckets());
1160+
1161+
// Free the old table.
1162+
deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1163+
alignof(BucketT));
1164+
}
11651165
};
11661166

11671167
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,

llvm/lib/CodeGen/RegisterUsageInfo.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ void PhysicalRegisterUsageInfo::setTargetMachine(const TargetMachine &TM) {
4444
}
4545

4646
bool PhysicalRegisterUsageInfo::doInitialization(Module &M) {
47-
RegMasks.grow(M.size());
47+
RegMasks.reserve(M.size());
4848
return false;
4949
}
5050

0 commit comments

Comments
 (0)