Commit 48ce7d0

[GR-18163] Fix capacity computation for huge Hash
PullRequest: truffleruby/3300
2 parents 0f33c69 + 85a9efa

3 files changed: +19 -6 lines

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -8,6 +8,7 @@ Bug fixes:
 * Fix `rb_id2name` to ensure the native string will have the same lifetime as the id (#2630, @aardvark179).
 * Fix `MatchData#[]` exception when passing a length argument larger than the number of match values (#2636, @nirvdrum).
 * Fix `MatchData#[]` exception when supplying a large negative index along with a length argument (@nirvdrum).
+* Fix capacity computation for huge `Hash` (#2635, @eregon).
 
 Compatibility:
 

src/main/java/org/truffleruby/core/hash/library/BucketsHashStore.java

Lines changed: 17 additions & 5 deletions
@@ -104,18 +104,30 @@ public BucketsHashStore(Entry[] entries, Entry firstInSequence, Entry lastInSequence,
             1073741824 + 85
     };
 
+    private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
+    private static final int MAX_ENTRIES = (int) (MAX_ARRAY_SIZE * LOAD_FACTOR);
+
     // endregion
     // region Utilities
 
     @TruffleBoundary
-    static int capacityGreaterThan(int size) {
+    static int growthCapacityGreaterThan(int size) {
+        int buckets = 0;
         for (int capacity : CAPACITIES) {
             if (capacity > size) {
-                return capacity;
+                buckets = capacity * OVERALLOCATE_FACTOR;
+                break;
             }
         }
 
-        return CAPACITIES[CAPACITIES.length - 1];
+        if (buckets > 0) {
+            assert buckets * LOAD_FACTOR > size;
+            return buckets;
+        } else if (size < MAX_ENTRIES) {
+            return MAX_ARRAY_SIZE;
+        } else {
+            throw new OutOfMemoryError("too big Hash: " + size + " entries");
+        }
     }
 
     static int getBucketIndex(int hashed, int bucketsCount) {
@@ -124,7 +136,7 @@ static int getBucketIndex(int hashed, int bucketsCount) {
 
     @TruffleBoundary
     private void resize(RubyHash hash, int size) {
-        final int bucketsCount = capacityGreaterThan(size) * OVERALLOCATE_FACTOR;
+        final int bucketsCount = growthCapacityGreaterThan(size);
         final Entry[] newEntries = new Entry[bucketsCount];
 
         final Entry firstInSequence = this.firstInSequence;
@@ -589,7 +601,7 @@ public static class GenericHashLiteralNode extends HashLiteralNode {
 
         public GenericHashLiteralNode(RubyNode[] keyValues) {
            super(keyValues);
-            bucketsCount = capacityGreaterThan(keyValues.length / 2) * OVERALLOCATE_FACTOR;
+            bucketsCount = growthCapacityGreaterThan(keyValues.length / 2);
         }
 
         @ExplodeLoop
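
For context on why the helper was reworked: the bucket count is computed in 32-bit arithmetic, and multiplying the largest entries of CAPACITIES by the over-allocation factor silently wraps around. Below is a minimal sketch of that failure mode, not part of the commit, assuming an OVERALLOCATE_FACTOR of 4 purely for illustration (the real constant is defined elsewhere in BucketsHashStore and is not shown in this diff).

    public class CapacityOverflowSketch {
        // Assumed value, for illustration only; the real OVERALLOCATE_FACTOR is not in this diff.
        private static final int OVERALLOCATE_FACTOR = 4;
        // Last entry of the CAPACITIES table shown as context in the hunk above.
        private static final int LARGEST_CAPACITY = 1073741824 + 85;

        public static void main(String[] args) {
            long exact = (long) LARGEST_CAPACITY * OVERALLOCATE_FACTOR;
            int wrapped = LARGEST_CAPACITY * OVERALLOCATE_FACTOR;

            // The exact product does not fit in an int, so the 32-bit multiplication wraps.
            System.out.println("exact:   " + exact);   // 4294967636
            System.out.println("wrapped: " + wrapped); // 340 -- far too few buckets for a huge Hash
        }
    }

The old pattern at the call sites, capacityGreaterThan(size) * OVERALLOCATE_FACTOR, used such a wrapped product directly. The new growthCapacityGreaterThan checks that the product is still positive and asserts it covers the requested size, falls back to MAX_ARRAY_SIZE for sizes past the CAPACITIES table, and raises OutOfMemoryError only when even that many buckets cannot hold the entries.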

src/main/java/org/truffleruby/core/hash/library/PackedHashStoreLibrary.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,7 @@ private static boolean verifyIntegerHashes(Object[] store) {
124124

125125
@TruffleBoundary
126126
private static void promoteToBuckets(RubyHash hash, Object[] store, int size) {
127-
final Entry[] buckets = new Entry[BucketsHashStore.capacityGreaterThan(size)];
127+
final Entry[] buckets = new Entry[BucketsHashStore.growthCapacityGreaterThan(size)];
128128

129129
Entry firstInSequence = null;
130130
Entry previousInSequence = null;
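
All three call sites touched by this commit (resize and GenericHashLiteralNode in BucketsHashStore, and promoteToBuckets here) now delegate sizing to the same helper, so the new limits apply uniformly. A quick arithmetic check of those limits, assuming a LOAD_FACTOR of 0.75 (the constant's actual value is defined elsewhere in BucketsHashStore and is not part of this diff):

    public class HashLimitsSketch {
        public static void main(String[] args) {
            int maxArraySize = Integer.MAX_VALUE - 8;    // MAX_ARRAY_SIZE in the diff
            double loadFactor = 0.75;                    // assumed value of LOAD_FACTOR
            int maxEntries = (int) (maxArraySize * loadFactor);

            System.out.println(maxArraySize); // 2147483639
            System.out.println(maxEntries);   // 1610612729
            // Sizes past the CAPACITIES table but below maxEntries get a MAX_ARRAY_SIZE-sized
            // bucket array; anything at or above maxEntries raises OutOfMemoryError.
        }
    }

The Integer.MAX_VALUE - 8 ceiling mirrors the conservative maximum array size used in parts of the JDK, leaving headroom for VM array header words.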

0 commit comments