
Commit ed33431

Cleanup after code review
1 parent 7a4c91f commit ed33431

4 files changed: 11 additions & 9 deletions


src/java/org/apache/cassandra/index/sai/disk/v5/V5VectorPostingsWriter.java

Lines changed: 2 additions & 2 deletions
@@ -480,8 +480,8 @@ public static <T> RemappedPostings createGenericIdentityMapping(Map<VectorFloat<
             maxRow = max(maxRow, rowId);
         }

-        assert maxOldOrdinal >= 0;
-        assert maxRow >= 0;
+        if (maxOldOrdinal < 0 || maxRow < 0)
+            throw new IllegalStateException("maxOldOrdinal or maxRow is negative: " + maxOldOrdinal + ' ' + maxRow);

         var presentOrdinals = new FixedBitSet(maxOldOrdinal + 1);
         for (var entry : postingsMap.entrySet())
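The switch from assert to an explicit check matters because JVM assertions are disabled unless the process runs with -ea, so the old guard was a no-op in production. A minimal standalone sketch of the difference (the class and main harness are illustrative only):

public class AssertVsCheck
{
    public static void main(String[] args)
    {
        int maxOldOrdinal = -1; // simulate the bad state the guard protects against

        // Silently skipped unless the JVM is started with -ea / -enableassertions.
        assert maxOldOrdinal >= 0;

        // Always executes regardless of JVM flags, so the bad state fails fast
        // in production as well as in tests.
        if (maxOldOrdinal < 0)
            throw new IllegalStateException("maxOldOrdinal is negative: " + maxOldOrdinal);
    }
}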

src/java/org/apache/cassandra/index/sai/disk/vector/CompactionGraph.java

Lines changed: 1 addition & 1 deletion
@@ -495,7 +495,7 @@ static <T> boolean safePut(ChronicleMap<T, CompactionVectorPostings> map, T key,
         }
         catch (IllegalArgumentException e)
         {
-            logger.error("Error serializing postings to disk, will reattempt with compression", e);
+            logger.debug("Error serializing postings to disk, will reattempt with compression", e);
             // This is an extreme edge case where there are many duplicate vectors. This naive approach
             // means that we might have a smaller vector graph than desired, but at least we will not
             // fail to build the index.
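Downgrading the log level fits the in-code comment: the failure is expected in rare duplicate-heavy workloads and is recovered from immediately. A hypothetical sketch of the surrounding safePut pattern, assuming the caller retries with compression when false is returned (the names and the retry contract are assumptions, not the actual CompactionGraph code):

import net.openhft.chronicle.map.ChronicleMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical sketch; not the actual CompactionGraph implementation.
final class SafePutSketch
{
    private static final Logger logger = LoggerFactory.getLogger(SafePutSketch.class);

    static <K, V> boolean safePut(ChronicleMap<K, V> map, K key, V value)
    {
        try
        {
            map.put(key, value);
            return true; // the serialized entry fit within the map's size limit
        }
        catch (IllegalArgumentException e)
        {
            // Recoverable: the caller is expected to re-serialize the postings
            // with compression and retry, so debug is the appropriate level.
            logger.debug("Error serializing postings to disk, will reattempt with compression", e);
            return false;
        }
    }
}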

src/java/org/apache/cassandra/index/sai/disk/vector/VectorPostings.java

Lines changed: 7 additions & 5 deletions
@@ -222,8 +222,9 @@ public void write(Bytes out, CompactionVectorPostings postings) {
             for (int posting : postings.getPostings())
                 writer.writeVInt(posting);
         }
-        catch (Exception e)
+        catch (IOException e)
         {
+            // Not reachable because the Bytes out object does not throw an exception on write.
             throw new RuntimeException(e);
         }
     }
@@ -251,8 +252,9 @@ public CompactionVectorPostings read(Bytes in, CompactionVectorPostings using) {

             return new CompactionVectorPostings(ordinal, postingsList);
         }
-        catch (Exception e)
+        catch (IOException e)
         {
+            // Not reachable because the Bytes in object does not throw an exception on read.
             throw new RuntimeException(e);
         }
     }
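In both hunks above, narrowing the catch from Exception to IOException means only the declared, checked failure mode of the writer/reader API gets wrapped; an unexpected RuntimeException now propagates with its original type and stack trace instead of being re-wrapped. A minimal sketch of the behavioral difference (all names are illustrative):

import java.io.IOException;

public class CatchNarrowingSketch
{
    // Illustrative stand-in for a serializer whose API declares IOException.
    static void serialize(boolean failUnexpectedly) throws IOException
    {
        if (failUnexpectedly)
            throw new IllegalStateException("bug in caller-supplied data");
        throw new IOException("declared, checked failure");
    }

    public static void main(String[] args)
    {
        try
        {
            serialize(true);
        }
        catch (IOException e) // only the declared failure mode is wrapped
        {
            throw new RuntimeException(e);
        }
        // The IllegalStateException above escapes untouched, preserving its
        // type and stack trace; catch (Exception e) would have wrapped it too.
    }
}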
@@ -283,13 +285,13 @@ public BytesDataOutput(Bytes<?> bytes)
        }

        @Override
-       public void writeByte(byte b) throws IOException
+       public void writeByte(byte b)
        {
            bytes.writeByte(b);
        }

        @Override
-       public void writeBytes(byte[] b, int off, int len) throws IOException
+       public void writeBytes(byte[] b, int off, int len)
        {
            bytes.write(b, off, len);
        }
@@ -305,7 +307,7 @@ public BytesDataInput(Bytes<?> bytes)
        }

        @Override
-       public byte readByte() throws IOException
+       public byte readByte()
        {
            return bytes.readByte();
        }
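Dropping throws IOException from these overrides is legal because Java allows an overriding method to declare fewer checked exceptions than the method it overrides, and the Chronicle Bytes backing store never throws IOException. A small self-contained sketch of that rule (the interface is illustrative, not the real DataOutput):

import java.io.IOException;

public class ThrowsNarrowingSketch
{
    interface ByteSink
    {
        void writeByte(byte b) throws IOException;
    }

    // An override may legally omit checked exceptions declared by the method
    // it implements; callers going through ByteSink still see the broader
    // signature and must handle IOException.
    static final class InMemorySink implements ByteSink
    {
        private final StringBuilder sb = new StringBuilder();

        @Override
        public void writeByte(byte b) // no 'throws IOException' needed
        {
            sb.append((char) b); // in-memory write cannot fail with IOException
        }
    }

    public static void main(String[] args)
    {
        new InMemorySink().writeByte((byte) 'x'); // direct call: no checked exception
    }
}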

test/unit/org/apache/cassandra/index/sai/disk/vector/CompactionGraphTest.java

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ public void test50MEntries() throws Exception
         testEntries(50000000, 5000, 100);
     }

-    // Callers of this method are expected to provide enought iterations and postings added per iteration
+    // Callers of this method are expected to provide enough iterations and postings added per iteration
     // to hit the entry size limit without exceeding it too much. Note that we add postings one at a time in the
     // compaction graph, so we only ever increment by 4 bytes each time we attempt to re-serialize the entry.
     private void testEntries(int entries, int iterations, int postingsAddedPerIteration) throws Exception
