Skip to content

Commit 3c9aa1f

Browse files
committed
fix up a bit
1 parent e122a04 commit 3c9aa1f

File tree

1 file changed

+1
-14
lines changed

1 file changed

+1
-14
lines changed

server/src/main/java/org/elasticsearch/index/codec/vectors/DefaultIVFVectorsWriter.java

Lines changed: 1 addition & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -103,10 +103,7 @@ CentroidOffsetAndLength buildAndWritePostingsLists(
103103
// write the posting lists
104104
final PackedLongValues.Builder offsets = PackedLongValues.monotonicBuilder(PackedInts.COMPACT);
105105
final PackedLongValues.Builder lengths = PackedLongValues.monotonicBuilder(PackedInts.COMPACT);
106-
DiskBBQBulkWriter.OneBitDiskBBQBulkWriter bulkWriter = new DiskBBQBulkWriter.OneBitDiskBBQBulkWriter(
107-
ES91OSQVectorsScorer.BULK_SIZE,
108-
postingsOutput
109-
);
106+
DiskBBQBulkWriter bulkWriter = new DiskBBQBulkWriter.OneBitDiskBBQBulkWriter(ES91OSQVectorsScorer.BULK_SIZE, postingsOutput);
110107
OnHeapQuantizedVectors onHeapQuantizedVectors = new OnHeapQuantizedVectors(
111108
floatVectorValues,
112109
fieldInfo.getVectorDimension(),
@@ -143,11 +140,6 @@ CentroidOffsetAndLength buildAndWritePostingsLists(
143140
onHeapQuantizedVectors.reset(centroid, size, ord -> cluster[clusterOrds[ord]]);
144141
byte encoding = idsWriter.calculateBlockEncoding(i -> docDeltas[i], size, ES91OSQVectorsScorer.BULK_SIZE);
145142
postingsOutput.writeByte(encoding);
146-
// TODO we might want to consider putting the docIds in a separate file
147-
// to aid with only having to fetch vectors from slower storage when they are required
148-
// keeping them in the same file indicates we pull the entire file into cache
149-
// idsWriter.writeDocIds(i -> docDeltas[i], size, postingsOutput);
150-
// write vectors
151143
bulkWriter.writeVectors(onHeapQuantizedVectors, i -> {
152144
// for vector i we write `bulk` size docs or the remaining docs
153145
idsWriter.writeDocIds(d -> docDeltas[i + d], Math.min(ES91OSQVectorsScorer.BULK_SIZE, size - i), encoding, postingsOutput);
@@ -298,10 +290,6 @@ CentroidOffsetAndLength buildAndWritePostingsLists(
298290
byte encoding = idsWriter.calculateBlockEncoding(i -> docDeltas[i], size, ES91OSQVectorsScorer.BULK_SIZE);
299291
postingsOutput.writeByte(encoding);
300292
offHeapQuantizedVectors.reset(size, ord -> isOverspill[clusterOrds[ord]], ord -> cluster[clusterOrds[ord]]);
301-
// TODO we might want to consider putting the docIds in a separate file
302-
// to aid with only having to fetch vectors from slower storage when they are required
303-
// keeping them in the same file indicates we pull the entire file into cache
304-
// idsWriter.writeDocIds(i -> docDeltas[i], size, postingsOutput);
305293
// write vectors
306294
bulkWriter.writeVectors(offHeapQuantizedVectors, i -> {
307295
// for vector i we write `bulk` size docs or the remaining docs
@@ -313,7 +301,6 @@ CentroidOffsetAndLength buildAndWritePostingsLists(
313301
);
314302
});
315303
lengths.add(postingsOutput.getFilePointer() - fileOffset - offset);
316-
// lengths.add(1);
317304
}
318305

319306
if (logger.isDebugEnabled()) {

0 commit comments

Comments (0)