@@ -37,7 +37,6 @@
 import org.elasticsearch.index.codec.vectors.diskbbq.QuantizedVectorValues;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
-import org.elasticsearch.simdvec.ES91OSQVectorsScorer;
 import org.elasticsearch.simdvec.ES92Int7VectorsScorer;
 import org.elasticsearch.simdvec.ESNextOSQVectorsScorer;
 
@@ -120,7 +119,11 @@ public CentroidOffsetAndLength buildAndWritePostingsLists(
         // write the posting lists
         final PackedLongValues.Builder offsets = PackedLongValues.monotonicBuilder(PackedInts.COMPACT);
         final PackedLongValues.Builder lengths = PackedLongValues.monotonicBuilder(PackedInts.COMPACT);
-        DiskBBQBulkWriter bulkWriter = DiskBBQBulkWriter.fromBitSize(quantEncoding.bits(), ES91OSQVectorsScorer.BULK_SIZE, postingsOutput);
+        DiskBBQBulkWriter bulkWriter = DiskBBQBulkWriter.fromBitSize(
+            quantEncoding.bits(),
+            ESNextOSQVectorsScorer.BULK_SIZE,
+            postingsOutput
+        );
         OnHeapQuantizedVectors onHeapQuantizedVectors = new OnHeapQuantizedVectors(
             floatVectorValues,
             quantEncoding,
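
The writer emits postings in fixed-size blocks, and the block size is taken from the scorer class (now ESNextOSQVectorsScorer.BULK_SIZE) so the write path and the bulk-scoring read path advance with the same stride. A minimal standalone sketch of that partitioning, assuming a hypothetical BULK_SIZE of 16 (the real constant's value is not shown in this diff, and the class name here is made up):

    // Illustrative only: split `size` vectors into full blocks plus a tail,
    // mirroring the Math.min(BULK_SIZE, size - i) pattern used below.
    public final class BulkBlocksSketch {
        static final int BULK_SIZE = 16; // assumed value, for illustration

        public static void main(String[] args) {
            int size = 37; // vectors in one posting list
            for (int i = 0; i < size; i += BULK_SIZE) {
                int block = Math.min(BULK_SIZE, size - i);
                System.out.println("block of " + block + " vectors starting at ordinal " + i);
            }
        }
    }
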
@@ -156,11 +159,16 @@ public CentroidOffsetAndLength buildAndWritePostingsLists(
                     docDeltas[j] = j == 0 ? docIds[clusterOrds[j]] : docIds[clusterOrds[j]] - docIds[clusterOrds[j - 1]];
                 }
                 onHeapQuantizedVectors.reset(centroid, size, ord -> cluster[clusterOrds[ord]]);
-                byte encoding = idsWriter.calculateBlockEncoding(i -> docDeltas[i], size, ES91OSQVectorsScorer.BULK_SIZE);
+                byte encoding = idsWriter.calculateBlockEncoding(i -> docDeltas[i], size, ESNextOSQVectorsScorer.BULK_SIZE);
                 postingsOutput.writeByte(encoding);
                 bulkWriter.writeVectors(onHeapQuantizedVectors, i -> {
                     // for vector i we write `bulk` size docs or the remaining docs
-                    idsWriter.writeDocIds(d -> docDeltas[i + d], Math.min(ES91OSQVectorsScorer.BULK_SIZE, size - i), encoding, postingsOutput);
+                    idsWriter.writeDocIds(
+                        d -> docDeltas[i + d],
+                        Math.min(ESNextOSQVectorsScorer.BULK_SIZE, size - i),
+                        encoding,
+                        postingsOutput
+                    );
                 });
                 lengths.add(postingsOutput.getFilePointer() - fileOffset - offset);
             }
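
The docDeltas loop above gap-encodes a cluster's doc IDs: the first slot keeps the ID itself and every later slot keeps the difference from its predecessor, which keeps the values small for the per-block encoder. A self-contained round trip of that idea (plain arrays, not the internal IdsWriter API; the sample IDs are made up):

    // Illustrative only: gap-encode ascending doc IDs, then decode by prefix sum.
    public final class DocDeltasSketch {
        public static void main(String[] args) {
            int[] docIds = { 3, 7, 12, 40, 41 }; // ascending within the cluster
            int[] docDeltas = new int[docIds.length];
            for (int j = 0; j < docIds.length; j++) {
                docDeltas[j] = j == 0 ? docIds[j] : docIds[j] - docIds[j - 1];
            }
            // decoding is a running sum: prints 3, 7, 12, 40, 41
            int doc = 0;
            for (int delta : docDeltas) {
                doc += delta;
                System.out.println(doc);
            }
        }
    }
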
@@ -275,7 +283,7 @@ public CentroidOffsetAndLength buildAndWritePostingsLists(
         );
         DiskBBQBulkWriter bulkWriter = DiskBBQBulkWriter.fromBitSize(
             quantEncoding.bits(),
-            ES91OSQVectorsScorer.BULK_SIZE,
+            ESNextOSQVectorsScorer.BULK_SIZE,
             postingsOutput
         );
         final ByteBuffer buffer = ByteBuffer.allocate(fieldInfo.getVectorDimension() * Float.BYTES).order(ByteOrder.LITTLE_ENDIAN);
@@ -308,7 +316,7 @@ public CentroidOffsetAndLength buildAndWritePostingsLists(
                 for (int j = 0; j < size; j++) {
                     docDeltas[j] = j == 0 ? docIds[clusterOrds[j]] : docIds[clusterOrds[j]] - docIds[clusterOrds[j - 1]];
                 }
-                byte encoding = idsWriter.calculateBlockEncoding(i -> docDeltas[i], size, ES91OSQVectorsScorer.BULK_SIZE);
+                byte encoding = idsWriter.calculateBlockEncoding(i -> docDeltas[i], size, ESNextOSQVectorsScorer.BULK_SIZE);
                 postingsOutput.writeByte(encoding);
                 offHeapQuantizedVectors.reset(size, ord -> isOverspill[clusterOrds[ord]], ord -> cluster[clusterOrds[ord]]);
                 // write vectors
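
The body of calculateBlockEncoding is not part of this diff. As an assumption about what such a method does, a block encoder of this kind typically scans a block's deltas and picks the narrowest bit width that fits the largest value, recording the choice in the single byte written by writeByte(encoding). A hedged sketch of that strategy (the method name and logic here are hypothetical, not the actual IdsWriter implementation):

    // Illustrative only: choose a bit width per block from the largest delta.
    // Assumed strategy, not the real IdsWriter logic.
    import java.util.function.IntUnaryOperator;

    public final class BlockEncodingSketch {
        static byte pickBitsPerValue(IntUnaryOperator deltas, int size) {
            int max = 0;
            for (int i = 0; i < size; i++) {
                max = Math.max(max, deltas.applyAsInt(i));
            }
            // 32 - numberOfLeadingZeros(max) is the bit length of max; clamp 0 up to 1 bit
            return (byte) Math.max(1, 32 - Integer.numberOfLeadingZeros(max));
        }

        public static void main(String[] args) {
            int[] docDeltas = { 3, 4, 5, 28 };
            System.out.println(pickBitsPerValue(i -> docDeltas[i], docDeltas.length)); // prints 5
        }
    }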