/*
 * @notice
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * Modifications copyright (C) 2025 Elasticsearch B.V.
 */
package org.elasticsearch.index.codec.vectors;

import org.apache.lucene.index.PointValues.IntersectVisitor;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.DocBaseBitSetIterator;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.hnsw.IntToIntFunction;

import java.io.IOException;
import java.util.Arrays;

/**
 * This class is used to write and read the doc ids in a compressed format. The format is optimized
 * for the number of bits per value (bpv) and the number of values.
 *
 * <p>It is copied from the BKD implementation.
 */
final class DocIdsWriter {
    public static final int DEFAULT_MAX_POINTS_IN_LEAF_NODE = 512;

    private static final byte CONTINUOUS_IDS = (byte) -2;
    private static final byte BITSET_IDS = (byte) -1;
    private static final byte DELTA_BPV_16 = (byte) 16;
    private static final byte BPV_21 = (byte) 21;
    private static final byte BPV_24 = (byte) 24;
    private static final byte BPV_32 = (byte) 32;
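
    // Each encoded block starts with one of the tag bytes above, followed by that
    // encoding's payload (as written by writeDocIds below):
    //   CONTINUOUS_IDS: vInt(first id); the ids are first, first + 1, ...
    //   BITSET_IDS:     vInt(word offset), vInt(word count), then the bitset words
    //   DELTA_BPV_16:   vInt(min), then 16-bit deltas from min, packed two per int
    //   BPV_21/24/32:   absolute ids packed at 21, 24 or 32 bits per value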

    private int[] scratch = new int[0];
    private final LongsRef scratchLongs = new LongsRef();

    /**
     * IntsRef used to iterate over the scratch buffer. A single instance is reused to avoid
     * re-allocating the object. The ints and length fields need to be reset on each use.
     *
     * <p>The main reason for existing is to be able to call the {@link
     * IntersectVisitor#visit(IntsRef)} method rather than the {@link IntersectVisitor#visit(int)}
     * method. This seems to make a difference in performance, probably because fewer virtual
     * calls happen that way (once per read call rather than once per doc).
     */
    private final IntsRef scratchIntsRef = new IntsRef();

    {
        // This is here to not rely on the default constructor of IntsRef to set offset to 0
        scratchIntsRef.offset = 0;
    }

    DocIdsWriter() {}

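    // A minimal round-trip sketch (illustrative only; "docs", "out" and "in" are
    // hypothetical):
    //
    //   DocIdsWriter writer = new DocIdsWriter();
    //   writer.writeDocIds(i -> docs[i], docs.length, out);   // encode one block
    //   ...
    //   writer.readInts(in, docs.length, docs);               // decode the same block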
    void writeDocIds(IntToIntFunction docIds, int count, DataOutput out) throws IOException {
        // docs can be sorted either when all docs in a block have the same value
        // or when a segment is sorted
        if (count == 0) {
            out.writeByte(CONTINUOUS_IDS);
            return;
        }
        if (count > scratch.length) {
            scratch = new int[count];
        }
        boolean strictlySorted = true;
        int min = docIds.apply(0);
        int max = min;
        for (int i = 1; i < count; ++i) {
            int last = docIds.apply(i - 1);
            int current = docIds.apply(i);
            if (last >= current) {
                strictlySorted = false;
            }
            min = Math.min(min, current);
            max = Math.max(max, current);
        }

        int min2max = max - min + 1;
        if (strictlySorted) {
            if (min2max == count) {
                // continuous ids, typically happens when segment is sorted
                out.writeByte(CONTINUOUS_IDS);
                out.writeVInt(docIds.apply(0));
                return;
            } else if (min2max <= (count << 4)) {
                assert min2max > count : "min2max: " + min2max + ", count: " + count;
                // Only trigger the bitset optimization when max - min + 1 <= 16 * count, to
                // avoid expanding storage too much. A field with lower cardinality has a
                // higher probability of triggering this optimization.
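                // At the 16x limit, for example, a block of 512 strictly sorted docs spans at
                // most 8192 ids, i.e. 8192 bits = 1 KiB of words, or 2 bytes per doc - no worse
                // than DELTA_BPV_16 below, and much better for denser blocks.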
                out.writeByte(BITSET_IDS);
                writeIdsAsBitSet(docIds, count, out);
                return;
            }
        }

        if (min2max <= 0xFFFF) {
            out.writeByte(DELTA_BPV_16);
            for (int i = 0; i < count; i++) {
                scratch[i] = docIds.apply(i) - min;
            }
            out.writeVInt(min);
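            // Pack two 16-bit deltas per int: int i holds delta[i] in its high 16 bits and
            // delta[halfLen + i] in its low 16 bits. For instance, deltas [a, b, c, d] are
            // written as (a << 16) | c and (b << 16) | d; decode16 undoes this split.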
            final int halfLen = count >> 1;
            for (int i = 0; i < halfLen; ++i) {
                scratch[i] = scratch[halfLen + i] | (scratch[i] << 16);
            }
            for (int i = 0; i < halfLen; i++) {
                out.writeInt(scratch[i]);
            }
            if ((count & 1) == 1) {
                out.writeShort((short) scratch[count - 1]);
            }
        } else {
            if (max <= 0x1FFFFF) {
                out.writeByte(BPV_21);
                final int oneThird = floorToMultipleOf16(count / 3);
                final int numInts = oneThird * 2;
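                // Each pair (scratch[i], scratch[i + oneThird]) carries three docs: docs
                // 0..numInts-1 take the top 21 bits of one int each, and the i-th extra doc
                // contributes its low 11 bits to scratch[i] and its remaining 10 bits to
                // scratch[i + oneThird], i.e. 64 bits for 3 docs (~21.3 bits per value).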
                for (int i = 0; i < numInts; i++) {
                    scratch[i] = docIds.apply(i) << 11;
                }
                for (int i = 0; i < oneThird; i++) {
                    final int longIdx = i + numInts;
                    scratch[i] |= docIds.apply(longIdx) & 0x7FF;
                    scratch[i + oneThird] |= (docIds.apply(longIdx) >>> 11) & 0x7FF;
                }
                for (int i = 0; i < numInts; i++) {
                    out.writeInt(scratch[i]);
                }
                int i = oneThird * 3;
                for (; i < count - 2; i += 3) {
                    out.writeLong(((long) docIds.apply(i)) | (((long) docIds.apply(i + 1)) << 21) | (((long) docIds.apply(i + 2)) << 42));
                }
                for (; i < count; ++i) {
                    out.writeShort((short) docIds.apply(i));
                    out.writeByte((byte) (docIds.apply(i) >>> 16));
                }
            } else if (max <= 0xFFFFFF) {
                out.writeByte(BPV_24);

                // encode the docs in a format that can be decoded with vectorized instructions
                final int quarter = count >> 2;
                final int numInts = quarter * 3;
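                // Each triplet (scratch[i], scratch[i + quarter], scratch[i + 2 * quarter])
                // carries four docs: three take the top 24 bits of one int each, and the i-th
                // extra doc is spread one byte at a time over the low 8 bits of the three
                // ints, i.e. 96 bits for 4 docs (exactly 24 bits per value).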
                for (int i = 0; i < numInts; i++) {
                    scratch[i] = docIds.apply(i) << 8;
                }
                for (int i = 0; i < quarter; i++) {
                    final int longIdx = i + numInts;
                    scratch[i] |= docIds.apply(longIdx) & 0xFF;
                    scratch[i + quarter] |= (docIds.apply(longIdx) >>> 8) & 0xFF;
                    scratch[i + quarter * 2] |= docIds.apply(longIdx) >>> 16;
                }
                for (int i = 0; i < numInts; i++) {
                    out.writeInt(scratch[i]);
                }
                for (int i = quarter << 2; i < count; ++i) {
                    out.writeShort((short) docIds.apply(i));
                    out.writeByte((byte) (docIds.apply(i) >>> 16));
                }
            } else {
                out.writeByte(BPV_32);
                for (int i = 0; i < count; i++) {
                    out.writeInt(docIds.apply(i));
                }
            }
        }
    }

    private static void writeIdsAsBitSet(IntToIntFunction docIds, int count, DataOutput out) throws IOException {
        int min = docIds.apply(0);
        int max = docIds.apply(count - 1);

        final int offsetWords = min >> 6;
        final int offsetBits = offsetWords << 6;
        final int totalWordCount = FixedBitSet.bits2words(max - offsetBits + 1);
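        // Ids are rebased onto the word boundary at or below min so that only whole 64-bit
        // words need to be written. For example, min = 130 gives offsetWords = 2 and
        // offsetBits = 128, so doc 130 becomes bit 2 of the first stored word.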
        long currentWord = 0;
        int currentWordIndex = 0;

        out.writeVInt(offsetWords);
        out.writeVInt(totalWordCount);
        // build the bit set in a streaming fashion, one word at a time
        for (int i = 0; i < count; i++) {
            final int index = docIds.apply(i) - offsetBits;
            final int nextWordIndex = index >> 6;
            assert currentWordIndex <= nextWordIndex;
            if (currentWordIndex < nextWordIndex) {
                out.writeLong(currentWord);
                currentWord = 0L;
                currentWordIndex++;
                while (currentWordIndex < nextWordIndex) {
                    currentWordIndex++;
                    out.writeLong(0L);
                }
            }
            currentWord |= 1L << index;
        }
        out.writeLong(currentWord);
        assert currentWordIndex + 1 == totalWordCount;
    }

    /** Read {@code count} integers into {@code docIDs}. */
    void readInts(IndexInput in, int count, int[] docIDs) throws IOException {
        if (count > scratch.length) {
            scratch = new int[count];
        }
        final int bpv = in.readByte();
        switch (bpv) {
            case CONTINUOUS_IDS:
                readContinuousIds(in, count, docIDs);
                break;
            case BITSET_IDS:
                readBitSet(in, count, docIDs);
                break;
            case DELTA_BPV_16:
                readDelta16(in, count, docIDs);
                break;
            case BPV_21:
                readInts21(in, count, docIDs);
                break;
            case BPV_24:
                readInts24(in, count, docIDs);
                break;
            case BPV_32:
                readInts32(in, count, docIDs);
                break;
            default:
                throw new IOException("Unsupported number of bits per value: " + bpv);
        }
    }

    private DocIdSetIterator readBitSetIterator(IndexInput in, int count) throws IOException {
        int offsetWords = in.readVInt();
        int longLen = in.readVInt();
        scratchLongs.longs = ArrayUtil.growNoCopy(scratchLongs.longs, longLen);
        in.readLongs(scratchLongs.longs, 0, longLen);
        // make ghost bits clear for FixedBitSet.
        if (longLen < scratchLongs.length) {
            Arrays.fill(scratchLongs.longs, longLen, scratchLongs.longs.length, 0);
        }
        scratchLongs.length = longLen;
        FixedBitSet bitSet = new FixedBitSet(scratchLongs.longs, longLen << 6);
        return new DocBaseBitSetIterator(bitSet, count, offsetWords << 6);
    }

    private static void readContinuousIds(IndexInput in, int count, int[] docIDs) throws IOException {
        int start = in.readVInt();
        for (int i = 0; i < count; i++) {
            docIDs[i] = start + i;
        }
    }

    private void readBitSet(IndexInput in, int count, int[] docIDs) throws IOException {
        DocIdSetIterator iterator = readBitSetIterator(in, count);
        int docId, pos = 0;
        while ((docId = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            docIDs[pos++] = docId;
        }
        assert pos == count : "pos: " + pos + ", count: " + count;
    }

    private static void readDelta16(IndexInput in, int count, int[] docIds) throws IOException {
        final int min = in.readVInt();
        final int half = count >> 1;
        in.readInts(docIds, 0, half);
        if (count == DEFAULT_MAX_POINTS_IN_LEAF_NODE) {
            // Same format, but enabling the JVM to specialize the decoding logic for the default
            // number of points per node proved to help on benchmarks
            decode16(docIds, DEFAULT_MAX_POINTS_IN_LEAF_NODE / 2, min);
        } else {
            decode16(docIds, half, min);
        }
        // read the remaining doc if count is odd.
        for (int i = half << 1; i < count; i++) {
            docIds[i] = Short.toUnsignedInt(in.readShort()) + min;
        }
    }

    private static void decode16(int[] docIDs, int half, int min) {
        for (int i = 0; i < half; ++i) {
            final int l = docIDs[i];
            docIDs[i] = (l >>> 16) + min;
            docIDs[i + half] = (l & 0xFFFF) + min;
        }
    }

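    /**
     * Rounds {@code n} down to a multiple of 16 by clearing its four low bits, e.g.
     * {@code floorToMultipleOf16(170) == 160}. The 21-bit path sizes its packed region with
     * this so the decode loop trip counts stay multiples of 16, which (per the assertion in
     * {@link #readInts24}) keeps those loops amenable to vectorization.
     */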
    private static int floorToMultipleOf16(int n) {
        assert n >= 0;
        return n & 0xFFFFFFF0;
    }

    private void readInts21(IndexInput in, int count, int[] docIDs) throws IOException {
        int oneThird = floorToMultipleOf16(count / 3);
        int numInts = oneThird << 1;
        in.readInts(scratch, 0, numInts);
        if (count == DEFAULT_MAX_POINTS_IN_LEAF_NODE) {
            // Same format, but enabling the JVM to specialize the decoding logic for the default
            // number of points per node proved to help on benchmarks
            decode21(
                docIDs,
                scratch,
                floorToMultipleOf16(DEFAULT_MAX_POINTS_IN_LEAF_NODE / 3),
                floorToMultipleOf16(DEFAULT_MAX_POINTS_IN_LEAF_NODE / 3) * 2
            );
        } else {
            decode21(docIDs, scratch, oneThird, numInts);
        }
        int i = oneThird * 3;
        for (; i < count - 2; i += 3) {
            long l = in.readLong();
            docIDs[i] = (int) (l & 0x1FFFFFL);
            docIDs[i + 1] = (int) ((l >>> 21) & 0x1FFFFFL);
            docIDs[i + 2] = (int) (l >>> 42);
        }
        for (; i < count; ++i) {
            docIDs[i] = (in.readShort() & 0xFFFF) | (in.readByte() & 0xFF) << 16;
        }
    }

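    // Inverse of the packing in the BPV_21 branch of writeDocIds: the first numInts values
    // come from the top 21 bits of one int each; each extra value is reassembled from the
    // low 11 bits of scratch[i] and the low bits of scratch[i + oneThird].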
    private static void decode21(int[] docIds, int[] scratch, int oneThird, int numInts) {
        for (int i = 0; i < numInts; ++i) {
            docIds[i] = scratch[i] >>> 11;
        }
        for (int i = 0; i < oneThird; i++) {
            docIds[i + numInts] = (scratch[i] & 0x7FF) | ((scratch[i + oneThird] & 0x7FF) << 11);
        }
    }

    private void readInts24(IndexInput in, int count, int[] docIDs) throws IOException {
        int quarter = count >> 2;
        int numInts = quarter * 3;
        in.readInts(scratch, 0, numInts);
        if (count == DEFAULT_MAX_POINTS_IN_LEAF_NODE) {
            // Same format, but enabling the JVM to specialize the decoding logic for the default
            // number of points per node proved to help on benchmarks
            assert floorToMultipleOf16(quarter) == quarter
                : "We are relying on the fact that quarter of DEFAULT_MAX_POINTS_IN_LEAF_NODE"
                    + " is a multiple of 16 to vectorize the decoding loop,"
                    + " please check performance issue if you want to break this assumption.";
            decode24(docIDs, scratch, DEFAULT_MAX_POINTS_IN_LEAF_NODE / 4, DEFAULT_MAX_POINTS_IN_LEAF_NODE / 4 * 3);
        } else {
            decode24(docIDs, scratch, quarter, numInts);
        }
        // Now read the remaining 0, 1, 2 or 3 values
        for (int i = quarter << 2; i < count; ++i) {
            docIDs[i] = (in.readShort() & 0xFFFF) | (in.readByte() & 0xFF) << 16;
        }
    }

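    // Inverse of the packing in the BPV_24 branch of writeDocIds: the first numInts values
    // come from the top 24 bits of one int each; each extra value is stitched back together
    // from one low byte of scratch[i], scratch[i + quarter] and scratch[i + quarter * 2].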
    private static void decode24(int[] docIDs, int[] scratch, int quarter, int numInts) {
        for (int i = 0; i < numInts; ++i) {
            docIDs[i] = scratch[i] >>> 8;
        }
        for (int i = 0; i < quarter; i++) {
            docIDs[i + numInts] = (scratch[i] & 0xFF) | ((scratch[i + quarter] & 0xFF) << 8) | ((scratch[i + quarter * 2] & 0xFF) << 16);
        }
    }

    private static void readInts32(IndexInput in, int count, int[] docIDs) throws IOException {
        in.readInts(docIDs, 0, count);
    }
}