Add binary doc value compression with variable doc count blocks #137139
New file: BinaryDVCompressionMode.java (@@ -0,0 +1,30 @@)

```java
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.index.codec.tsdb;

public enum BinaryDVCompressionMode {

    NO_COMPRESS((byte) 0),
    COMPRESSED_WITH_ZSTD((byte) 1);

    public final byte code;

    BinaryDVCompressionMode(byte code) {
        this.code = code;
    }

    public static BinaryDVCompressionMode fromMode(byte mode) {
        return switch (mode) {
            case 0 -> NO_COMPRESS;
            case 1 -> COMPRESSED_WITH_ZSTD;
            default -> throw new IllegalStateException("unknown compression mode [" + mode + "]");
        };
    }
}
```
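As a quick illustration (not part of the diff), the mode byte written by the consumer is meant to round-trip through `code` and `fromMode`; a minimal sketch:

```java
// Round-trip check for the compression-mode byte (illustrative only).
byte code = BinaryDVCompressionMode.COMPRESSED_WITH_ZSTD.code;      // byte recorded in the doc values metadata
BinaryDVCompressionMode mode = BinaryDVCompressionMode.fromMode(code);
assert mode == BinaryDVCompressionMode.COMPRESSED_WITH_ZSTD;        // unknown codes throw IllegalStateException
```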
New file: DelayedOffsetAccumulator.java (@@ -0,0 +1,100 @@)

```java
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.index.codec.tsdb.es819;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.packed.DirectMonotonicWriter;
import org.elasticsearch.core.IOUtils;

import java.io.Closeable;
import java.io.IOException;

/**
 * Like OffsetsAccumulator, builds offsets and stores them in a DirectMonotonicWriter. But writes to a temp file
 * rather than directly to a DirectMonotonicWriter because the number of values is unknown. If the number of
 * values is known, prefer OffsetsWriter.
 */
final class DelayedOffsetAccumulator implements Closeable {

    private final Directory dir;
    private final long startOffset;

    private int numValues = 0;
    private final IndexOutput tempOutput;
    private final String suffix;

    DelayedOffsetAccumulator(Directory dir, IOContext context, IndexOutput data, String suffix, long startOffset) throws IOException {
        this.dir = dir;
        this.startOffset = startOffset;
        this.suffix = suffix;

        boolean success = false;
        try {
            tempOutput = dir.createTempOutput(data.getName(), suffix, context);
            CodecUtil.writeHeader(tempOutput, ES819TSDBDocValuesFormat.META_CODEC + suffix, ES819TSDBDocValuesFormat.VERSION_CURRENT);
            success = true;
        } finally {
            if (success == false) {
                IOUtils.closeWhileHandlingException(this); // self-close because constructor caller can't
            }
        }
    }

    public void addDoc(long delta) throws IOException {
        tempOutput.writeVLong(delta);
        numValues++;
    }

    public void build(IndexOutput meta, IndexOutput data) throws IOException {
        CodecUtil.writeFooter(tempOutput);
        IOUtils.close(tempOutput);

        // write the offsets info to the meta file by reading from temp file
        try (ChecksumIndexInput tempInput = dir.openChecksumInput(tempOutput.getName())) {
            CodecUtil.checkHeader(
                tempInput,
                ES819TSDBDocValuesFormat.META_CODEC + suffix,
                ES819TSDBDocValuesFormat.VERSION_CURRENT,
                ES819TSDBDocValuesFormat.VERSION_CURRENT
            );
            Throwable priorE = null;
            try {
                final DirectMonotonicWriter writer = DirectMonotonicWriter.getInstance(
                    meta,
                    data,
                    numValues + 1,
                    ES819TSDBDocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT
                );

                long offset = startOffset;
                writer.add(offset);
                for (int i = 0; i < numValues; ++i) {
                    offset += tempInput.readVLong();
                    writer.add(offset);
                }
                writer.finish();
            } catch (Throwable e) {
                priorE = e;
            } finally {
                CodecUtil.checkFooter(tempInput, priorE);
            }
        }
    }

    @Override
    public void close() throws IOException {
        if (tempOutput != null) {
            IOUtils.close(tempOutput, () -> dir.deleteFile(tempOutput.getName()));
        }
    }
}
```
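Not part of the diff; a minimal sketch of the intended lifecycle, mirroring how CompressedBinaryBlockWriter (later in this diff) uses the class. The `dir`, `context`, `data`, `meta`, and `blockLengths` names are assumed to come from the surrounding consumer:

```java
// Accumulate deltas into a temp file while the value count is still unknown,
// then materialize a DirectMonotonicWriter when build() is called.
try (DelayedOffsetAccumulator acc = new DelayedOffsetAccumulator(dir, context, data, "block-addresses", data.getFilePointer())) {
    for (long blockLength : blockLengths) {
        acc.addDoc(blockLength);   // each delta is the byte length of one block
    }
    acc.build(meta, data);         // writes the monotonic offsets to meta/data
}                                  // close() deletes the temp file
```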
Modified file: ES819TSDBDocValuesConsumer.java

@@ -27,8 +27,10 @@
```java
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.store.ByteBuffersDataInput;
import org.apache.lucene.store.ByteBuffersDataOutput;
import org.apache.lucene.store.ByteBuffersIndexOutput;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
```

@@ -41,15 +43,20 @@
```java
import org.apache.lucene.util.packed.DirectMonotonicWriter;
import org.apache.lucene.util.packed.PackedInts;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.index.codec.tsdb.BinaryDVCompressionMode;
import org.elasticsearch.index.codec.tsdb.TSDBDocValuesEncoder;
import org.elasticsearch.index.codec.zstd.Zstd814StoredFieldsFormat;

import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static org.elasticsearch.index.codec.tsdb.es819.DocValuesConsumerUtil.compatibleWithOptimizedMerge;
import static org.elasticsearch.index.codec.tsdb.es819.ES819TSDBDocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT;
import static org.elasticsearch.index.codec.tsdb.es819.ES819TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE;
import static org.elasticsearch.index.codec.tsdb.es819.ES819TSDBDocValuesFormat.SKIP_INDEX_LEVEL_SHIFT;
import static org.elasticsearch.index.codec.tsdb.es819.ES819TSDBDocValuesFormat.SKIP_INDEX_MAX_LEVEL;
import static org.elasticsearch.index.codec.tsdb.es819.ES819TSDBDocValuesFormat.SORTED_SET;
```

@@ -65,8 +72,11 @@ final class ES819TSDBDocValuesConsumer extends XDocValuesConsumer {
```java
    private final int minDocsPerOrdinalForOrdinalRangeEncoding;
    final boolean enableOptimizedMerge;
    private final int primarySortFieldNumber;
    final SegmentWriteState state;
    final BinaryDVCompressionMode binaryDVCompressionMode;

    ES819TSDBDocValuesConsumer(
        BinaryDVCompressionMode binaryDVCompressionMode,
        SegmentWriteState state,
        int skipIndexIntervalSize,
        int minDocsPerOrdinalForOrdinalRangeEncoding,
```

@@ -76,6 +86,8 @@ final class ES819TSDBDocValuesConsumer extends XDocValuesConsumer {
```java
        String metaCodec,
        String metaExtension
    ) throws IOException {
        this.binaryDVCompressionMode = binaryDVCompressionMode;
        this.state = state;
        this.termsDictBuffer = new byte[1 << 14];
        this.dir = state.directory;
        this.minDocsPerOrdinalForOrdinalRangeEncoding = minDocsPerOrdinalForOrdinalRangeEncoding;
```

@@ -315,7 +327,14 @@ public void mergeBinaryField(FieldInfo mergeFieldInfo, MergeState mergeState) th
```java
    public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
        meta.writeInt(field.number);
        meta.writeByte(ES819TSDBDocValuesFormat.BINARY);
        meta.writeByte(binaryDVCompressionMode.code);
        switch (binaryDVCompressionMode) {
            case NO_COMPRESS -> doAddUncompressedBinary(field, valuesProducer);
            case COMPRESSED_WITH_ZSTD -> doAddCompressedBinary(field, valuesProducer);
        }
    }

    public void doAddUncompressedBinary(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
        if (valuesProducer instanceof TsdbDocValuesProducer tsdbValuesProducer && tsdbValuesProducer.mergeStats.supported()) {
            final int numDocsWithField = tsdbValuesProducer.mergeStats.sumNumDocsWithField();
            final int minLength = tsdbValuesProducer.mergeStats.minLength();
```

@@ -444,6 +463,181 @@ public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) th
```java
        }
    }

    public void doAddCompressedBinary(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
        try (CompressedBinaryBlockWriter blockWriter = new CompressedBinaryBlockWriter()) {
            BinaryDocValues values = valuesProducer.getBinary(field);
            long start = data.getFilePointer();
            meta.writeLong(start); // dataOffset
            int numDocsWithField = 0;
            int minLength = Integer.MAX_VALUE;
            int maxLength = 0;
            for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) {
                numDocsWithField++;
                BytesRef v = values.binaryValue();
                blockWriter.addDoc(v);
                int length = v.length;
                minLength = Math.min(length, minLength);
                maxLength = Math.max(length, maxLength);
            }
            blockWriter.flushData();

            assert numDocsWithField <= maxDoc;
            meta.writeLong(data.getFilePointer() - start); // dataLength

            if (numDocsWithField == 0) {
                meta.writeLong(-2); // docsWithFieldOffset
                meta.writeLong(0L); // docsWithFieldLength
                meta.writeShort((short) -1); // jumpTableEntryCount
                meta.writeByte((byte) -1); // denseRankPower
            } else if (numDocsWithField == maxDoc) {
                meta.writeLong(-1); // docsWithFieldOffset
                meta.writeLong(0L); // docsWithFieldLength
                meta.writeShort((short) -1); // jumpTableEntryCount
                meta.writeByte((byte) -1); // denseRankPower
            } else {
                long offset = data.getFilePointer();
                meta.writeLong(offset); // docsWithFieldOffset
                values = valuesProducer.getBinary(field);
                final short jumpTableEntryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER);
                meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength
                meta.writeShort(jumpTableEntryCount);
                meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER);
            }

            meta.writeInt(numDocsWithField);
            meta.writeInt(minLength);
            meta.writeInt(maxLength);

            blockWriter.writeMetaData();
        }
    }

    private class CompressedBinaryBlockWriter implements Closeable {
        static final int MIN_BLOCK_BYTES = 256 * 1024;
        static final int START_BLOCK_DOCS = 1024;
        static final int ZSTD_LEVEL = 1;

        final Zstd814StoredFieldsFormat.ZstdCompressor compressor = new Zstd814StoredFieldsFormat.ZstdCompressor(ZSTD_LEVEL);

        final TSDBDocValuesEncoder encoder = new TSDBDocValuesEncoder(ES819TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE);
        final long[] docOffsetsCompressBuffer = new long[ES819TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE];
        int[] docOffsets = new int[START_BLOCK_DOCS];

        int uncompressedBlockLength = 0;
        int maxUncompressedBlockLength = 0;
        int numDocsInCurrentBlock = 0;

        byte[] block = BytesRef.EMPTY_BYTES;
        int totalChunks = 0;
        long maxPointer = 0;
        int maxNumDocsInAnyBlock = 0;

        final DelayedOffsetAccumulator blockAddressAcc;
        final DelayedOffsetAccumulator blockDocRangeAcc;

        CompressedBinaryBlockWriter() throws IOException {
            long blockAddressesStart = data.getFilePointer();
            blockAddressAcc = new DelayedOffsetAccumulator(state.directory, state.context, data, "block-addresses", blockAddressesStart);

            try {
                blockDocRangeAcc = new DelayedOffsetAccumulator(state.directory, state.context, data, "block-doc-ranges", 0);
            } catch (IOException e) {
                blockAddressAcc.close();
                throw e;
            }
        }

        void addDoc(BytesRef v) throws IOException {
            block = ArrayUtil.grow(block, uncompressedBlockLength + v.length);
            System.arraycopy(v.bytes, v.offset, block, uncompressedBlockLength, v.length);
            uncompressedBlockLength += v.length;

            numDocsInCurrentBlock++;
            docOffsets = ArrayUtil.grow(docOffsets, numDocsInCurrentBlock + 1); // need one extra since writing start for next block
            docOffsets[numDocsInCurrentBlock] = uncompressedBlockLength;

            if (uncompressedBlockLength > MIN_BLOCK_BYTES) {
                flushData();
            }
        }

        private void flushData() throws IOException {
            if (numDocsInCurrentBlock > 0) {
                totalChunks++;
                long thisBlockStartPointer = data.getFilePointer();

                // write length of string data
                data.writeInt(uncompressedBlockLength);

                maxUncompressedBlockLength = Math.max(maxUncompressedBlockLength, uncompressedBlockLength);
                maxNumDocsInAnyBlock = Math.max(maxNumDocsInAnyBlock, numDocsInCurrentBlock);

                compressOffsets(data, numDocsInCurrentBlock);
                compress(block, uncompressedBlockLength, data);

                blockDocRangeAcc.addDoc(numDocsInCurrentBlock);
                numDocsInCurrentBlock = 0;

                uncompressedBlockLength = 0;
                maxPointer = data.getFilePointer();
                long blockLenBytes = maxPointer - thisBlockStartPointer;
                blockAddressAcc.addDoc(blockLenBytes);
            }
        }
```
```java
        void compressOffsets(DataOutput output, int numDocsInCurrentBlock) throws IOException {
            int batchStart = 0;
            int numOffsets = numDocsInCurrentBlock + 1;
            while (batchStart < numOffsets) {
                int batchLength = Math.min(numOffsets - batchStart, NUMERIC_BLOCK_SIZE);
                for (int i = 0; i < batchLength; i++) {
                    docOffsetsCompressBuffer[i] = docOffsets[batchStart + i];
                }
                if (batchLength < docOffsetsCompressBuffer.length) {
                    Arrays.fill(docOffsetsCompressBuffer, batchLength, docOffsetsCompressBuffer.length, 0);
                }
                encoder.encode(docOffsetsCompressBuffer, output);
                batchStart += batchLength;
            }
        }
```

Review thread on `compressOffsets`:

> **Member:** Should we encode the lengths using GroupVIntUtil#writeGroupVInts instead? I'm not sure TSDBDocValuesEncoder is suitable for encoding these offsets. Also, always padding to 128 offsets may be wasteful.
>
> **Author:** Good point, especially after limiting the number of docs per block to 1024 the padding could be a concern. Sounds good, I'll give this a try 👍
>
> **Author:** Hmm, so I'm seeing a slow-down with readGroupVInts on some benchmark queries. Mostly small decreases that could be noise, but some in the 25-40% range that are concerning. I'd think that GroupVIntUtil would be quite fast. Is there possibly something I'm missing in the decompression code that could speed it up? I'm currently benchmarking with uncompressed offsets to get a baseline for offset (de)compression.
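For comparison only (not from this PR), a minimal sketch of one unpadded alternative along the lines the thread discusses: writing the per-document length deltas as plain varints instead of padded TSDBDocValuesEncoder batches. It assumes the same `docOffsets` field; the method name is illustrative:

```java
        // Hypothetical alternative: no padding to NUMERIC_BLOCK_SIZE. A reader would
        // rebuild the running offsets by summing the deltas back up.
        void writeOffsetsUnpadded(DataOutput output, int numDocsInCurrentBlock) throws IOException {
            int numOffsets = numDocsInCurrentBlock + 1;
            // docOffsets[0] is always 0, so only the deltas between consecutive offsets are needed
            for (int i = 1; i < numOffsets; i++) {
                output.writeVInt(docOffsets[i] - docOffsets[i - 1]);
            }
        }
```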
```java
        void compress(byte[] data, int uncompressedLength, DataOutput output) throws IOException {
            ByteBuffer inputBuffer = ByteBuffer.wrap(data, 0, uncompressedLength);
            ByteBuffersDataInput input = new ByteBuffersDataInput(List.of(inputBuffer));
            compressor.compress(input, output);
        }
```

Review thread on `compress`:

> **Member:** If we use Zstd directly, should we also handle cases where compression does not reduce storage and store the raw bytes instead?
>
> **Author:** I like the idea of not compressing if it doesn't help. This would still apply with non-direct Zstd, right? I guess for non-direct Zstd we'd need a separate output buffer to check the length before sending it to the output. I discussed this some with Martijn and he suggested adding a signal byte now, which says whether or not the data is compressed. It would always be set to true for now, but will support false once we add direct Zstd and enable this optimization. What do you think?

Review thread on the `compressor.compress(input, output)` call:

> **Member:** Should we use Zstd from NativeAccess directly to avoid copying data to an intermediate buffer before the native buffer?
>
> **Author:** My only concern is that this currently uses Lucene's Compressor/CompressionMode, which will make it easy to add other compressors. On the other hand, as we previously spoke about, it might make sense to use LZ4 to partially decompress blocks. If that is the case, we may not want to use the Compressor interface, though I'm actually not sure either way. Anyway, I split a hacky version of this off here, and will benchmark it to see if it's worth doing.
>
> **Author:** I ran some benchmarks on the above hacky version and got some weird results. Some of the queries got a nice throughput increase. The weird part is that the Store Size increased by an amount that was not reflected in the output of disk_usage. There must be a bug in my version that is causing this. To keep this PR small(er), what do you think about updating to using NativeAccess directly in a separate PR?
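A minimal sketch (not part of this diff) of the signal-byte fallback discussed above: compress into an intermediate ByteBuffersDataOutput, compare sizes, and store the raw bytes when compression does not shrink the block. The helper name and flag values are illustrative assumptions:

```java
        // Hypothetical variant of compress(): emit a flag byte, then either the
        // ZSTD-compressed bytes or, if compression did not help, the raw bytes.
        void compressWithFallback(byte[] block, int uncompressedLength, DataOutput output) throws IOException {
            ByteBuffersDataOutput compressed = new ByteBuffersDataOutput();
            ByteBuffersDataInput input = new ByteBuffersDataInput(List.of(ByteBuffer.wrap(block, 0, uncompressedLength)));
            compressor.compress(input, compressed);
            if (compressed.size() < uncompressedLength) {
                output.writeByte((byte) 1); // block is compressed
                compressed.copyTo(output);
            } else {
                output.writeByte((byte) 0); // block stored as raw bytes
                output.writeBytes(block, 0, uncompressedLength);
            }
        }
```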
```java
        void writeMetaData() throws IOException {
            if (totalChunks == 0) {
                return;
            }

            long dataAddressesStart = data.getFilePointer();

            meta.writeLong(dataAddressesStart);
            meta.writeVInt(totalChunks);
            meta.writeVInt(maxUncompressedBlockLength);
            meta.writeVInt(maxNumDocsInAnyBlock);
            meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT);

            blockAddressAcc.build(meta, data);
            long dataDocRangeStart = data.getFilePointer();
            long addressesLength = dataDocRangeStart - dataAddressesStart;
            meta.writeLong(addressesLength);

            meta.writeLong(dataDocRangeStart);
            blockDocRangeAcc.build(meta, data);
            long docRangesLen = data.getFilePointer() - dataDocRangeStart;
            meta.writeLong(docRangesLen);
        }

        @Override
        public void close() throws IOException {
            blockDocRangeAcc.close();
            blockAddressAcc.close();
        }
    }
```
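For orientation (not part of the diff), the per-field metadata written by `doAddCompressedBinary` and `writeMetaData` lands in the following order; the field names in this sketch are descriptive only and simply restate the write calls above:

```java
// Per-field meta layout for a COMPRESSED_WITH_ZSTD binary field (descriptive names):
//   long  dataOffset                   // start of the compressed blocks in the data file
//   long  dataLength                   // total bytes of compressed block data
//   long  docsWithFieldOffset          // -2 = no docs, -1 = dense, else IndexedDISI offset
//   long  docsWithFieldLength
//   short jumpTableEntryCount
//   byte  denseRankPower
//   int   numDocsWithField
//   int   minLength, maxLength         // written as two ints
//   long  dataAddressesStart           // the remaining fields only when totalChunks > 0
//   vint  totalChunks
//   vint  maxUncompressedBlockLength
//   vint  maxNumDocsInAnyBlock
//   vint  DIRECT_MONOTONIC_BLOCK_SHIFT
//   ...   DirectMonotonic meta for block addresses, then long addressesLength
//   long  dataDocRangeStart
//   ...   DirectMonotonic meta for block doc ranges, then long docRangesLen
```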
```java
    @Override
    public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException {
        meta.writeInt(field.number);
```