@@ -9,6 +9,8 @@

 package org.elasticsearch.index.engine;

+import com.carrotsearch.hppc.IntArrayList;
+
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.ScoreDoc;
@@ -84,14 +86,7 @@ public LuceneSyntheticSourceChangesSnapshot(
         this.sourceLoader = mapperService.mappingLookup().newSourceLoader(null, SourceFieldMetrics.NOOP);
         Set<String> storedFields = sourceLoader.requiredStoredFields();

-        // If more than a few ops are requested, enforce a sequential reader. Typically, thousands of ops may be requested.
-        // Given how LuceneSyntheticSourceChangesSnapshot accesses stored fields, it should always benefit from a sequential reader.
-        //
-        // A sequential reader decompresses a block eagerly, so that increasing adjacent doc ids can access stored fields without
-        // decompressing on each StoredFields#document(docId) invocation. The only downside is that the last few operations in the
-        // requested seq_no range may sit at the beginning of a block, so stored fields for many docs are decompressed but never used.
-        boolean shouldForceSequentialReader = toSeqNo - fromSeqNo > 10;
-        this.storedFieldLoader = StoredFieldLoader.create(false, storedFields, shouldForceSequentialReader);
+        this.storedFieldLoader = StoredFieldLoader.createLoaderWithMaybeSequentialReader(storedFields);
         this.lastSeenSeqNo = fromSeqNo - 1;
     }

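Side note on the tradeoff the removed comment describes: a sequential reader eagerly decompresses a whole stored-fields block, so increasing doc ids read from the already-decompressed block instead of triggering decompression on every StoredFields#document(docId) call, at the cost of decompressing documents that are never consulted. The old code forced this whenever more than 10 ops were requested; handing the loader the concrete doc ids (as the second hunk below does) lets it decide per leaf. A minimal sketch of such a density check, for illustration only; the class name, `useSequentialReader`, and both thresholds are hypothetical, not the actual StoredFieldLoader logic:

    // Hypothetical per-leaf heuristic: is eager block decompression worth it?
    class SequentialReaderHeuristic {
        static boolean useSequentialReader(int[] docs) {
            if (docs == null || docs.length < 10) {
                return false; // few lookups: random access per document is cheap enough
            }
            // Dense, increasing doc ids amortize block decompression across many reads;
            // sparse ids would decompress mostly unused documents.
            int span = docs[docs.length - 1] - docs[0] + 1;
            return docs.length >= span / 2; // at least half the docs in the span are read
        }
    }
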
@@ -199,8 +194,21 @@ private Translog.Operation[] loadDocuments(List<SearchRecord> documentRecords) t
                 maxDoc = leafReaderContext.reader().maxDoc();
             } while (docRecord.docID() >= docBase + maxDoc);

-            leafFieldLoader = storedFieldLoader.getLoader(leafReaderContext, null);
-            leafSourceLoader = sourceLoader.leaf(leafReaderContext.reader(), null);
+            // Collect the segment-relative doc ids of the remaining records that fall into
+            // the current leaf, so the loaders can choose the best stored field reader.
+            IntArrayList nextDocIds = new IntArrayList();
+            for (int j = i; j < documentRecords.size(); j++) {
+                int docID = documentRecords.get(j).docID();
+                if (docID >= docBase + maxDoc) {
+                    break;
+                }
+                int segmentDocID = docID - docBase;
+                nextDocIds.add(segmentDocID);
+            }
+
+            int[] nextDocIdArray = nextDocIds.toArray();
+            leafFieldLoader = storedFieldLoader.getLoader(leafReaderContext, nextDocIdArray);
+            leafSourceLoader = sourceLoader.leaf(leafReaderContext.reader(), nextDocIdArray);
             setNextSourceMetadataReader(leafReaderContext);
         }
         int segmentDocID = docRecord.docID() - docBase;
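Note that the loop must break (not continue) on the first record past the leaf boundary: records are sorted by doc id, so everything after it belongs to a later leaf. To make that windowing logic concrete, here is a self-contained illustration of the same per-leaf collection, with plain arrays standing in for SearchRecord and a plain list for hppc's IntArrayList; all names here are made up for the demo:

    import java.util.ArrayList;
    import java.util.List;

    class LeafDocIdWindow {
        // Return the segment-relative doc ids of the sorted global doc ids,
        // starting at index `from`, that fall into the leaf covering
        // [docBase, docBase + maxDoc).
        static int[] docIdsInLeaf(int[] sortedGlobalDocIds, int from, int docBase, int maxDoc) {
            List<Integer> ids = new ArrayList<>();
            for (int j = from; j < sortedGlobalDocIds.length; j++) {
                int docID = sortedGlobalDocIds[j];
                if (docID >= docBase + maxDoc) {
                    break; // sorted input: remaining records belong to later leaves
                }
                ids.add(docID - docBase);
            }
            return ids.stream().mapToInt(Integer::intValue).toArray();
        }

        public static void main(String[] args) {
            // Leaf covers global ids [100, 150); records 120 and 140 map to 20 and 40.
            int[] records = { 120, 140, 160, 170 };
            int[] inLeaf = docIdsInLeaf(records, 0, 100, 50);
            System.out.println(java.util.Arrays.toString(inLeaf)); // prints [20, 40]
        }
    }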