
Commit 2971d5b

Merge branch 'main' into fix/script_sort_concurrency
2 parents 6249df8 + 03d8bdf commit 2971d5b

File tree

53 files changed: +320 −2499 lines


docs/changelog/124394.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 124394
+summary: Avoid `NamedWritable` in block serialization
+area: ES|QL
+type: enhancement
+issues: []

libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileAccessTree.java

Lines changed: 2 additions & 2 deletions
@@ -23,8 +23,8 @@
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -55,7 +55,7 @@ public String toString() {
 }

 static List<ExclusivePath> buildExclusivePathList(List<ExclusiveFileEntitlement> exclusiveFileEntitlements, PathLookup pathLookup) {
-    Map<String, ExclusivePath> exclusivePaths = new LinkedHashMap<>();
+    Map<String, ExclusivePath> exclusivePaths = new HashMap<>();
     for (ExclusiveFileEntitlement efe : exclusiveFileEntitlements) {
         for (FilesEntitlement.FileData fd : efe.filesEntitlement().filesData()) {
             if (fd.exclusive()) {

libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java

Lines changed: 7 additions & 4 deletions
@@ -38,7 +38,8 @@
 import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED;
 import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.SERVER_COMPONENT_NAME;
 import static org.hamcrest.Matchers.aMapWithSize;
-import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.sameInstance;

@@ -493,9 +494,11 @@ public void testFilesEntitlementsWithExclusive() {
         );
         assertThat(
             iae.getMessage(),
-            equalTo(
-                "Path [/base/test] is already exclusive to [plugin1][test.module1],"
-                    + " cannot add exclusive access for [plugin2][test.module2]"
+            allOf(
+                containsString("Path [/base/test] is already exclusive"),
+                containsString("[plugin1][test.module1]"),
+                containsString("[plugin2][test.module2]"),
+                containsString("cannot add exclusive access")
             )
         );
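Note (not part of the diff): the two hunks above are connected. Switching `buildExclusivePathList` from `LinkedHashMap` to `HashMap` drops the insertion-order guarantee on the exclusive-path map, so the exact ordering of the duplicate-path error message is no longer deterministic, which is presumably why the test now matches message fragments with `allOf(containsString(...))` instead of the full string via `equalTo`. A minimal, self-contained illustration of that ordering difference (the keys here are hypothetical, not the actual entitlement code):

```java
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class MapOrderDemo {
    public static void main(String[] args) {
        Map<String, String> insertionOrdered = new LinkedHashMap<>();
        Map<String, String> unordered = new HashMap<>();
        for (String path : new String[] { "/base/test", "/base/a", "/base/b" }) {
            insertionOrdered.put(path, "plugin-for-" + path);
            unordered.put(path, "plugin-for-" + path);
        }
        // LinkedHashMap iterates in insertion order, so anything built from it is deterministic.
        System.out.println(insertionOrdered.keySet()); // [/base/test, /base/a, /base/b]
        // HashMap iteration order depends on hashing and is not guaranteed, hence the
        // order-agnostic containsString assertions in the test above.
        System.out.println(unordered.keySet()); // some permutation of the three paths
    }
}
```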

muted-tests.yml

Lines changed: 3 additions & 6 deletions
@@ -330,12 +330,9 @@ tests:
 - class: org.elasticsearch.smoketest.MlWithSecurityIT
   method: test {yaml=ml/3rd_party_deployment/Test start deployment fails while model download in progress}
   issue: https://github.com/elastic/elasticsearch/issues/120814
-- class: org.elasticsearch.search.query.QueryPhaseTimeoutTests
-  method: testScorerTimeoutPoints
-  issue: https://github.com/elastic/elasticsearch/issues/124140
-- class: org.elasticsearch.search.query.QueryPhaseTimeoutTests
-  method: testScorerTimeoutTerms
-  issue: https://github.com/elastic/elasticsearch/issues/124141
+- class: org.elasticsearch.compute.data.BlockMultiValuedTests
+  method: testToMask {elementType=BOOLEAN nullAllowed=true}
+  issue: https://github.com/elastic/elasticsearch/issues/124165
 - class: org.elasticsearch.smoketest.MlWithSecurityIT
   method: test {yaml=ml/start_data_frame_analytics/Test start classification analysis when the dependent variable is missing}
   issue: https://github.com/elastic/elasticsearch/issues/124168

server/src/main/java/org/elasticsearch/TransportVersions.java

Lines changed: 1 addition & 0 deletions
@@ -179,6 +179,7 @@ static TransportVersion def(int id) {
 public static final TransportVersion INCLUDE_INDEX_MODE_IN_GET_DATA_STREAM = def(9_023_0_00);
 public static final TransportVersion MAX_OPERATION_SIZE_REJECTIONS_ADDED = def(9_024_0_00);
 public static final TransportVersion RETRY_ILM_ASYNC_ACTION_REQUIRE_ERROR = def(9_025_0_00);
+public static final TransportVersion ESQL_SERIALIZE_BLOCK_TYPE_CODE = def(9_026_0_00);

 /*
  * STOP! READ THIS FIRST! No, really,
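Note (not part of the diff): the changelog entry, this transport version, and the trimmed generated `*Block` classes below all point at the same change: instead of routing each block through the `NamedWriteable` machinery (which writes the registered name as a string), the writer emits a compact type code that the reader switches on. A hedged sketch of that general pattern using plain `java.io` streams rather than Elasticsearch's `StreamOutput`/`StreamInput`; the type codes and block classes are illustrative stand-ins, not the actual ES|QL implementation:

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

// Illustrative stand-ins for ES|QL blocks; not the real classes.
interface Block {
    byte typeCode();
    void writeTo(DataOutput out) throws IOException;
}

record IntBlockStub(int[] values) implements Block {
    static final byte TYPE_CODE = 1;

    @Override
    public byte typeCode() {
        return TYPE_CODE;
    }

    @Override
    public void writeTo(DataOutput out) throws IOException {
        out.writeInt(values.length);
        for (int v : values) {
            out.writeInt(v);
        }
    }

    static IntBlockStub readFrom(DataInput in) throws IOException {
        int[] values = new int[in.readInt()];
        for (int i = 0; i < values.length; i++) {
            values[i] = in.readInt();
        }
        return new IntBlockStub(values);
    }
}

final class BlockSerde {
    // One byte up front identifies the concrete block type, replacing the
    // per-block registered name that a NamedWriteable-style scheme would write.
    static void write(Block block, DataOutput out) throws IOException {
        out.writeByte(block.typeCode());
        block.writeTo(out);
    }

    static Block read(DataInput in) throws IOException {
        byte code = in.readByte();
        return switch (code) {
            case IntBlockStub.TYPE_CODE -> IntBlockStub.readFrom(in);
            default -> throw new IOException("unknown block type code: " + code);
        };
    }
}
```

Writing one byte instead of a class name shrinks the wire format and avoids a registry lookup on the read side, which is presumably the enhancement the changelog summary describes.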

server/src/test/java/org/elasticsearch/search/query/QueryPhaseTimeoutTests.java

Lines changed: 118 additions & 11 deletions
@@ -116,6 +116,7 @@ public void tearDown() throws Exception {
 }

 private static ContextIndexSearcher newContextSearcher(IndexReader reader) throws IOException {
+    // note that no executor is provided, as this test requires sequential execution
     return new ContextIndexSearcher(
         reader,
         IndexSearcher.getDefaultSimilarity(),
@@ -125,27 +126,35 @@ private static ContextIndexSearcher newContextSearcher(IndexReader reader) throw
     );
 }

-public void testScorerTimeoutTerms() throws IOException {
+/**
+ * Test that a timeout is appropriately handled when the (exitable) directory reader raises it while loading terms enum
+ * as the scorer supplier is requested.
+ */
+public void testScorerSupplierTimeoutTerms() throws IOException {
     assumeTrue("Test requires more than one segment", reader.leaves().size() > 1);
     int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
-    scorerTimeoutTest(size, context -> {
+    scorerSupplierTimeoutTest(size, context -> {
         final TermsEnum termsEnum = context.reader().terms("field").iterator();
         termsEnum.next();
     });
 }

-public void testScorerTimeoutPoints() throws IOException {
+/**
+ * Test that a timeout is appropriately handled when the (exitable) directory reader raises it while loading points
+ * as the scorer supplier is requested.
+ */
+public void testScorerSupplierTimeoutPoints() throws IOException {
     assumeTrue("Test requires more than one segment", reader.leaves().size() > 1);
     int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
-    scorerTimeoutTest(size, context -> {
+    scorerSupplierTimeoutTest(size, context -> {
         PointValues pointValues = context.reader().getPointValues("long");
         pointValues.size();
     });
 }

-private void scorerTimeoutTest(int size, CheckedConsumer<LeafReaderContext, IOException> timeoutTrigger) throws IOException {
+private void scorerSupplierTimeoutTest(int size, CheckedConsumer<LeafReaderContext, IOException> timeoutTrigger) throws IOException {
     {
-        TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, false);
+        TimeoutQuery query = newMatchAllScorerSupplierTimeoutQuery(timeoutTrigger, false);
         try (SearchContext context = createSearchContext(query, size)) {
             QueryPhase.executeQuery(context);
             assertFalse(context.queryResult().searchTimedOut());
@@ -154,18 +163,20 @@ private void scorerTimeoutTest(int size, CheckedConsumer<LeafReaderContext, IOEx
         }
     }
     {
-        TimeoutQuery query = newMatchAllScorerTimeoutQuery(timeoutTrigger, true);
+        TimeoutQuery query = newMatchAllScorerSupplierTimeoutQuery(timeoutTrigger, true);
         try (SearchContext context = createSearchContextWithTimeout(query, size)) {
             QueryPhase.executeQuery(context);
             assertTrue(context.queryResult().searchTimedOut());
             int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc();
-            assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value());
+            // we are artificially raising the timeout when pulling the scorer supplier.
+            // We score the entire first segment, then trigger timeout.
+            assertEquals(firstSegmentMaxDoc, context.queryResult().topDocs().topDocs.totalHits.value());
             assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length);
         }
     }
 }

-private static TimeoutQuery newMatchAllScorerTimeoutQuery(
+private static TimeoutQuery newMatchAllScorerSupplierTimeoutQuery(
     CheckedConsumer<LeafReaderContext, IOException> timeoutTrigger,
     boolean isTimeoutExpected
 ) {
@@ -177,6 +188,7 @@ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float bo
 
 @Override
 public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+    // trigger the timeout as soon as the scorer supplier is requested for the second segment
     if (firstSegment == false && isTimeoutExpected) {
         shouldTimeout = true;
     }
@@ -190,6 +202,96 @@ public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOExcepti
     };
 }

+/**
+ * Test that a timeout is appropriately handled when the (exitable) directory reader raises it while loading terms enum
+ * as the scorer is retrieved from the scorer supplier
+ */
+public void testScorerGetTimeoutTerms() throws IOException {
+    assumeTrue("Test requires more than one segment", reader.leaves().size() > 1);
+    int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
+    scorerGetTimeoutTest(size, context -> {
+        final TermsEnum termsEnum = context.reader().terms("field").iterator();
+        termsEnum.next();
+    });
+}
+
+/**
+ * Test that a timeout is appropriately handled when the (exitable) directory reader raises it while loading points
+ * as the scorer is retrieved from the scorer supplier
+ */
+public void testScorerGetTimeoutPoints() throws IOException {
+    assumeTrue("Test requires more than one segment", reader.leaves().size() > 1);
+    int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
+    scorerGetTimeoutTest(size, context -> {
+        PointValues pointValues = context.reader().getPointValues("long");
+        pointValues.size();
+    });
+}
+
+private void scorerGetTimeoutTest(int size, CheckedConsumer<LeafReaderContext, IOException> timeoutTrigger) throws IOException {
+    {
+        TimeoutQuery query = newMatchAllScorerGetTimeoutQuery(timeoutTrigger, false);
+        try (SearchContext context = createSearchContext(query, size)) {
+            QueryPhase.executeQuery(context);
+            assertFalse(context.queryResult().searchTimedOut());
+            assertEquals(numDocs, context.queryResult().topDocs().topDocs.totalHits.value());
+            assertEquals(size, context.queryResult().topDocs().topDocs.scoreDocs.length);
+        }
+    }
+    {
+        TimeoutQuery query = newMatchAllScorerGetTimeoutQuery(timeoutTrigger, true);
+        try (SearchContext context = createSearchContextWithTimeout(query, size)) {
+            QueryPhase.executeQuery(context);
+            assertTrue(context.queryResult().searchTimedOut());
+            int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc();
+            // we are artificially raising the timeout when pulling the scorer supplier.
+            // We score the entire first segment, then trigger timeout.
+            assertEquals(firstSegmentMaxDoc, context.queryResult().topDocs().topDocs.totalHits.value());
+            assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length);
+        }
+    }
+}
+
+private static TimeoutQuery newMatchAllScorerGetTimeoutQuery(
+    CheckedConsumer<LeafReaderContext, IOException> timeoutTrigger,
+    boolean isTimeoutExpected
+) {
+    return new TimeoutQuery() {
+        @Override
+        public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
+            return new MatchAllWeight(this, boost, scoreMode) {
+                boolean firstSegment = true;
+
+                @Override
+                public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
+                    ScorerSupplier scorerSupplier = super.scorerSupplier(context);
+                    return new ScorerSupplier() {
+                        @Override
+                        public Scorer get(long leadCost) throws IOException {
+                            // trigger the timeout as soon as the scorer is requested for the second segment
+                            if (firstSegment == false && isTimeoutExpected) {
+                                shouldTimeout = true;
+                            }
+                            timeoutTrigger.accept(context);
+                            assert shouldTimeout == false : "should have already timed out";
+                            firstSegment = false;
+                            return scorerSupplier.get(leadCost);
+                        }
+
+                        @Override
+                        public long cost() {
+                            return scorerSupplier.cost();
+                        }
+                    };
+                }
+            };
+        }
+    };
+}
+
+/**
+ * Test that a timeout is appropriately handled while bulk scoring, via cancellable bulk scorer
+ */
 public void testBulkScorerTimeout() throws IOException {
     int size = randomBoolean() ? 0 : randomIntBetween(100, 500);
     {
{
@@ -207,6 +309,8 @@ public void testBulkScorerTimeout() throws IOException {
             QueryPhase.executeQuery(context);
             assertTrue(context.queryResult().searchTimedOut());
             int firstSegmentMaxDoc = reader.leaves().get(0).reader().maxDoc();
+            // See CancellableBulkScorer#INITIAL_INTERVAL for the source of 2048: we always score the first
+            // batch of up to 2048 docs, and only then raise the timeout
             assertEquals(Math.min(2048, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.totalHits.value());
             assertEquals(Math.min(size, firstSegmentMaxDoc), context.queryResult().topDocs().topDocs.scoreDocs.length);
         }
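Note (not part of the diff): the 2048 in this assertion comes from the interval strategy the new comment references: a cancellable bulk scorer scores a first small batch of documents, checks for cancellation or timeout between batches, and then moves on to progressively larger batches. A rough sketch of that idea under assumed names (`INITIAL_INTERVAL`, `checkCancelled`); it mirrors the concept, not the actual `CancellableBulkScorer` code:

```java
// Sketch of interval-based bulk scoring with periodic cancellation checks.
// The names and the scoring callback are illustrative assumptions.
final class IntervalScoringSketch {
    static final int INITIAL_INTERVAL = 2048;

    interface DocRangeScorer {
        /** Scores docs in [min, max) and returns the next doc to score. */
        int score(int min, int max);
    }

    static void scoreAll(DocRangeScorer scorer, int maxDoc, Runnable checkCancelled) {
        int interval = INITIAL_INTERVAL;
        int doc = 0;
        while (doc < maxDoc) {
            // The check runs between batches, so when the timeout is only raised during
            // scoring (as in the test above), the first batch of up to INITIAL_INTERVAL
            // docs always completes before the check can observe it.
            checkCancelled.run();
            int upTo = (int) Math.min((long) doc + interval, maxDoc);
            doc = scorer.score(doc, upTo);
            // grow the batch size so the check overhead shrinks on large segments
            interval = interval < Integer.MAX_VALUE / 2 ? interval * 2 : interval;
        }
    }
}
```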
@@ -233,7 +337,7 @@ public long cost() {
 }

 @Override
-public BulkScorer bulkScorer() throws IOException {
+public BulkScorer bulkScorer() {
     final float score = score();
     final int maxDoc = context.reader().maxDoc();
     return new BulkScorer() {
@@ -251,7 +355,7 @@ public int score(LeafCollector collector, Bits acceptDocs, int min, int max) thr
 }
 if (timeoutExpected) {
     // timeout after collecting the first batch of documents from the 1st segment, or the entire 1st
-    // segment
+    // segment if max > firstSegment.maxDoc()
     shouldTimeout = true;
 }
 return max == maxDoc ? DocIdSetIterator.NO_MORE_DOCS : max;
@@ -274,6 +378,9 @@ private TestSearchContext createSearchContextWithTimeout(TimeoutQuery query, int
 TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)) {
     @Override
     public long getRelativeTimeInMillis() {
+        // this controls whether a timeout is raised or not. We abstract time away by pretending that the clock stops
+        // when a timeout is not expected. The tiniest increment to relative time in millis triggers a timeout.
+        // See QueryPhase#getTimeoutCheck
         return query.shouldTimeout ? 1L : 0L;
     }
 };
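Note (not part of the diff): the overridden `getRelativeTimeInMillis` works because the production timeout check compares elapsed relative time against the allowed budget; with the clock pinned at 0 the check never fires, and bumping it to 1 ms against a tiny budget fires it immediately. A hedged sketch of such a relative-time check, with assumed names rather than the actual `QueryPhase#getTimeoutCheck` internals:

```java
import java.util.function.BooleanSupplier;
import java.util.function.LongSupplier;

// Sketch: a timeout check built from a relative clock, in the spirit of what the
// test drives via getRelativeTimeInMillis(). Names and budgets are illustrative.
final class TimeoutCheckSketch {
    static BooleanSupplier newTimeoutCheck(LongSupplier relativeTimeInMillis, long timeoutInMillis) {
        final long startTime = relativeTimeInMillis.getAsLong();
        // Times out once the relative clock has advanced past the allowed budget.
        return () -> relativeTimeInMillis.getAsLong() - startTime > timeoutInMillis;
    }

    public static void main(String[] args) {
        long[] clock = { 0L };
        // a zero-millisecond budget stands in for a very small configured timeout
        BooleanSupplier check = newTimeoutCheck(() -> clock[0], 0L);
        System.out.println(check.getAsBoolean()); // false: the clock has not moved
        clock[0] = 1L;                            // the "tiniest increment" from the test
        System.out.println(check.getAsBoolean()); // true: the timeout fires
    }
}
```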

x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java

Lines changed: 0 additions & 13 deletions
Some generated files are not rendered by default.

x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java

Lines changed: 0 additions & 13 deletions
Some generated files are not rendered by default.

x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java

Lines changed: 0 additions & 13 deletions
Some generated files are not rendered by default.

x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FloatBlock.java

Lines changed: 0 additions & 13 deletions
Some generated files are not rendered by default.
