Commit daf6381

Merge branch 'main' into esql-test-cleanup

2 parents a90439f + 0b06d9f commit daf6381

30 files changed: +403 additions, -149 deletions

libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java

Lines changed: 7 additions & 0 deletions
@@ -10,6 +10,7 @@
 package org.elasticsearch.xcontent;
 
 import org.elasticsearch.core.CheckedFunction;
+import org.elasticsearch.core.UpdateForV10;
 import org.elasticsearch.xcontent.ObjectParser.NamedObjectParser;
 import org.elasticsearch.xcontent.ObjectParser.ValueType;
 

@@ -230,11 +231,13 @@ public void declareDoubleOrNull(BiConsumer<Value, Double> consumer, double nullV
         );
     }
 
+    @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // https://github.com/elastic/elasticsearch/issues/130797
     public void declareLong(BiConsumer<Value, Long> consumer, ParseField field) {
         // Using a method reference here angers some compilers
         declareField(consumer, p -> p.longValue(), field, ValueType.LONG);
     }
 
+    @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // https://github.com/elastic/elasticsearch/issues/130797
     public void declareLongOrNull(BiConsumer<Value, Long> consumer, long nullValue, ParseField field) {
         // Using a method reference here angers some compilers
         declareField(

@@ -245,6 +248,7 @@ public void declareLongOrNull(BiConsumer<Value, Long> consumer, long nullValue,
         );
     }
 
+    @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // https://github.com/elastic/elasticsearch/issues/130797
     public void declareInt(BiConsumer<Value, Integer> consumer, ParseField field) {
         // Using a method reference here angers some compilers
         declareField(consumer, p -> p.intValue(), field, ValueType.INT);

@@ -253,6 +257,7 @@ public void declareInt(BiConsumer<Value, Integer> consumer, ParseField field) {
     /**
      * Declare an integer field that parses explicit {@code null}s in the json to a default value.
      */
+    @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // https://github.com/elastic/elasticsearch/issues/130797
     public void declareIntOrNull(BiConsumer<Value, Integer> consumer, int nullValue, ParseField field) {
         declareField(
             consumer,

@@ -320,10 +325,12 @@ public void declareFloatArray(BiConsumer<Value, List<Float>> consumer, ParseFiel
         declareFieldArray(consumer, (p, c) -> p.floatValue(), field, ValueType.FLOAT_ARRAY);
     }
 
+    @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // https://github.com/elastic/elasticsearch/issues/130797
     public void declareLongArray(BiConsumer<Value, List<Long>> consumer, ParseField field) {
         declareFieldArray(consumer, (p, c) -> p.longValue(), field, ValueType.LONG_ARRAY);
     }
 
+    @UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA) // https://github.com/elastic/elasticsearch/issues/130797
     public void declareIntArray(BiConsumer<Value, List<Integer>> consumer, ParseField field) {
         declareFieldArray(consumer, (p, c) -> p.intValue(), field, ValueType.INT_ARRAY);
     }
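
For reviewers unfamiliar with this API: the annotated methods are ObjectParser's declare-helpers for numeric fields. A minimal usage sketch follows; the Point class and its fields are hypothetical and not part of this change.

import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField;

// Hypothetical value class, for illustration only.
class Point {
    long x;
    int y;
}

ObjectParser<Point, Void> parser = new ObjectParser<>("point", Point::new);
// declareLong/declareInt register a field name plus a setter; both helpers are
// now flagged @UpdateForV10 pending the numeric-parsing cleanup in issue 130797.
parser.declareLong((point, v) -> point.x = v, new ParseField("x"));
parser.declareInt((point, v) -> point.y = v, new ParseField("y"));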

muted-tests.yml

Lines changed: 0 additions & 12 deletions
@@ -461,9 +461,6 @@ tests:
 - class: org.elasticsearch.upgrades.MlJobSnapshotUpgradeIT
   method: testSnapshotUpgrader
   issue: https://github.com/elastic/elasticsearch/issues/98560
-- class: org.elasticsearch.upgrades.QueryableBuiltInRolesUpgradeIT
-  method: testBuiltInRolesSyncedOnClusterUpgrade
-  issue: https://github.com/elastic/elasticsearch/issues/129534
 - class: org.elasticsearch.search.query.VectorIT
   method: testFilteredQueryStrategy
   issue: https://github.com/elastic/elasticsearch/issues/129517

@@ -546,12 +543,6 @@ tests:
 - class: org.elasticsearch.xpack.esql.action.CrossClusterAsyncQueryStopIT
   method: testStopQueryLocal
   issue: https://github.com/elastic/elasticsearch/issues/121672
-- class: org.elasticsearch.xpack.esql.qa.multi_node.GenerativeIT
-  method: test
-  issue: https://github.com/elastic/elasticsearch/issues/130067
-- class: org.elasticsearch.xpack.esql.qa.single_node.GenerativeIT
-  method: test
-  issue: https://github.com/elastic/elasticsearch/issues/130067
 - class: org.elasticsearch.xpack.esql.action.EsqlRemoteErrorWrapIT
   method: testThatRemoteErrorsAreWrapped
   issue: https://github.com/elastic/elasticsearch/issues/130794

@@ -576,9 +567,6 @@ tests:
 - class: org.elasticsearch.xpack.esql.action.EsqlActionBreakerIT
   method: testRowStatsProjectGroupByInt
   issue: https://github.com/elastic/elasticsearch/issues/131024
-- class: org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT
-  method: test {p0=esql/60_enrich/Enrich in fork}
-  issue: https://github.com/elastic/elasticsearch/issues/131028
 
 # Examples:
 #

rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json

Lines changed: 9 additions & 0 deletions
@@ -73,6 +73,15 @@
       "description":"Determines the geometry type for features in the aggs layer.",
       "default":"grid"
     },
+    "grid_agg":{
+      "type":"enum",
+      "options":[
+        "geotile",
+        "geohex"
+      ],
+      "description":"Aggregation used to create a grid for `field`.",
+      "default":"geotile"
+    },
     "size":{
       "type":"int",
       "description":"Maximum number of features to return in the hits layer. Accepts 0-10000.",

server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java

Lines changed: 68 additions & 0 deletions
@@ -452,4 +452,72 @@ public Builder add(ShardId shardId, long reservedBytes) {
             }
         }
     }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    public static class Builder {
+        private Map<String, DiskUsage> leastAvailableSpaceUsage = Map.of();
+        private Map<String, DiskUsage> mostAvailableSpaceUsage = Map.of();
+        private Map<String, Long> shardSizes = Map.of();
+        private Map<ShardId, Long> shardDataSetSizes = Map.of();
+        private Map<NodeAndShard, String> dataPath = Map.of();
+        private Map<NodeAndPath, ReservedSpace> reservedSpace = Map.of();
+        private Map<String, EstimatedHeapUsage> estimatedHeapUsages = Map.of();
+        private Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools = Map.of();
+
+        public ClusterInfo build() {
+            return new ClusterInfo(
+                leastAvailableSpaceUsage,
+                mostAvailableSpaceUsage,
+                shardSizes,
+                shardDataSetSizes,
+                dataPath,
+                reservedSpace,
+                estimatedHeapUsages,
+                nodeUsageStatsForThreadPools
+            );
+        }
+
+        public Builder leastAvailableSpaceUsage(Map<String, DiskUsage> leastAvailableSpaceUsage) {
+            this.leastAvailableSpaceUsage = leastAvailableSpaceUsage;
+            return this;
+        }
+
+        public Builder mostAvailableSpaceUsage(Map<String, DiskUsage> mostAvailableSpaceUsage) {
+            this.mostAvailableSpaceUsage = mostAvailableSpaceUsage;
+            return this;
+        }
+
+        public Builder shardSizes(Map<String, Long> shardSizes) {
+            this.shardSizes = shardSizes;
+            return this;
+        }
+
+        public Builder shardDataSetSizes(Map<ShardId, Long> shardDataSetSizes) {
+            this.shardDataSetSizes = shardDataSetSizes;
+            return this;
+        }
+
+        public Builder dataPath(Map<NodeAndShard, String> dataPath) {
+            this.dataPath = dataPath;
+            return this;
+        }
+
+        public Builder reservedSpace(Map<NodeAndPath, ReservedSpace> reservedSpace) {
+            this.reservedSpace = reservedSpace;
+            return this;
+        }
+
+        public Builder estimatedHeapUsages(Map<String, EstimatedHeapUsage> estimatedHeapUsages) {
+            this.estimatedHeapUsages = estimatedHeapUsages;
+            return this;
+        }
+
+        public Builder nodeUsageStatsForThreadPools(Map<String, NodeUsageStatsForThreadPools> nodeUsageStatsForThreadPools) {
+            this.nodeUsageStatsForThreadPools = nodeUsageStatsForThreadPools;
+            return this;
+        }
+    }
 }
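
The new builder spares call sites (mostly tests) from passing all eight maps positionally. A sketch of the intended call pattern; the shard-size key shown is illustrative, not a value from this commit.

// Unset maps default to Map.of(), so only the relevant ones need to be supplied.
ClusterInfo info = ClusterInfo.builder()
    .shardSizes(Map.of("[my-index][0][p]", 1_000_000L)) // hypothetical key/value
    .build();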

server/src/main/java/org/elasticsearch/common/lucene/Lucene.java

Lines changed: 15 additions & 4 deletions
@@ -739,15 +739,26 @@ public static Version parseVersionLenient(String toParse, Version defaultValue)
      * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
      */
     public static SegmentReader segmentReader(LeafReader reader) {
+        SegmentReader segmentReader = tryUnwrapSegmentReader(reader);
+        if (segmentReader == null) {
+            throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
+        }
+        return segmentReader;
+    }
+
+    /**
+     * Tries to extract a segment reader from the given index reader. Unlike {@link #segmentReader(LeafReader)} this method returns
+     * null if no SegmentReader can be unwrapped instead of throwing an exception.
+     */
+    public static SegmentReader tryUnwrapSegmentReader(LeafReader reader) {
         if (reader instanceof SegmentReader) {
             return (SegmentReader) reader;
         } else if (reader instanceof final FilterLeafReader fReader) {
-            return segmentReader(FilterLeafReader.unwrap(fReader));
+            return tryUnwrapSegmentReader(FilterLeafReader.unwrap(fReader));
         } else if (reader instanceof final FilterCodecReader fReader) {
-            return segmentReader(FilterCodecReader.unwrap(fReader));
+            return tryUnwrapSegmentReader(FilterCodecReader.unwrap(fReader));
         }
-        // hard fail - we can't get a SegmentReader
-        throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
+        return null;
     }
 
     @SuppressForbidden(reason = "Version#parseLeniently() used in a central place")
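
The refactoring splits the strict unwrap from a lenient variant, so callers can probe for a SegmentReader without catching IllegalStateException. A sketch of both call styles, assuming a LeafReader named leafReader is in scope:

// Lenient: returns null instead of throwing when no SegmentReader can be unwrapped.
SegmentReader segmentReader = Lucene.tryUnwrapSegmentReader(leafReader);
if (segmentReader != null) {
    // safe to use segment-level information here
}

// Strict: behavior unchanged, still throws IllegalStateException on failure.
SegmentReader required = Lucene.segmentReader(leafReader);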

server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java

Lines changed: 1 addition & 0 deletions
@@ -737,6 +737,7 @@ public void sanitizeHeaders() {
             entry -> entry.getKey().equalsIgnoreCase("authorization")
                 || entry.getKey().equalsIgnoreCase("es-secondary-authorization")
                 || entry.getKey().equalsIgnoreCase("ES-Client-Authentication")
+                || entry.getKey().equalsIgnoreCase("X-Client-Authentication")
         );
 
         final ThreadContextStruct newContext = new ThreadContextStruct(
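
The predicate removes credential-bearing headers from the thread context; this change adds X-Client-Authentication to the list. A standalone sketch of the same case-insensitive filtering idea, using a plain header map rather than the real ThreadContextStruct:

import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

// Hypothetical standalone analog of the sanitization predicate above.
static final Set<String> SENSITIVE_HEADERS = Set.of(
    "authorization", "es-secondary-authorization", "es-client-authentication", "x-client-authentication"
);

static Map<String, String> sanitize(Map<String, String> headers) {
    return headers.entrySet()
        .stream()
        .filter(e -> SENSITIVE_HEADERS.contains(e.getKey().toLowerCase(Locale.ROOT)) == false)
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}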
server/src/main/java/org/elasticsearch/index/codec/TrackingPostingsInMemoryBytesCodec.java

Lines changed: 160 additions & 0 deletions

@@ -0,0 +1,160 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.index.codec;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.FilterCodec;
+import org.apache.lucene.codecs.NormsProducer;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.FilterLeafReader;
+import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.SegmentWriteState;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.internal.hppc.IntIntHashMap;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.util.FeatureFlag;
+
+import java.io.IOException;
+import java.util.function.IntConsumer;
+
+/**
+ * A codec that tracks the length of the min and max written terms. Used to improve memory usage estimates in serverless, since
+ * {@link org.apache.lucene.codecs.lucene90.blocktree.FieldReader} keeps an in-memory reference to the min and max term.
+ */
+public class TrackingPostingsInMemoryBytesCodec extends FilterCodec {
+    public static final FeatureFlag TRACK_POSTINGS_IN_MEMORY_BYTES = new FeatureFlag("track_postings_in_memory_bytes");
+
+    public static final String IN_MEMORY_POSTINGS_BYTES_KEY = "es.postings.in_memory_bytes";
+
+    public TrackingPostingsInMemoryBytesCodec(Codec delegate) {
+        super(delegate.getName(), delegate);
+    }
+
+    @Override
+    public PostingsFormat postingsFormat() {
+        PostingsFormat format = super.postingsFormat();
+
+        return new PostingsFormat(format.getName()) {
+            @Override
+            public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
+                FieldsConsumer consumer = format.fieldsConsumer(state);
+                return new TrackingLengthFieldsConsumer(state, consumer);
+            }
+
+            @Override
+            public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException {
+                return format.fieldsProducer(state);
+            }
+        };
+    }
+
+    static final class TrackingLengthFieldsConsumer extends FieldsConsumer {
+        final SegmentWriteState state;
+        final FieldsConsumer in;
+        final IntIntHashMap termsBytesPerField;
+
+        TrackingLengthFieldsConsumer(SegmentWriteState state, FieldsConsumer in) {
+            this.state = state;
+            this.in = in;
+            this.termsBytesPerField = new IntIntHashMap(state.fieldInfos.size());
+        }
+
+        @Override
+        public void write(Fields fields, NormsProducer norms) throws IOException {
+            in.write(new TrackingLengthFields(fields, termsBytesPerField, state.fieldInfos), norms);
+            long totalBytes = 0;
+            for (int bytes : termsBytesPerField.values) {
+                totalBytes += bytes;
+            }
+            state.segmentInfo.putAttribute(IN_MEMORY_POSTINGS_BYTES_KEY, Long.toString(totalBytes));
+        }
+
+        @Override
+        public void close() throws IOException {
+            in.close();
+        }
+    }
+
+    static final class TrackingLengthFields extends FilterLeafReader.FilterFields {
+        final IntIntHashMap termsBytesPerField;
+        final FieldInfos fieldInfos;
+
+        TrackingLengthFields(Fields in, IntIntHashMap termsBytesPerField, FieldInfos fieldInfos) {
+            super(in);
+            this.termsBytesPerField = termsBytesPerField;
+            this.fieldInfos = fieldInfos;
+        }
+
+        @Override
+        public Terms terms(String field) throws IOException {
+            Terms terms = super.terms(field);
+            if (terms == null) {
+                return null;
+            }
+            int fieldNum = fieldInfos.fieldInfo(field).number;
+            return new TrackingLengthTerms(
+                terms,
+                bytes -> termsBytesPerField.put(fieldNum, Math.max(termsBytesPerField.getOrDefault(fieldNum, 0), bytes))
+            );
+        }
+    }
+
+    static final class TrackingLengthTerms extends FilterLeafReader.FilterTerms {
+        final IntConsumer onFinish;
+
+        TrackingLengthTerms(Terms in, IntConsumer onFinish) {
+            super(in);
+            this.onFinish = onFinish;
+        }
+
+        @Override
+        public TermsEnum iterator() throws IOException {
+            return new TrackingLengthTermsEnum(super.iterator(), onFinish);
+        }
+    }
+
+    static final class TrackingLengthTermsEnum extends FilterLeafReader.FilterTermsEnum {
+        int maxTermLength = 0;
+        int minTermLength = 0;
+        int termCount = 0;
+        final IntConsumer onFinish;
+
+        TrackingLengthTermsEnum(TermsEnum in, IntConsumer onFinish) {
+            super(in);
+            this.onFinish = onFinish;
+        }
+
+        @Override
+        public BytesRef next() throws IOException {
+            final BytesRef term = super.next();
+            if (term != null) {
+                if (termCount == 0) {
+                    minTermLength = term.length;
+                }
+                maxTermLength = term.length;
+                termCount++;
+            } else {
+                if (termCount == 1) {
+                    // If the minTerm and maxTerm are the same, only one instance is kept on the heap.
+                    assert minTermLength == maxTermLength;
+                    onFinish.accept(maxTermLength);
+                } else {
+                    onFinish.accept(maxTermLength + minTermLength);
+                }
+            }
+            return term;
+        }
+    }
+}
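
Per the class javadoc, the codec records the combined length of each field's min and max terms in a segment attribute, since the block-tree FieldReader keeps those terms on the heap. A sketch of wiring the codec in and reading the attribute back; the writer config and segmentInfo handle are assumed, not part of this file:

import org.apache.lucene.codecs.Codec;

// Wrap the default codec so every flushed or merged segment records the attribute.
Codec codec = new TrackingPostingsInMemoryBytesCodec(Codec.getDefault());
// indexWriterConfig.setCodec(codec);

// Later, given a SegmentInfo (e.g. via segmentReader.getSegmentInfo().info):
// String raw = segmentInfo.getAttribute(TrackingPostingsInMemoryBytesCodec.IN_MEMORY_POSTINGS_BYTES_KEY);
// long postingsHeapBytes = raw == null ? 0L : Long.parseLong(raw);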
