Skip to content

Commit f0a37e3

Browse files
authored
Merge branch 'main' into online-prewarming-interface
2 parents a99ee7d + 4cc21b6 commit f0a37e3

File tree

16 files changed

+218
-98
lines changed

16 files changed

+218
-98
lines changed

.buildkite/scripts/dra-workflow.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,7 @@ echo --- Building release artifacts
7070
$VERSION_QUALIFIER_ARG \
7171
buildReleaseArtifacts \
7272
exportCompressedDockerImages \
73+
exportDockerContexts \
7374
:distribution:generateDependenciesReport
7475

7576
PATH="$PATH:${JAVA_HOME}/bin" # Required by the following script

distribution/docker/build.gradle

Lines changed: 22 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ dependencies {
132132
fips "org.bouncycastle:bctls-fips:1.0.19"
133133
}
134134

135-
ext.expansions = { Architecture architecture, DockerBase base ->
135+
ext.expansions = { Architecture architecture, DockerBase base, String publicationContext = '' ->
136136
def (major, minor) = VersionProperties.elasticsearch.split("\\.")
137137

138138
// We tag our Docker images with various pieces of information, including a timestamp
@@ -152,6 +152,7 @@ ext.expansions = { Architecture architecture, DockerBase base ->
152152
'license' : base == DockerBase.IRON_BANK ? 'Elastic License 2.0' : 'Elastic-License-2.0',
153153
'package_manager' : base.packageManager,
154154
'docker_base' : base.name().toLowerCase(),
155+
'docker_context' : publicationContext,
155156
'version' : VersionProperties.elasticsearch,
156157
'major_minor_version': "${major}.${minor}",
157158
'retry' : ShellRetry
@@ -179,9 +180,9 @@ private static String taskName(String prefix, Architecture architecture, DockerB
179180
suffix
180181
}
181182

182-
ext.dockerBuildContext = { Architecture architecture, DockerBase base ->
183+
ext.dockerBuildContext = { Architecture architecture, DockerBase base, String publicationContext = '' ->
183184
copySpec {
184-
final Map<String, String> varExpansions = expansions(architecture, base)
185+
final Map<String, String> varExpansions = expansions(architecture, base, publicationContext)
185186
final Path projectDir = project.projectDir.toPath()
186187

187188
if (base == DockerBase.IRON_BANK) {
@@ -291,17 +292,22 @@ tasks.named("composeUp").configure {
291292
dependsOn tasks.named("preProcessFixture")
292293
}
293294

294-
void addBuildDockerContextTask(Architecture architecture, DockerBase base) {
295+
296+
def exportDockerImages = tasks.register("exportDockerImages")
297+
def exportCompressedDockerImages = tasks.register("exportCompressedDockerImages")
298+
def exportDockerContexts = tasks.register("exportDockerContexts")
299+
300+
void addBuildDockerContextTask(Architecture architecture, DockerBase base, String taskSuffix = 'DockerContext', String classifier = "docker-build-context") {
295301
String configDirectory = base == DockerBase.IRON_BANK ? 'scripts' : 'config'
296302
String arch = architecture == Architecture.AARCH64 ? '-aarch64' : ''
297303

298304
final TaskProvider<Tar> buildDockerContextTask =
299-
tasks.register(taskName('build', architecture, base, 'DockerContext'), Tar) {
305+
tasks.register(taskName('build', architecture, base, taskSuffix), Tar) {
300306
archiveExtension = 'tar.gz'
301307
compression = Compression.GZIP
302-
archiveClassifier = "docker-build-context${arch}"
308+
archiveClassifier = "${classifier}${arch}"
303309
archiveBaseName = "elasticsearch${base.suffix}"
304-
with dockerBuildContext(architecture, base)
310+
with dockerBuildContext(architecture, base, classifier)
305311

306312
into(configDirectory) {
307313
from(configurations.log4jConfig) {
@@ -344,6 +350,10 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) {
344350
onlyIf("$architecture supported") { serviceProvider.get().isArchitectureSupported(architecture) }
345351
}
346352

353+
exportDockerContexts.configure {
354+
dependsOn buildDockerContextTask
355+
}
356+
347357
if (base == DockerBase.IRON_BANK) {
348358
tasks.named("assemble").configure {
349359
dependsOn(buildDockerContextTask)
@@ -578,12 +588,14 @@ for (final Architecture architecture : Architecture.values()) {
578588
addTransformDockerContextTask(architecture, base)
579589
addBuildDockerImageTask(architecture, base)
580590
}
591+
if(base == DockerBase.DEFAULT) {
592+
// Add additional docker hub specific context which we use solely for publishing to docker hub.
593+
// At the moment it only differs in not adding labels that we need for openshift certification
594+
addBuildDockerContextTask(architecture, base, 'DockerHubContext', "docker-hub-build-context")
595+
}
581596
}
582597
}
583598

584-
def exportDockerImages = tasks.register("exportDockerImages")
585-
def exportCompressedDockerImages = tasks.register("exportCompressedDockerImages")
586-
587599
/*
588600
* The export subprojects write out the generated Docker images to disk, so
589601
* that they can be easily reloaded, for example into a VM for distribution testing

distribution/docker/src/docker/Dockerfile.default

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -139,13 +139,15 @@ LABEL org.label-schema.build-date="${build_date}" \\
139139
org.opencontainers.image.vendor="Elastic" \\
140140
org.opencontainers.image.version="${version}"
141141

142+
<% if (docker_context != 'docker-hub-build-context') { %>
142143
LABEL name="Elasticsearch" \\
143144
maintainer="[email protected]" \\
144145
vendor="Elastic" \\
145146
version="${version}" \\
146147
release="1" \\
147148
summary="Elasticsearch" \\
148149
description="You know, for search."
150+
<% } %>
149151

150152
RUN mkdir /licenses && ln LICENSE.txt /licenses/LICENSE
151153

docs/changelog/125832.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
pr: 125832
2+
summary: "ESQL: Fix `NULL` handling in `IN` clause"
3+
area: ES|QL
4+
type: bug
5+
issues:
6+
- 119950

docs/reference/elasticsearch/mapping-reference/semantic-text.md

Lines changed: 24 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -109,10 +109,30 @@ to create the endpoint. If not specified, the {{infer}} endpoint defined by
109109
`inference_id` will be used at both index and query time.
110110

111111
`chunking_settings`
112-
: (Optional, object) Sets chunking settings that will override the settings
113-
configured by the `inference_id` endpoint.
114-
See [chunking settings attributes](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put)
115-
in the {{infer}} API documentation for a complete list of available options.
112+
: (Optional, object) Settings for chunking text into smaller passages.
113+
If specified, these will override the chunking settings set in the {{infer-cap}}
114+
endpoint associated with `inference_id`.
115+
If chunking settings are updated, they will not be applied to existing documents
116+
until they are reindexed.
117+
118+
::::{dropdown} Valid values for `chunking_settings`
119+
`type`
120+
: Indicates the type of chunking strategy to use. Valid values are `word` or
121+
`sentence`. Required.
122+
123+
`max_chunk_size`
124+
: The maximum number of words in a chunk. Required.
125+
126+
`overlap`
127+
: The number of overlapping words allowed in chunks. This cannot be defined as
128+
more than half of the `max_chunk_size`. Required for `word` type chunking
129+
settings.
130+
131+
`sentence_overlap`
132+
: The number of overlapping sentences allowed in chunks. Valid values are `0`
133+
or `1`. Required for `sentence` type chunking settings.
134+
135+
::::
116136

117137
## {{infer-cap}} endpoint validation [infer-endpoint-validation]
118138

server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@
4040
import org.elasticsearch.index.IndexService;
4141
import org.elasticsearch.index.engine.Engine;
4242
import org.elasticsearch.index.get.GetResult;
43+
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
4344
import org.elasticsearch.index.shard.IndexShard;
4445
import org.elasticsearch.index.shard.ShardId;
4546
import org.elasticsearch.index.shard.ShardNotFoundException;
@@ -235,10 +236,11 @@ private void getFromTranslog(
235236
final var retryingListener = listener.delegateResponse((l, e) -> {
236237
final var cause = ExceptionsHelper.unwrapCause(e);
237238
logger.debug("get_from_translog failed", cause);
239+
// All of the following exceptions can be thrown if the shard is relocated
238240
if (cause instanceof ShardNotFoundException
239241
|| cause instanceof IndexNotFoundException
242+
|| cause instanceof IllegalIndexShardStateException
240243
|| cause instanceof AlreadyClosedException) {
241-
// TODO AlreadyClosedException the engine reset should be fixed by ES-10826
242244
logger.debug("retrying get_from_translog");
243245
observer.waitForNextChange(new ClusterStateObserver.Listener() {
244246
@Override
@@ -253,13 +255,7 @@ public void onClusterServiceClose() {
253255

254256
@Override
255257
public void onTimeout(TimeValue timeout) {
256-
// TODO AlreadyClosedException the engine reset should be fixed by ES-10826
257-
if (cause instanceof AlreadyClosedException) {
258-
// Do an additional retry just in case AlreadyClosedException didn't generate a cluster update
259-
tryGetFromTranslog(request, indexShard, node, l);
260-
} else {
261-
l.onFailure(new ElasticsearchException("Timed out retrying get_from_translog", cause));
262-
}
258+
l.onFailure(new ElasticsearchException("Timed out retrying get_from_translog", cause));
263259
}
264260
});
265261
} else {

server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java

Lines changed: 17 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99

1010
package org.elasticsearch.action.get;
1111

12-
import org.apache.lucene.store.AlreadyClosedException;
1312
import org.apache.lucene.util.BytesRef;
1413
import org.elasticsearch.TransportVersions;
1514
import org.elasticsearch.action.ActionListener;
@@ -65,25 +64,24 @@ protected void doExecute(Task task, Request request, ActionListener<Response> li
6564
assert indexShard.routingEntry().isPromotableToPrimary() : "not an indexing shard" + indexShard.routingEntry();
6665
assert getRequest.realtime();
6766
ActionListener.completeWith(listener, () -> {
68-
var result = indexShard.getService()
69-
.getFromTranslog(
70-
getRequest.id(),
71-
getRequest.storedFields(),
72-
getRequest.realtime(),
73-
getRequest.version(),
74-
getRequest.versionType(),
75-
getRequest.fetchSourceContext(),
76-
getRequest.isForceSyntheticSource()
77-
);
78-
long segmentGeneration = -1;
79-
if (result == null) {
80-
Engine engine = indexShard.getEngineOrNull();
81-
if (engine == null) {
82-
throw new AlreadyClosedException("engine closed");
67+
// Allows to keep the same engine instance for getFromTranslog and getLastUnsafeSegmentGenerationForGets
68+
return indexShard.withEngineException(engine -> {
69+
var result = indexShard.getService()
70+
.getFromTranslog(
71+
getRequest.id(),
72+
getRequest.storedFields(),
73+
getRequest.realtime(),
74+
getRequest.version(),
75+
getRequest.versionType(),
76+
getRequest.fetchSourceContext(),
77+
getRequest.isForceSyntheticSource()
78+
);
79+
long segmentGeneration = -1;
80+
if (result == null) {
81+
segmentGeneration = engine.getLastUnsafeSegmentGenerationForGets();
8382
}
84-
segmentGeneration = engine.getLastUnsafeSegmentGenerationForGets();
85-
}
86-
return new Response(result, indexShard.getOperationPrimaryTerm(), segmentGeneration);
83+
return new Response(result, indexShard.getOperationPrimaryTerm(), segmentGeneration);
84+
});
8785
});
8886
}
8987

server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@
4040
import org.elasticsearch.index.IndexService;
4141
import org.elasticsearch.index.engine.Engine;
4242
import org.elasticsearch.index.get.GetResult;
43+
import org.elasticsearch.index.shard.IllegalIndexShardStateException;
4344
import org.elasticsearch.index.shard.IndexShard;
4445
import org.elasticsearch.index.shard.MultiEngineGet;
4546
import org.elasticsearch.index.shard.ShardId;
@@ -216,10 +217,11 @@ private void shardMultiGetFromTranslog(
216217
final var retryingListener = listener.delegateResponse((l, e) -> {
217218
final var cause = ExceptionsHelper.unwrapCause(e);
218219
logger.debug("mget_from_translog[shard] failed", cause);
220+
// All of the following exceptions can be thrown if the shard is relocated
219221
if (cause instanceof ShardNotFoundException
220222
|| cause instanceof IndexNotFoundException
223+
|| cause instanceof IllegalIndexShardStateException
221224
|| cause instanceof AlreadyClosedException) {
222-
// TODO AlreadyClosedException the engine reset should be fixed by ES-10826
223225
logger.debug("retrying mget_from_translog[shard]");
224226
observer.waitForNextChange(new ClusterStateObserver.Listener() {
225227
@Override
@@ -234,13 +236,7 @@ public void onClusterServiceClose() {
234236

235237
@Override
236238
public void onTimeout(TimeValue timeout) {
237-
// TODO AlreadyClosedException the engine reset should be fixed by ES-10826
238-
if (cause instanceof AlreadyClosedException) {
239-
// Do an additional retry just in case AlreadyClosedException didn't generate a cluster update
240-
tryShardMultiGetFromTranslog(request, indexShard, node, l);
241-
} else {
242-
l.onFailure(new ElasticsearchException("Timed out retrying mget_from_translog[shard]", cause));
243-
}
239+
l.onFailure(new ElasticsearchException("Timed out retrying mget_from_translog[shard]", cause));
244240
}
245241
});
246242
} else {

server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetFomTranslogAction.java

Lines changed: 38 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99

1010
package org.elasticsearch.action.get;
1111

12-
import org.apache.lucene.store.AlreadyClosedException;
1312
import org.elasticsearch.TransportVersions;
1413
import org.elasticsearch.action.ActionListener;
1514
import org.elasticsearch.action.ActionRequest;
@@ -62,48 +61,48 @@ protected void doExecute(Task task, Request request, ActionListener<Response> li
6261
assert indexShard.routingEntry().isPromotableToPrimary() : "not an indexing shard" + indexShard.routingEntry();
6362
assert multiGetShardRequest.realtime();
6463
ActionListener.completeWith(listener, () -> {
65-
var multiGetShardResponse = new MultiGetShardResponse();
66-
var someItemsNotFoundInTranslog = false;
67-
for (int i = 0; i < multiGetShardRequest.locations.size(); i++) {
68-
var item = multiGetShardRequest.items.get(i);
69-
try {
70-
var result = indexShard.getService()
71-
.getFromTranslog(
72-
item.id(),
73-
item.storedFields(),
74-
multiGetShardRequest.realtime(),
75-
item.version(),
76-
item.versionType(),
77-
item.fetchSourceContext(),
78-
multiGetShardRequest.isForceSyntheticSource()
64+
// Allows to keep the same engine instance for getFromTranslog and getLastUnsafeSegmentGenerationForGets
65+
return indexShard.withEngineException(engine -> {
66+
var multiGetShardResponse = new MultiGetShardResponse();
67+
var someItemsNotFoundInTranslog = false;
68+
69+
for (int i = 0; i < multiGetShardRequest.locations.size(); i++) {
70+
var item = multiGetShardRequest.items.get(i);
71+
try {
72+
var result = indexShard.getService()
73+
.getFromTranslog(
74+
item.id(),
75+
item.storedFields(),
76+
multiGetShardRequest.realtime(),
77+
item.version(),
78+
item.versionType(),
79+
item.fetchSourceContext(),
80+
multiGetShardRequest.isForceSyntheticSource()
81+
);
82+
GetResponse getResponse = null;
83+
if (result == null) {
84+
someItemsNotFoundInTranslog = true;
85+
} else {
86+
getResponse = new GetResponse(result);
87+
}
88+
multiGetShardResponse.add(multiGetShardRequest.locations.get(i), getResponse);
89+
} catch (RuntimeException | IOException e) {
90+
if (TransportActions.isShardNotAvailableException(e)) {
91+
throw e;
92+
}
93+
logger.debug("failed to execute multi_get_from_translog for {}[id={}]: {}", shardId, item.id(), e);
94+
multiGetShardResponse.add(
95+
multiGetShardRequest.locations.get(i),
96+
new MultiGetResponse.Failure(multiGetShardRequest.index(), item.id(), e)
7997
);
80-
GetResponse getResponse = null;
81-
if (result == null) {
82-
someItemsNotFoundInTranslog = true;
83-
} else {
84-
getResponse = new GetResponse(result);
8598
}
86-
multiGetShardResponse.add(multiGetShardRequest.locations.get(i), getResponse);
87-
} catch (RuntimeException | IOException e) {
88-
if (TransportActions.isShardNotAvailableException(e)) {
89-
throw e;
90-
}
91-
logger.debug("failed to execute multi_get_from_translog for {}[id={}]: {}", shardId, item.id(), e);
92-
multiGetShardResponse.add(
93-
multiGetShardRequest.locations.get(i),
94-
new MultiGetResponse.Failure(multiGetShardRequest.index(), item.id(), e)
95-
);
9699
}
97-
}
98-
long segmentGeneration = -1;
99-
if (someItemsNotFoundInTranslog) {
100-
Engine engine = indexShard.getEngineOrNull();
101-
if (engine == null) {
102-
throw new AlreadyClosedException("engine closed");
100+
long segmentGeneration = -1;
101+
if (someItemsNotFoundInTranslog) {
102+
segmentGeneration = engine.getLastUnsafeSegmentGenerationForGets();
103103
}
104-
segmentGeneration = engine.getLastUnsafeSegmentGenerationForGets();
105-
}
106-
return new Response(multiGetShardResponse, indexShard.getOperationPrimaryTerm(), segmentGeneration);
104+
return new Response(multiGetShardResponse, indexShard.getOperationPrimaryTerm(), segmentGeneration);
105+
});
107106
});
108107
}
109108

0 commit comments

Comments
 (0)