
Commit 4012de2

Merge branch 'main' into svilen/multimatch_args
2 parents: cd78510 + 83a13b9

File tree: 129 files changed (+1661, -332 lines)


docs/changelog/128139.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 128139
+summary: Skip indexing points for `seq_no` in tsdb and logsdb
+area: Mapping
+type: enhancement
+issues: []

docs/changelog/128163.yaml

Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+pr: 128163
+summary: Make `skip_unavailable` catch all errors
+area: ES|QL
+type: breaking
+issues: [ ]
+breaking:
+  title: Cluster setting "skip_unavailable" catches all runtime errors
+  area: ES|QL
+  details: "If `skip_unavailable` is set to `true`, the runtime errors from this cluster\
+    \ do not lead to a failure of the query. Instead, the cluster is set to `skipped`\
+    \ or `partial` status, and the query execution continues. This is a breaking change\
+    \ from previous versions, where `skip_unavailable` only applied to errors related\
+    \ to a cluster being unavailable."
+  impact: "The errors on remote clusters, e.g. missing indices, will not lead to a\
+    \ failure of the query. Instead, the cluster is set to `skipped` or `partial` status\
+    \ in the response metadata."
+notable: false
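
To make the described behavior concrete, here is a purely illustrative Java sketch: with `skip_unavailable: true`, a remote cluster that hits a runtime error is reported as `skipped` or `partial` in the response metadata rather than failing the whole query. The status map below is a hypothetical stand-in for that metadata, not the actual ES|QL response type.

import java.util.Map;

public class ClusterStatusDemo {
    public static void main(String[] args) {
        // Hypothetical per-cluster statuses, as the changelog describes them.
        Map<String, String> clusterStatuses = Map.of(
            "local", "successful",
            "remote1", "skipped",   // e.g. a missing index on remote1
            "remote2", "partial"
        );
        // The query as a whole still succeeds; degraded clusters are merely flagged.
        clusterStatuses.forEach((cluster, status) -> System.out.println(cluster + ": " + status));
    }
}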

docs/changelog/128218.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 128218
+summary: Improve exception for trained model deployment scale up timeout
+area: Machine Learning
+type: enhancement
+issues: []

docs/release-notes/index.md

Lines changed: 2 additions & 0 deletions
@@ -409,6 +409,8 @@ Machine Learning:
 * Support mTLS for the Elastic Inference Service integration inside the inference API [#119679](https://github.com/elastic/elasticsearch/pull/119679)
 * [Inference API] Add node-local rate limiting for the inference API [#120400](https://github.com/elastic/elasticsearch/pull/120400)
 * [Inference API] fix spell words: covertToString to convertToString [#119922](https://github.com/elastic/elasticsearch/pull/119922)
+* Update Linux build images to Rocky Linux 8 with gcc 13.3 [#2773](https://github.com/elastic/ml-cpp/pull/2773)
+
 
 Mapping:
 * Add Optional Source Filtering to Source Loaders [#113827](https://github.com/elastic/elasticsearch/pull/113827)

modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/TransportUpdateDataStreamSettingsAction.java

Lines changed: 2 additions & 1 deletion
@@ -33,6 +33,7 @@
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.indices.SystemIndices;
 import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.tasks.Task;
@@ -53,7 +54,7 @@ public class TransportUpdateDataStreamSettingsAction extends TransportMasterNode
     UpdateDataStreamSettingsAction.Request,
     UpdateDataStreamSettingsAction.Response> {
     private static final Logger logger = LogManager.getLogger(TransportUpdateDataStreamSettingsAction.class);
-    private static final Set<String> APPLY_TO_BACKING_INDICES = Set.of("index.lifecycle.name");
+    private static final Set<String> APPLY_TO_BACKING_INDICES = Set.of("index.lifecycle.name", IndexSettings.PREFER_ILM);
     private static final Set<String> APPLY_TO_DATA_STREAM_ONLY = Set.of("index.number_of_shards");
     private final MetadataDataStreamsService metadataDataStreamsService;
     private final MetadataUpdateSettingsService updateSettingsService;
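
For context, the two sets above route each requested setting either to the data stream alone or to the data stream plus its backing indices; this commit adds `IndexSettings.PREFER_ILM` (assumed here to resolve to the string "index.lifecycle.prefer_ilm") to the latter set. A minimal standalone sketch of that routing follows, with illustrative names rather than the action's real structure:

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class SettingsRouter {
    // Mirrors the sets in the diff; "index.lifecycle.prefer_ilm" is assumed
    // to be the value of IndexSettings.PREFER_ILM.
    private static final Set<String> APPLY_TO_BACKING_INDICES =
        Set.of("index.lifecycle.name", "index.lifecycle.prefer_ilm");

    // Key true  -> setting applies to the data stream and its backing indices;
    // key false -> setting applies to the data stream only.
    static Map<Boolean, Map<String, String>> partition(Map<String, String> requested) {
        return requested.entrySet()
            .stream()
            .collect(Collectors.partitioningBy(
                e -> APPLY_TO_BACKING_INDICES.contains(e.getKey()),
                Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)
            ));
    }

    public static void main(String[] args) {
        System.out.println(partition(Map.of(
            "index.lifecycle.prefer_ilm", "true",
            "index.number_of_shards", "2"
        )));
    }
}

Running main prints the prefer_ilm entry under the true key (propagated to backing indices) and number_of_shards under false, matching the assertions in the YAML test below.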

modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_data_stream_settings.yml

Lines changed: 9 additions & 2 deletions
@@ -51,16 +51,20 @@ setup:
       body:
         index:
           number_of_shards: 2
-          lifecycle.name: my-new-policy
+          lifecycle:
+            name: my-new-policy
+            prefer_ilm: true
   - match: { data_streams.0.name: my-data-stream-1 }
   - match: { data_streams.0.applied_to_data_stream: true }
   - match: { data_streams.0.index_settings_results.applied_to_data_stream_only: [index.number_of_shards]}
-  - match: { data_streams.0.index_settings_results.applied_to_data_stream_and_backing_indices: [index.lifecycle.name] }
+  - length: { data_streams.0.index_settings_results.applied_to_data_stream_and_backing_indices: 2 }
   - match: { data_streams.0.settings.index.number_of_shards: "2" }
   - match: { data_streams.0.settings.index.lifecycle.name: "my-new-policy" }
+  - match: { data_streams.0.settings.index.lifecycle.prefer_ilm: "true" }
   - match: { data_streams.0.effective_settings.index.number_of_shards: "2" }
   - match: { data_streams.0.effective_settings.index.number_of_replicas: "0" }
   - match: { data_streams.0.effective_settings.index.lifecycle.name: "my-new-policy" }
+  - match: { data_streams.0.effective_settings.index.lifecycle.prefer_ilm: "true" }
 
   - do:
       indices.rollover:
@@ -79,13 +83,15 @@ setup:
   - match: { data_streams.0.effective_settings.index.number_of_shards: "2" }
   - match: { data_streams.0.effective_settings.index.number_of_replicas: "0" }
   - match: { data_streams.0.effective_settings.index.lifecycle.name: "my-new-policy" }
+  - match: { data_streams.0.effective_settings.index.lifecycle.prefer_ilm: "true" }
 
   - do:
       indices.get_data_stream:
        name: my-data-stream-1
   - match: { data_streams.0.name: my-data-stream-1 }
   - match: { data_streams.0.settings.index.number_of_shards: "2" }
   - match: { data_streams.0.settings.index.lifecycle.name: "my-new-policy" }
+  - match: { data_streams.0.settings.index.lifecycle.prefer_ilm: "true" }
   - match: { data_streams.0.effective_settings: null }
 
   - do:
@@ -101,6 +107,7 @@ setup:
   - match: { .$idx0name.settings.index.lifecycle.name: "my-new-policy" }
   - match: { .$idx1name.settings.index.number_of_shards: "2" }
   - match: { .$idx1name.settings.index.lifecycle.name: "my-new-policy" }
+  - match: { .$idx1name.settings.index.lifecycle.prefer_ilm: "true" }
 
 ---
 "Test multiple data streams":

modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java

Lines changed: 88 additions & 11 deletions
@@ -39,13 +39,21 @@
 import org.elasticsearch.rest.RestStatus;
 import org.junit.ClassRule;
 
-import java.io.ByteArrayInputStream;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
 import java.net.HttpURLConnection;
+import java.nio.channels.Channels;
+import java.nio.file.Files;
 import java.util.Collection;
+import java.util.zip.CRC32;
+import java.util.zip.CheckedInputStream;
+import java.util.zip.CheckedOutputStream;
 
+import static org.elasticsearch.common.io.Streams.limitStream;
 import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose;
 import static org.hamcrest.Matchers.blankOrNullString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
 import static org.hamcrest.Matchers.not;
 
 /**
@@ -58,6 +66,27 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi
 
     private static final String AZURE_ACCOUNT = System.getProperty("test.azure.account");
 
+    /**
+     * AzureRepositoryPlugin that sets a low value for getUploadBlockSize()
+     */
+    public static class TestAzureRepositoryPlugin extends AzureRepositoryPlugin {
+
+        public TestAzureRepositoryPlugin(Settings settings) {
+            super(settings);
+        }
+
+        @Override
+        AzureStorageService createAzureStorageService(Settings settings, AzureClientProvider azureClientProvider) {
+            final long blockSize = ByteSizeValue.ofKb(64L).getBytes() * randomIntBetween(1, 15);
+            return new AzureStorageService(settings, azureClientProvider) {
+                @Override
+                long getUploadBlockSize() {
+                    return blockSize;
+                }
+            };
+        }
+    }
+
     @ClassRule
     public static AzureHttpFixture fixture = new AzureHttpFixture(
         USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
@@ -71,7 +100,7 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi
 
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
-        return pluginList(AzureRepositoryPlugin.class);
+        return pluginList(TestAzureRepositoryPlugin.class);
    }
 
     @Override
@@ -158,19 +187,67 @@ private void ensureSasTokenPermissions() {
 
     public void testMultiBlockUpload() throws Exception {
         final BlobStoreRepository repo = getRepository();
+        assertThat(
+            asInstanceOf(AzureBlobStore.class, repo.blobStore()).getLargeBlobThresholdInBytes(),
+            equalTo(ByteSizeUnit.MB.toBytes(1L))
+        );
+        assertThat(asInstanceOf(AzureBlobStore.class, repo.blobStore()).getUploadBlockSize(), lessThan(ByteSizeUnit.MB.toBytes(1L)));
+
         // The configured threshold for this test suite is 1mb
-        final int blobSize = ByteSizeUnit.MB.toIntBytes(2);
+        final long blobSize = randomLongBetween(ByteSizeUnit.MB.toBytes(2), ByteSizeUnit.MB.toBytes(4));
+        final int bufferSize = 8192;
+
+        final var file = createTempFile();
+        final long expectedChecksum;
+        try (var output = new CheckedOutputStream(new BufferedOutputStream(Files.newOutputStream(file)), new CRC32())) {
+            long remaining = blobSize;
+            while (remaining > 0L) {
+                final var buffer = randomByteArrayOfLength(Math.toIntExact(Math.min(bufferSize, remaining)));
+                output.write(buffer);
+                remaining -= buffer.length;
+            }
+            output.flush();
+            expectedChecksum = output.getChecksum().getValue();
+        }
+
         PlainActionFuture<Void> future = new PlainActionFuture<>();
         repo.threadPool().generic().execute(ActionRunnable.run(future, () -> {
             final BlobContainer blobContainer = repo.blobStore().blobContainer(repo.basePath().add("large_write"));
-            blobContainer.writeBlob(
-                randomPurpose(),
-                UUIDs.base64UUID(),
-                new ByteArrayInputStream(randomByteArrayOfLength(blobSize)),
-                blobSize,
-                false
-            );
-            blobContainer.delete(randomPurpose());
+            try {
+                final var blobName = UUIDs.base64UUID();
+                if (randomBoolean()) {
+                    try (var input = new BufferedInputStream(Files.newInputStream(file))) {
+                        blobContainer.writeBlob(randomPurpose(), blobName, input, blobSize, false);
+                    }
+                } else {
+                    assertThat(blobContainer.supportsConcurrentMultipartUploads(), equalTo(true));
+                    blobContainer.writeBlobAtomic(randomPurpose(), blobName, blobSize, (offset, length) -> {
+                        var channel = Files.newByteChannel(file);
+                        if (offset > 0L) {
+                            if (channel.size() <= offset) {
+                                throw new AssertionError();
+                            }
+                            channel.position(offset);
+                        }
+                        assert channel.position() == offset;
+                        return new BufferedInputStream(limitStream(Channels.newInputStream(channel), length));
+                    }, false);
+                }
+
+                long bytesCount = 0L;
+                try (var input = new CheckedInputStream(blobContainer.readBlob(OperationPurpose.INDICES, blobName), new CRC32())) {
+                    var buffer = new byte[bufferSize];
+                    int bytesRead;
+                    while ((bytesRead = input.read(buffer)) != -1) {
+                        bytesCount += bytesRead;
+                    }
+
+                    assertThat(bytesCount, equalTo(blobSize));
+                    assertThat(input.getChecksum().getValue(), equalTo(expectedChecksum));
+                }
+            } finally {
+                blobContainer.delete(randomPurpose());
+            }
         }));
         future.get();
     }
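
The rewritten test above streams random bytes to a temp file while accumulating a CRC32, uploads the file as a multi-block blob, then reads the blob back and checks both size and checksum. A self-contained sketch of that round-trip pattern, using only JDK classes in place of the Elasticsearch test helpers:

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Random;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;
import java.util.zip.CheckedOutputStream;

public class ChecksumRoundTrip {
    public static void main(String[] args) throws IOException {
        Path file = Files.createTempFile("blob", ".bin");
        byte[] buffer = new byte[8192];
        new Random().nextBytes(buffer);

        // Write through a CheckedOutputStream so the CRC32 is computed as a side effect.
        long expected;
        try (var out = new CheckedOutputStream(new BufferedOutputStream(Files.newOutputStream(file)), new CRC32())) {
            out.write(buffer);
            out.flush();
            expected = out.getChecksum().getValue();
        }

        // Read back through a CheckedInputStream and compare checksums.
        long actual;
        try (var in = new CheckedInputStream(new BufferedInputStream(Files.newInputStream(file)), new CRC32())) {
            in.transferTo(OutputStream.nullOutputStream()); // drain; updates the checksum
            actual = in.getChecksum().getValue();
        }

        System.out.println(expected == actual); // true
        Files.delete(file);
    }
}

The same idea lets the test validate an arbitrarily large upload without holding the payload in memory, and writeBlobAtomic's (offset, length) provider lets each part be re-opened and read independently for concurrent multipart uploads.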

modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java

Lines changed: 2 additions & 4 deletions
@@ -504,12 +504,10 @@ void writeBlobAtomic(
             .collect(Collectors.toList())
             .flatMap(blockIds -> {
                 logger.debug("{}: all {} parts uploaded, now committing", blobName, multiParts.size());
-                var response = asyncClient.commitBlockList(
+                return asyncClient.commitBlockList(
                     multiParts.stream().map(MultiPart::blockId).toList(),
                     failIfAlreadyExists == false
-                );
-                logger.debug("{}: all {} parts committed", blobName, multiParts.size());
-                return response;
+                ).doOnSuccess(unused -> logger.debug("{}: all {} parts committed", blobName, multiParts.size()));
             })
             .block();
     }
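
The bug being fixed here is subtle: in the removed lines, the second logger.debug ran while the reactive chain was still being assembled, so "all parts committed" was logged before commitBlockList had actually executed. A minimal Project Reactor sketch of the corrected pattern (Mono stands in for the Azure SDK's reactive return type):

import reactor.core.publisher.Mono;

public class CommitLogDemo {
    // Stand-in for asyncClient.commitBlockList(...): completes after "committing".
    static Mono<Void> commitBlockList() {
        return Mono.fromRunnable(() -> System.out.println("committing block list"));
    }

    public static void main(String[] args) {
        commitBlockList()
            // doOnSuccess defers the log until the commit actually completes,
            // instead of logging at pipeline-assembly time.
            .doOnSuccess(unused -> System.out.println("all parts committed"))
            .block();
    }
}

Running this prints "committing block list" before "all parts committed"; with the old eager logging, the "committed" message could appear before the commit ran at all.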

modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java

Lines changed: 3 additions & 0 deletions
@@ -11,6 +11,7 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.cluster.metadata.ProjectId;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
@@ -110,6 +111,7 @@ public static final class Repository {
     private final RepositoriesMetrics repositoriesMetrics;
 
     public AzureRepository(
+        final ProjectId projectId,
         final RepositoryMetadata metadata,
         final NamedXContentRegistry namedXContentRegistry,
         final AzureStorageService storageService,
@@ -119,6 +121,7 @@ public AzureRepository(
         final RepositoriesMetrics repositoriesMetrics
     ) {
         super(
+            projectId,
             metadata,
             namedXContentRegistry,
             clusterService,

modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java

Lines changed: 2 additions & 1 deletion
@@ -62,10 +62,11 @@ public Map<String, Repository.Factory> getRepositories(
         RecoverySettings recoverySettings,
         RepositoriesMetrics repositoriesMetrics
     ) {
-        return Collections.singletonMap(AzureRepository.TYPE, metadata -> {
+        return Collections.singletonMap(AzureRepository.TYPE, (projectId, metadata) -> {
             AzureStorageService storageService = azureStoreService.get();
             assert storageService != null;
             return new AzureRepository(
+                projectId,
                 metadata,
                 namedXContentRegistry,
                 storageService,
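
Together with the AzureRepository change above, the repository factory effectively moves from a one-argument function over the metadata to a two-argument form that also receives the owning project's id. A standalone sketch of that shape, with illustrative stand-in records rather than the real Elasticsearch types:

import java.util.Collections;
import java.util.Map;
import java.util.function.BiFunction;

public class FactoryDemo {
    // Illustrative stand-ins for the real Elasticsearch types.
    record ProjectId(String id) {}
    record RepositoryMetadata(String name) {}
    record Repository(ProjectId projectId, RepositoryMetadata metadata) {}

    // Before this commit the factory took only the metadata; it now also
    // receives the project id, modeled here as a BiFunction.
    static Map<String, BiFunction<ProjectId, RepositoryMetadata, Repository>> getRepositories() {
        return Collections.singletonMap("azure", (projectId, metadata) -> new Repository(projectId, metadata));
    }

    public static void main(String[] args) {
        Repository repo = getRepositories().get("azure")
            .apply(new ProjectId("default"), new RepositoryMetadata("my-backups"));
        System.out.println(repo);
    }
}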
