Skip to content

Commit e06b636

Browse files
authored
Merge branch 'main' into fix-validation-test
2 parents a1b54cb + 83a13b9 commit e06b636

File tree

88 files changed

+981
-171
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

88 files changed

+981
-171
lines changed

docs/changelog/128218.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 128218
2+
summary: Improve exception for trained model deployment scale up timeout
3+
area: Machine Learning
4+
type: enhancement
5+
issues: []

docs/release-notes/index.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -409,6 +409,8 @@ Machine Learning:
409409
* Support mTLS for the Elastic Inference Service integration inside the inference API [#119679](https://github.com/elastic/elasticsearch/pull/119679)
410410
* [Inference API] Add node-local rate limiting for the inference API [#120400](https://github.com/elastic/elasticsearch/pull/120400)
411411
* [Inference API] fix spell words: covertToString to convertToString [#119922](https://github.com/elastic/elasticsearch/pull/119922)
412+
* Update Linux build images to Rocky Linux 8 with gcc 13.3 [#2773](https://github.com/elastic/ml-cpp/pull/2773)
413+
412414

413415
Mapping:
414416
* Add Optional Source Filtering to Source Loaders [#113827](https://github.com/elastic/elasticsearch/pull/113827)

modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java

Lines changed: 88 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -39,13 +39,21 @@
3939
import org.elasticsearch.rest.RestStatus;
4040
import org.junit.ClassRule;
4141

42-
import java.io.ByteArrayInputStream;
42+
import java.io.BufferedInputStream;
43+
import java.io.BufferedOutputStream;
4344
import java.net.HttpURLConnection;
45+
import java.nio.channels.Channels;
46+
import java.nio.file.Files;
4447
import java.util.Collection;
48+
import java.util.zip.CRC32;
49+
import java.util.zip.CheckedInputStream;
50+
import java.util.zip.CheckedOutputStream;
4551

52+
import static org.elasticsearch.common.io.Streams.limitStream;
4653
import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomPurpose;
4754
import static org.hamcrest.Matchers.blankOrNullString;
4855
import static org.hamcrest.Matchers.equalTo;
56+
import static org.hamcrest.Matchers.lessThan;
4957
import static org.hamcrest.Matchers.not;
5058

5159
/**
@@ -58,6 +66,27 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi
5866

5967
private static final String AZURE_ACCOUNT = System.getProperty("test.azure.account");
6068

69+
/**
70+
* AzureRepositoryPlugin that sets a low value for getUploadBlockSize()
71+
*/
72+
public static class TestAzureRepositoryPlugin extends AzureRepositoryPlugin {
73+
74+
public TestAzureRepositoryPlugin(Settings settings) {
75+
super(settings);
76+
}
77+
78+
@Override
79+
AzureStorageService createAzureStorageService(Settings settings, AzureClientProvider azureClientProvider) {
80+
final long blockSize = ByteSizeValue.ofKb(64L).getBytes() * randomIntBetween(1, 15);
81+
return new AzureStorageService(settings, azureClientProvider) {
82+
@Override
83+
long getUploadBlockSize() {
84+
return blockSize;
85+
}
86+
};
87+
}
88+
}
89+
6190
@ClassRule
6291
public static AzureHttpFixture fixture = new AzureHttpFixture(
6392
USE_FIXTURE ? AzureHttpFixture.Protocol.HTTP : AzureHttpFixture.Protocol.NONE,
@@ -71,7 +100,7 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi
71100

72101
@Override
73102
protected Collection<Class<? extends Plugin>> getPlugins() {
74-
return pluginList(AzureRepositoryPlugin.class);
103+
return pluginList(TestAzureRepositoryPlugin.class);
75104
}
76105

77106
@Override
@@ -158,19 +187,67 @@ private void ensureSasTokenPermissions() {
158187

159188
public void testMultiBlockUpload() throws Exception {
160189
final BlobStoreRepository repo = getRepository();
190+
assertThat(
191+
asInstanceOf(AzureBlobStore.class, repo.blobStore()).getLargeBlobThresholdInBytes(),
192+
equalTo(ByteSizeUnit.MB.toBytes(1L))
193+
);
194+
assertThat(asInstanceOf(AzureBlobStore.class, repo.blobStore()).getUploadBlockSize(), lessThan(ByteSizeUnit.MB.toBytes(1L)));
195+
161196
// The configured threshold for this test suite is 1mb
162-
final int blobSize = ByteSizeUnit.MB.toIntBytes(2);
197+
final long blobSize = randomLongBetween(ByteSizeUnit.MB.toBytes(2), ByteSizeUnit.MB.toBytes(4));
198+
final int bufferSize = 8192;
199+
200+
final var file = createTempFile();
201+
final long expectedChecksum;
202+
try (var output = new CheckedOutputStream(new BufferedOutputStream(Files.newOutputStream(file)), new CRC32())) {
203+
long remaining = blobSize;
204+
while (remaining > 0L) {
205+
final var buffer = randomByteArrayOfLength(Math.toIntExact(Math.min(bufferSize, remaining)));
206+
output.write(buffer);
207+
remaining -= buffer.length;
208+
}
209+
output.flush();
210+
expectedChecksum = output.getChecksum().getValue();
211+
}
212+
163213
PlainActionFuture<Void> future = new PlainActionFuture<>();
164214
repo.threadPool().generic().execute(ActionRunnable.run(future, () -> {
165215
final BlobContainer blobContainer = repo.blobStore().blobContainer(repo.basePath().add("large_write"));
166-
blobContainer.writeBlob(
167-
randomPurpose(),
168-
UUIDs.base64UUID(),
169-
new ByteArrayInputStream(randomByteArrayOfLength(blobSize)),
170-
blobSize,
171-
false
172-
);
173-
blobContainer.delete(randomPurpose());
216+
try {
217+
final var blobName = UUIDs.base64UUID();
218+
if (randomBoolean()) {
219+
try (var input = new BufferedInputStream(Files.newInputStream(file))) {
220+
blobContainer.writeBlob(randomPurpose(), blobName, input, blobSize, false);
221+
}
222+
} else {
223+
assertThat(blobContainer.supportsConcurrentMultipartUploads(), equalTo(true));
224+
blobContainer.writeBlobAtomic(randomPurpose(), blobName, blobSize, (offset, length) -> {
225+
var channel = Files.newByteChannel(file);
226+
if (offset > 0L) {
227+
if (channel.size() <= offset) {
228+
throw new AssertionError();
229+
}
230+
channel.position(offset);
231+
}
232+
assert channel.position() == offset;
233+
return new BufferedInputStream(limitStream(Channels.newInputStream(channel), length));
234+
}, false);
235+
}
236+
237+
long bytesCount = 0L;
238+
try (var input = new CheckedInputStream(blobContainer.readBlob(OperationPurpose.INDICES, blobName), new CRC32())) {
239+
var buffer = new byte[bufferSize];
240+
int bytesRead;
241+
while ((bytesRead = input.read(buffer)) != -1) {
242+
bytesCount += bytesRead;
243+
}
244+
245+
assertThat(bytesCount, equalTo(blobSize));
246+
assertThat(input.getChecksum().getValue(), equalTo(expectedChecksum));
247+
}
248+
} finally {
249+
blobContainer.delete(randomPurpose());
250+
}
174251
}));
175252
future.get();
176253
}

modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -504,12 +504,10 @@ void writeBlobAtomic(
504504
.collect(Collectors.toList())
505505
.flatMap(blockIds -> {
506506
logger.debug("{}: all {} parts uploaded, now committing", blobName, multiParts.size());
507-
var response = asyncClient.commitBlockList(
507+
return asyncClient.commitBlockList(
508508
multiParts.stream().map(MultiPart::blockId).toList(),
509509
failIfAlreadyExists == false
510-
);
511-
logger.debug("{}: all {} parts committed", blobName, multiParts.size());
512-
return response;
510+
).doOnSuccess(unused -> logger.debug("{}: all {} parts committed", blobName, multiParts.size()));
513511
})
514512
.block();
515513
}

modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111

1212
import org.apache.logging.log4j.LogManager;
1313
import org.apache.logging.log4j.Logger;
14+
import org.elasticsearch.cluster.metadata.ProjectId;
1415
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
1516
import org.elasticsearch.cluster.service.ClusterService;
1617
import org.elasticsearch.common.Strings;
@@ -110,6 +111,7 @@ public static final class Repository {
110111
private final RepositoriesMetrics repositoriesMetrics;
111112

112113
public AzureRepository(
114+
final ProjectId projectId,
113115
final RepositoryMetadata metadata,
114116
final NamedXContentRegistry namedXContentRegistry,
115117
final AzureStorageService storageService,
@@ -119,6 +121,7 @@ public AzureRepository(
119121
final RepositoriesMetrics repositoriesMetrics
120122
) {
121123
super(
124+
projectId,
122125
metadata,
123126
namedXContentRegistry,
124127
clusterService,

modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,10 +62,11 @@ public Map<String, Repository.Factory> getRepositories(
6262
RecoverySettings recoverySettings,
6363
RepositoriesMetrics repositoriesMetrics
6464
) {
65-
return Collections.singletonMap(AzureRepository.TYPE, metadata -> {
65+
return Collections.singletonMap(AzureRepository.TYPE, (projectId, metadata) -> {
6666
AzureStorageService storageService = azureStoreService.get();
6767
assert storageService != null;
6868
return new AzureRepository(
69+
projectId,
6970
metadata,
7071
namedXContentRegistry,
7172
storageService,

modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99

1010
package org.elasticsearch.repositories.azure;
1111

12+
import org.elasticsearch.cluster.metadata.ProjectId;
1213
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
1314
import org.elasticsearch.common.settings.ClusterSettings;
1415
import org.elasticsearch.common.settings.Settings;
@@ -23,6 +24,7 @@
2324
import org.elasticsearch.xcontent.NamedXContentRegistry;
2425

2526
import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY;
27+
import static org.hamcrest.Matchers.equalTo;
2628
import static org.hamcrest.Matchers.is;
2729
import static org.hamcrest.Matchers.nullValue;
2830
import static org.mockito.Mockito.mock;
@@ -35,7 +37,9 @@ private AzureRepository azureRepository(Settings settings) {
3537
.putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths())
3638
.put(settings)
3739
.build();
40+
final ProjectId projectId = randomProjectIdOrDefault();
3841
final AzureRepository azureRepository = new AzureRepository(
42+
projectId,
3943
new RepositoryMetadata("foo", "azure", internalSettings),
4044
NamedXContentRegistry.EMPTY,
4145
mock(AzureStorageService.class),
@@ -44,6 +48,7 @@ private AzureRepository azureRepository(Settings settings) {
4448
new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
4549
RepositoriesMetrics.NOOP
4650
);
51+
assertThat(azureRepository.getProjectId(), equalTo(projectId));
4752
assertThat(azureRepository.getBlobStore(), is(nullValue()));
4853
return azureRepository;
4954
}

modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -276,7 +276,8 @@ public Map<String, Repository.Factory> getRepositories(
276276
) {
277277
return Collections.singletonMap(
278278
GoogleCloudStorageRepository.TYPE,
279-
metadata -> new GoogleCloudStorageRepository(
279+
(projectId, metadata) -> new GoogleCloudStorageRepository(
280+
projectId,
280281
metadata,
281282
registry,
282283
this.storageService,

modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,8 @@ public Map<String, Repository.Factory> getRepositories(
5757
) {
5858
return Collections.singletonMap(
5959
GoogleCloudStorageRepository.TYPE,
60-
metadata -> new GoogleCloudStorageRepository(
60+
(projectId, metadata) -> new GoogleCloudStorageRepository(
61+
projectId,
6162
metadata,
6263
namedXContentRegistry,
6364
this.storageService,

modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111

1212
import org.apache.logging.log4j.LogManager;
1313
import org.apache.logging.log4j.Logger;
14+
import org.elasticsearch.cluster.metadata.ProjectId;
1415
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
1516
import org.elasticsearch.cluster.service.ClusterService;
1617
import org.elasticsearch.common.BackoffPolicy;
@@ -88,6 +89,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository {
8889
private final GcsRepositoryStatsCollector statsCollector;
8990

9091
GoogleCloudStorageRepository(
92+
final ProjectId projectId,
9193
final RepositoryMetadata metadata,
9294
final NamedXContentRegistry namedXContentRegistry,
9395
final GoogleCloudStorageService storageService,
@@ -97,6 +99,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository {
9799
final GcsRepositoryStatsCollector statsCollector
98100
) {
99101
super(
102+
projectId,
100103
metadata,
101104
namedXContentRegistry,
102105
clusterService,

0 commit comments

Comments (0)