
Commit 86002d3

Merge branch 'main' into presize_set
2 parents 8e2d165 + 44a74f9 commit 86002d3


46 files changed: +622 and -371 lines changed


docs/changelog/125764.yaml

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+pr: 125764
+summary: Fix `ReplaceMissingFieldsWithNull`
+area: ES|QL
+type: bug
+issues:
+ - 126036
+ - 121754
+ - 126030

docs/changelog/126191.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 126191
+summary: Fix NPE for missing Content Type header in OIDC Authenticator
+area: Authentication
+type: bug
+issues: []

modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/TransportDeleteDataStreamOptionsAction.java

Lines changed: 7 additions & 1 deletion
@@ -81,7 +81,13 @@ protected void masterOperation(
         for (String name : dataStreamNames) {
             systemIndices.validateDataStreamAccess(name, threadPool.getThreadContext());
         }
-        metadataDataStreamsService.removeDataStreamOptions(dataStreamNames, request.ackTimeout(), request.masterNodeTimeout(), listener);
+        metadataDataStreamsService.removeDataStreamOptions(
+            state.projectId(),
+            dataStreamNames,
+            request.ackTimeout(),
+            request.masterNodeTimeout(),
+            listener
+        );
     }

     @Override

modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/TransportPutDataStreamOptionsAction.java

Lines changed: 1 addition & 0 deletions
@@ -81,6 +81,7 @@ protected void masterOperation(
             systemIndices.validateDataStreamAccess(name, threadPool.getThreadContext());
         }
         metadataDataStreamsService.setDataStreamOptions(
+            state.projectId(),
             dataStreamNames,
             request.getOptions(),
             request.ackTimeout(),

modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java

Lines changed: 21 additions & 8 deletions
@@ -14,9 +14,7 @@
 import com.amazonaws.DnsResolver;
 import com.amazonaws.SdkClientException;
 import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream;
 import com.amazonaws.services.s3.model.AmazonS3Exception;
-import com.amazonaws.util.Base16;
 import com.sun.net.httpserver.HttpExchange;
 import com.sun.net.httpserver.HttpHandler;

@@ -30,7 +28,9 @@
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.OperationPurpose;
 import org.elasticsearch.common.blobstore.OptionalBytesReference;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.hash.MessageDigests;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
@@ -377,13 +377,12 @@ public void testWriteLargeBlob() throws Exception {
                 }
             } else if (s3Request.isUploadPartRequest()) {
                 // upload part request
-                MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody());
-                BytesReference bytes = Streams.readFully(md5);
+                BytesReference bytes = Streams.readFully(exchange.getRequestBody());
                 assertThat((long) bytes.length(), anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes())));
                 assertThat(contentLength, anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes())));

                 if (countDownUploads.decrementAndGet() % 2 == 0) {
-                    exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest()));
+                    exchange.getResponseHeaders().add("ETag", getBase16MD5Digest(bytes));
                     exchange.sendResponseHeaders(HttpStatus.SC_OK, -1);
                     exchange.close();
                     return;
@@ -475,12 +474,11 @@ public void testWriteLargeBlobStreaming() throws Exception {
                 }
             } else if (s3Request.isUploadPartRequest()) {
                 // upload part request
-                MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody());
-                BytesReference bytes = Streams.readFully(md5);
+                BytesReference bytes = Streams.readFully(exchange.getRequestBody());

                 if (counterUploads.incrementAndGet() % 2 == 0) {
                     bytesReceived.addAndGet(bytes.length());
-                    exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest()));
+                    exchange.getResponseHeaders().add("ETag", getBase16MD5Digest(bytes));
                     exchange.sendResponseHeaders(HttpStatus.SC_OK, -1);
                     exchange.close();
                     return;
@@ -1105,6 +1103,21 @@ public void testTrimmedLogAndCappedSuppressedErrorOnMultiObjectDeletionException
         }
     }

+    private static String getBase16MD5Digest(BytesReference bytesReference) {
+        return MessageDigests.toHexString(MessageDigests.digest(bytesReference, MessageDigests.md5()));
+    }
+
+    public void testGetBase16MD5Digest() {
+        // from Wikipedia, see also org.elasticsearch.common.hash.MessageDigestsTests.testMd5
+        assertBase16MD5Digest("", "d41d8cd98f00b204e9800998ecf8427e");
+        assertBase16MD5Digest("The quick brown fox jumps over the lazy dog", "9e107d9d372bb6826bd81d3542a419d6");
+        assertBase16MD5Digest("The quick brown fox jumps over the lazy dog.", "e4d909c290d0fb1ca068ffaddf22cbd0");
+    }
+
+    private static void assertBase16MD5Digest(String input, String expectedDigestString) {
+        assertEquals(expectedDigestString, getBase16MD5Digest(new BytesArray(input)));
+    }
+
     @Override
     protected Matcher<Integer> getMaxRetriesMatcher(int maxRetries) {
         // some attempts make meaningful progress and do not count towards the max retry limit
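
For reference, here is a minimal, self-contained sketch of the Base16 MD5 computation that the new getBase16MD5Digest helper performs, written against the plain JDK rather than the Elasticsearch MessageDigests utility; the class and method names in the sketch are illustrative only.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;

public class Md5HexSketch {

    // Digest the fully buffered request body, then hex-encode it. This mirrors the change above:
    // the ETag is now derived from the bytes after Streams.readFully, instead of being computed
    // while streaming through the removed MD5DigestCalculatingInputStream.
    static String md5Hex(byte[] bytes) throws NoSuchAlgorithmException {
        return HexFormat.of().formatHex(MessageDigest.getInstance("MD5").digest(bytes));
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // Same reference vectors as testGetBase16MD5Digest above (from Wikipedia's MD5 article).
        System.out.println(md5Hex("".getBytes(StandardCharsets.UTF_8)));
        // d41d8cd98f00b204e9800998ecf8427e
        System.out.println(md5Hex("The quick brown fox jumps over the lazy dog".getBytes(StandardCharsets.UTF_8)));
        // 9e107d9d372bb6826bd81d3542a419d6
    }
}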

muted-tests.yml

Lines changed: 12 additions & 9 deletions
@@ -308,9 +308,6 @@ tests:
 - class: org.elasticsearch.packaging.test.DockerTests
   method: test012SecurityCanBeDisabled
   issue: https://github.com/elastic/elasticsearch/issues/116636
-- class: org.elasticsearch.index.engine.ThreadPoolMergeSchedulerTests
-  method: testSchedulerCloseWaitsForRunningMerge
-  issue: https://github.com/elastic/elasticsearch/issues/125236
 - class: org.elasticsearch.index.shard.StoreRecoveryTests
   method: testAddIndices
   issue: https://github.com/elastic/elasticsearch/issues/124104
@@ -332,9 +329,6 @@ tests:
 - class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT
   method: testSearchableSnapshotsInHotPhasePinnedToHotNodes
   issue: https://github.com/elastic/elasticsearch/issues/125683
-- class: org.elasticsearch.xpack.esql.spatial.SpatialExtentAggregationNoLicenseIT
-  method: testStExtentAggregationWithPoints
-  issue: https://github.com/elastic/elasticsearch/issues/125735
 - class: org.elasticsearch.xpack.test.rest.XPackRestIT
   method: test {p0=transform/transforms_start_stop/Test schedule_now on an already started transform}
   issue: https://github.com/elastic/elasticsearch/issues/120720
@@ -356,9 +350,6 @@ tests:
 - class: org.elasticsearch.xpack.test.rest.XPackRestIT
   method: test {p0=ml/start_data_frame_analytics/Test start given dest index is not empty}
   issue: https://github.com/elastic/elasticsearch/issues/125909
-- class: org.elasticsearch.indices.stats.IndexStatsIT
-  method: testThrottleStats
-  issue: https://github.com/elastic/elasticsearch/issues/125910
 - class: org.elasticsearch.xpack.esql.action.ManyShardsIT
   method: testCancelUnnecessaryRequests
   issue: https://github.com/elastic/elasticsearch/issues/125947
@@ -392,6 +383,18 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.single_node.GenerativeIT
   method: test
   issue: https://github.com/elastic/elasticsearch/issues/126139
+- class: org.elasticsearch.snapshots.SharedClusterSnapshotRestoreIT
+  method: testDeletionOfFailingToRecoverIndexShouldStopRestore
+  issue: https://github.com/elastic/elasticsearch/issues/126204
+- class: org.elasticsearch.index.engine.ThreadPoolMergeSchedulerTests
+  method: testSchedulerCloseWaitsForRunningMerge
+  issue: https://github.com/elastic/elasticsearch/issues/125236
+- class: org.elasticsearch.entitlement.runtime.policy.PolicyUtilsTests
+  method: testFormatFilesEntitlement
+  issue: https://github.com/elastic/elasticsearch/issues/126176
+- class: org.elasticsearch.xpack.security.SecurityRolesMultiProjectIT
+  method: testUpdatingFileBasedRoleAffectsAllProjects
+  issue: https://github.com/elastic/elasticsearch/issues/126223

 # Examples:
 #

server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java

Lines changed: 28 additions & 24 deletions
@@ -467,27 +467,26 @@ public void testNonThrottleStats() throws Exception {
         assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis(), equalTo(0L));
     }

-    public void testThrottleStats() {
+    public void testThrottleStats() throws Exception {
         assertAcked(
-            prepareCreate("test").setSettings(
+            prepareCreate("test_throttle_stats_index").setSettings(
                 settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1")
                     .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
                     .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
                     .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
                     .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
                     .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1")
+                    .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true")
                     .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name())
             )
         );
-        ensureGreen();
+        ensureGreen("test_throttle_stats_index");
         // make sure we see throttling kicking in:
         AtomicBoolean done = new AtomicBoolean();
         AtomicLong termUpTo = new AtomicLong();
-        long start = System.currentTimeMillis();
-        for (int threadIdx = 0; threadIdx < 5; threadIdx++) {
-            int finalThreadIdx = threadIdx;
-            new Thread(() -> {
-                IndicesStatsResponse stats;
+        Thread[] indexingThreads = new Thread[5];
+        for (int threadIdx = 0; threadIdx < indexingThreads.length; threadIdx++) {
+            indexingThreads[threadIdx] = new Thread(() -> {
                 while (done.get() == false) {
                     for (int i = 0; i < 100; i++) {
                         // Provoke slowish merging by making many unique terms:
@@ -496,30 +495,35 @@ public void testThrottleStats() {
                             sb.append(' ');
                             sb.append(termUpTo.incrementAndGet());
                         }
-                        prepareIndex("test").setId("" + termUpTo.get()).setSource("field" + (i % 10), sb.toString()).get();
+                        prepareIndex("test_throttle_stats_index").setId("" + termUpTo.get())
+                            .setSource("field" + (i % 10), sb.toString())
+                            .get();
                         if (i % 2 == 0) {
-                            refresh();
+                            refresh("test_throttle_stats_index");
                         }
                     }
-                    refresh();
-                    if (finalThreadIdx == 0) {
-                        stats = indicesAdmin().prepareStats().get();
-                        done.set(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis() > 0);
-                    }
-                    if (System.currentTimeMillis() - start > 300 * 1000) { // Wait 5 minutes for throttling to kick in
-                        done.set(true);
-                        fail("index throttling didn't kick in after 5 minutes of intense merging");
-                    }
+                    refresh("test_throttle_stats_index");
                 }
-            }).start();
+            });
+            indexingThreads[threadIdx].start();
+        }
+
+        assertBusy(() -> {
+            IndicesStatsResponse stats = indicesAdmin().prepareStats("test_throttle_stats_index").get();
+            assertTrue(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis() > 0);
+            done.set(true);
+        }, 5L, TimeUnit.MINUTES);
+
+        for (Thread indexingThread : indexingThreads) {
+            indexingThread.join();
         }

         // Optimize & flush and wait; else we sometimes get a "Delete Index failed - not acked"
         // when ESIntegTestCase.after tries to remove indices created by the test:
-        logger.info("test: now optimize");
-        indicesAdmin().prepareForceMerge("test").get();
-        flush();
-        logger.info("test: test done");
+        logger.info("test throttle stats: now optimize");
+        indicesAdmin().prepareForceMerge("test_throttle_stats_index").get();
+        flush("test_throttle_stats_index");
+        logger.info("test throttle stats: test done");
     }

     public void testSimpleStats() throws Exception {
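
The rewritten test replaces the hand-rolled five-minute polling loop with ESTestCase's assertBusy. As a rough illustration of that pattern, here is a simplified stand-in (not the actual ESTestCase implementation) of an assert-until-timeout helper:

import java.util.concurrent.TimeUnit;

public final class BusyAssert {

    // Re-run an assertion until it stops throwing AssertionError, or give up once the timeout
    // elapses and rethrow the most recent failure. Polls with a capped exponential backoff.
    public static void assertBusy(Runnable assertion, long maxWait, TimeUnit unit) throws InterruptedException {
        long deadlineNanos = System.nanoTime() + unit.toNanos(maxWait);
        long sleepMillis = 1;
        while (true) {
            try {
                assertion.run();
                return;
            } catch (AssertionError e) {
                if (System.nanoTime() >= deadlineNanos) {
                    throw e;
                }
                Thread.sleep(sleepMillis);
                sleepMillis = Math.min(sleepMillis * 2, 1_000);
            }
        }
    }
}

In the updated test this means the throttle statistics are checked repeatedly for up to five minutes before the indexing threads are signalled to stop and joined.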

server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java

Lines changed: 25 additions & 10 deletions
@@ -119,7 +119,15 @@ public Tuple<ClusterState, ClusterStateAckListener> executeTask(
                     ClusterState clusterState
                 ) {
                     return new Tuple<>(
-                        updateDataStreamOptions(clusterState, modifyOptionsTask.getDataStreamNames(), modifyOptionsTask.getOptions()),
+                        ClusterState.builder(clusterState)
+                            .putProjectMetadata(
+                                updateDataStreamOptions(
+                                    clusterState.projectState(modifyOptionsTask.projectId).metadata(),
+                                    modifyOptionsTask.getDataStreamNames(),
+                                    modifyOptionsTask.getOptions()
+                                )
+                            )
+                            .build(),
                         modifyOptionsTask
                     );
                 }
@@ -195,6 +203,7 @@ public void removeLifecycle(
      * Submits the task to set the provided data stream options to the requested data streams.
      */
     public void setDataStreamOptions(
+        final ProjectId projectId,
         final List<String> dataStreamNames,
         DataStreamOptions options,
         TimeValue ackTimeout,
@@ -203,7 +212,7 @@ public void setDataStreamOptions(
     ) {
         updateOptionsTaskQueue.submitTask(
             "set-data-stream-options",
-            new UpdateOptionsTask(dataStreamNames, options, ackTimeout, listener),
+            new UpdateOptionsTask(projectId, dataStreamNames, options, ackTimeout, listener),
             masterTimeout
         );
     }
@@ -212,14 +221,15 @@ public void setDataStreamOptions(
      * Submits the task to remove the data stream options from the requested data streams.
      */
     public void removeDataStreamOptions(
+        ProjectId projectId,
         List<String> dataStreamNames,
         TimeValue ackTimeout,
         TimeValue masterTimeout,
         ActionListener<AcknowledgedResponse> listener
     ) {
         updateOptionsTaskQueue.submitTask(
             "delete-data-stream-options",
-            new UpdateOptionsTask(dataStreamNames, null, ackTimeout, listener),
+            new UpdateOptionsTask(projectId, dataStreamNames, null, ackTimeout, listener),
             masterTimeout
         );
     }
@@ -308,18 +318,17 @@ ProjectMetadata updateDataLifecycle(ProjectMetadata project, List<String> dataStr
      * Creates an updated cluster state in which the requested data streams have the data stream options provided.
      * Visible for testing.
      */
-    ClusterState updateDataStreamOptions(
-        ClusterState currentState,
+    ProjectMetadata updateDataStreamOptions(
+        ProjectMetadata project,
         List<String> dataStreamNames,
         @Nullable DataStreamOptions dataStreamOptions
     ) {
-        Metadata metadata = currentState.metadata();
-        Metadata.Builder builder = Metadata.builder(metadata);
+        ProjectMetadata.Builder builder = ProjectMetadata.builder(project);
         for (var dataStreamName : dataStreamNames) {
-            var dataStream = validateDataStream(metadata.getProject(), dataStreamName);
+            var dataStream = validateDataStream(project, dataStreamName);
             builder.put(dataStream.copy().setDataStreamOptions(dataStreamOptions).build());
         }
-        return ClusterState.builder(currentState).metadata(builder.build()).build();
+        return builder.build();
     }

     /**
@@ -525,21 +534,27 @@ public DataStreamLifecycle getDataLifecycle() {
      * A cluster state update task that consists of the cluster state request and the listeners that need to be notified upon completion.
      */
     static class UpdateOptionsTask extends AckedBatchedClusterStateUpdateTask {
-
+        ProjectId projectId;
         private final List<String> dataStreamNames;
         private final DataStreamOptions options;

         UpdateOptionsTask(
+            ProjectId projectId,
             List<String> dataStreamNames,
             @Nullable DataStreamOptions options,
             TimeValue ackTimeout,
             ActionListener<AcknowledgedResponse> listener
         ) {
             super(ackTimeout, listener);
+            this.projectId = projectId;
             this.dataStreamNames = dataStreamNames;
             this.options = options;
         }

+        public ProjectId getProjectId() {
+            return projectId;
+        }
+
         public List<String> getDataStreamNames() {
             return dataStreamNames;
         }
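
To make the shape of this refactoring easier to see outside the Elasticsearch code base, here is a small, self-contained sketch of the same pattern using hypothetical stand-in types (none of these classes exist in Elasticsearch): the update method now touches only the project-scoped metadata, and the task executor rebuilds the top-level immutable state around it.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical, immutable stand-ins for the project-scoped metadata and the top-level state.
record Project(Map<String, String> dataStreamOptions) {}

record State(Map<String, Project> projects) {
    State withProject(String projectId, Project updated) {
        Map<String, Project> copy = new HashMap<>(projects);
        copy.put(projectId, updated);
        return new State(Map.copyOf(copy));
    }
}

class ProjectScopedUpdateSketch {

    // Analogous to the new updateDataStreamOptions(ProjectMetadata, ...): it builds an updated
    // copy of one project's metadata and knows nothing about the surrounding state.
    static Project updateOptions(Project project, List<String> dataStreamNames, String options) {
        Map<String, String> copy = new HashMap<>(project.dataStreamOptions());
        for (String name : dataStreamNames) {
            copy.put(name, options);
        }
        return new Project(Map.copyOf(copy));
    }

    // Analogous to executeTask: resolve the project named by the task, apply the scoped update,
    // and compose the result back into a new top-level state.
    static State executeTask(State state, String projectId, List<String> dataStreamNames, String options) {
        Project updated = updateOptions(state.projects().get(projectId), dataStreamNames, options);
        return state.withProject(projectId, updated);
    }
}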

server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java

Lines changed: 10 additions & 5 deletions
@@ -446,34 +446,39 @@ public void testUpdateLifecycle() {
     }

     public void testUpdateDataStreamOptions() {
+        final var projectId = randomProjectIdOrDefault();
         String dataStream = randomAlphaOfLength(5);
         // we want the data stream options to be non-empty, so we can see the removal in action
         DataStreamOptions dataStreamOptions = randomValueOtherThan(
             DataStreamOptions.EMPTY,
             DataStreamOptionsTests::randomDataStreamOptions
         );
-        ClusterState before = DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStream, 2)), List.of());
+        ProjectMetadata before = DataStreamTestHelper.getClusterStateWithDataStreams(
+            projectId,
+            List.of(new Tuple<>(dataStream, 2)),
+            List.of()
+        ).metadata().getProject(projectId);
         MetadataDataStreamsService service = new MetadataDataStreamsService(
             mock(ClusterService.class),
             mock(IndicesService.class),
             DataStreamGlobalRetentionSettings.create(ClusterSettings.createBuiltInClusterSettings())
         );

         // Ensure no data stream options are stored
-        DataStream updatedDataStream = before.metadata().getProject().dataStreams().get(dataStream);
+        DataStream updatedDataStream = before.dataStreams().get(dataStream);
         assertNotNull(updatedDataStream);
         assertThat(updatedDataStream.getDataStreamOptions(), equalTo(DataStreamOptions.EMPTY));

         // Set non-empty data stream options
-        ClusterState after = service.updateDataStreamOptions(before, List.of(dataStream), dataStreamOptions);
-        updatedDataStream = after.metadata().getProject().dataStreams().get(dataStream);
+        ProjectMetadata after = service.updateDataStreamOptions(before, List.of(dataStream), dataStreamOptions);
+        updatedDataStream = after.dataStreams().get(dataStream);
         assertNotNull(updatedDataStream);
         assertThat(updatedDataStream.getDataStreamOptions(), equalTo(dataStreamOptions));
         before = after;

         // Remove data stream options
         after = service.updateDataStreamOptions(before, List.of(dataStream), null);
-        updatedDataStream = after.metadata().getProject().dataStreams().get(dataStream);
+        updatedDataStream = after.dataStreams().get(dataStream);
         assertNotNull(updatedDataStream);
         assertThat(updatedDataStream.getDataStreamOptions(), equalTo(DataStreamOptions.EMPTY));
     }
