Commit e936fe7

Merge branch 'main' into remove_outdated_mutes_2
2 parents: 06a8c20 + d5c0778

13 files changed: +162, -57 lines

docs/changelog/126191.yaml

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+pr: 126191
+summary: Fix NPE for missing Content Type header in OIDC Authenticator
+area: Authentication
+type: bug
+issues: []

modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/TransportDeleteDataStreamOptionsAction.java

Lines changed: 7 additions & 1 deletion

@@ -81,7 +81,13 @@ protected void masterOperation(
         for (String name : dataStreamNames) {
             systemIndices.validateDataStreamAccess(name, threadPool.getThreadContext());
         }
-        metadataDataStreamsService.removeDataStreamOptions(dataStreamNames, request.ackTimeout(), request.masterNodeTimeout(), listener);
+        metadataDataStreamsService.removeDataStreamOptions(
+            state.projectId(),
+            dataStreamNames,
+            request.ackTimeout(),
+            request.masterNodeTimeout(),
+            listener
+        );
     }

     @Override

modules/data-streams/src/main/java/org/elasticsearch/datastreams/options/action/TransportPutDataStreamOptionsAction.java

Lines changed: 1 addition & 0 deletions

@@ -81,6 +81,7 @@ protected void masterOperation(
             systemIndices.validateDataStreamAccess(name, threadPool.getThreadContext());
         }
         metadataDataStreamsService.setDataStreamOptions(
+            state.projectId(),
             dataStreamNames,
             request.getOptions(),
             request.ackTimeout(),

muted-tests.yml

Lines changed: 3 additions & 6 deletions

@@ -317,9 +317,6 @@ tests:
 - class: org.elasticsearch.xpack.ilm.actions.SearchableSnapshotActionIT
   method: testSearchableSnapshotsInHotPhasePinnedToHotNodes
   issue: https://github.com/elastic/elasticsearch/issues/125683
-- class: org.elasticsearch.xpack.esql.spatial.SpatialExtentAggregationNoLicenseIT
-  method: testStExtentAggregationWithPoints
-  issue: https://github.com/elastic/elasticsearch/issues/125735
 - class: org.elasticsearch.xpack.test.rest.XPackRestIT
   method: test {p0=transform/transforms_start_stop/Test schedule_now on an already started transform}
   issue: https://github.com/elastic/elasticsearch/issues/120720
@@ -341,9 +338,6 @@ tests:
 - class: org.elasticsearch.xpack.test.rest.XPackRestIT
   method: test {p0=ml/start_data_frame_analytics/Test start given dest index is not empty}
   issue: https://github.com/elastic/elasticsearch/issues/125909
-- class: org.elasticsearch.indices.stats.IndexStatsIT
-  method: testThrottleStats
-  issue: https://github.com/elastic/elasticsearch/issues/125910
 - class: org.elasticsearch.xpack.esql.action.ManyShardsIT
   method: testCancelUnnecessaryRequests
   issue: https://github.com/elastic/elasticsearch/issues/125947
@@ -377,6 +371,9 @@ tests:
 - class: org.elasticsearch.xpack.esql.qa.single_node.GenerativeIT
   method: test
   issue: https://github.com/elastic/elasticsearch/issues/126139
+- class: org.elasticsearch.snapshots.SharedClusterSnapshotRestoreIT
+  method: testDeletionOfFailingToRecoverIndexShouldStopRestore
+  issue: https://github.com/elastic/elasticsearch/issues/126204

 # Examples:
 #

server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java

Lines changed: 28 additions & 24 deletions

@@ -467,27 +467,26 @@ public void testNonThrottleStats() throws Exception {
         assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis(), equalTo(0L));
     }

-    public void testThrottleStats() {
+    public void testThrottleStats() throws Exception {
         assertAcked(
-            prepareCreate("test").setSettings(
+            prepareCreate("test_throttle_stats_index").setSettings(
                 settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1")
                     .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
                     .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2")
                     .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2")
                     .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
                     .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1")
+                    .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true")
                     .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name())
             )
         );
-        ensureGreen();
+        ensureGreen("test_throttle_stats_index");
         // make sure we see throttling kicking in:
         AtomicBoolean done = new AtomicBoolean();
         AtomicLong termUpTo = new AtomicLong();
-        long start = System.currentTimeMillis();
-        for (int threadIdx = 0; threadIdx < 5; threadIdx++) {
-            int finalThreadIdx = threadIdx;
-            new Thread(() -> {
-                IndicesStatsResponse stats;
+        Thread[] indexingThreads = new Thread[5];
+        for (int threadIdx = 0; threadIdx < indexingThreads.length; threadIdx++) {
+            indexingThreads[threadIdx] = new Thread(() -> {
                 while (done.get() == false) {
                     for (int i = 0; i < 100; i++) {
                         // Provoke slowish merging by making many unique terms:
@@ -496,30 +495,35 @@ public void testThrottleStats() {
                             sb.append(' ');
                             sb.append(termUpTo.incrementAndGet());
                         }
-                        prepareIndex("test").setId("" + termUpTo.get()).setSource("field" + (i % 10), sb.toString()).get();
+                        prepareIndex("test_throttle_stats_index").setId("" + termUpTo.get())
+                            .setSource("field" + (i % 10), sb.toString())
+                            .get();
                         if (i % 2 == 0) {
-                            refresh();
+                            refresh("test_throttle_stats_index");
                         }
                     }
-                    refresh();
-                    if (finalThreadIdx == 0) {
-                        stats = indicesAdmin().prepareStats().get();
-                        done.set(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis() > 0);
-                    }
-                    if (System.currentTimeMillis() - start > 300 * 1000) { // Wait 5 minutes for throttling to kick in
-                        done.set(true);
-                        fail("index throttling didn't kick in after 5 minutes of intense merging");
-                    }
+                    refresh("test_throttle_stats_index");
                 }
-            }).start();
+            });
+            indexingThreads[threadIdx].start();
+        }
+
+        assertBusy(() -> {
+            IndicesStatsResponse stats = indicesAdmin().prepareStats("test_throttle_stats_index").get();
+            assertTrue(stats.getPrimaries().getIndexing().getTotal().getThrottleTime().millis() > 0);
+            done.set(true);
+        }, 5L, TimeUnit.MINUTES);
+
+        for (Thread indexingThread : indexingThreads) {
+            indexingThread.join();
         }

         // Optimize & flush and wait; else we sometimes get a "Delete Index failed - not acked"
         // when ESIntegTestCase.after tries to remove indices created by the test:
-        logger.info("test: now optimize");
-        indicesAdmin().prepareForceMerge("test").get();
-        flush();
-        logger.info("test: test done");
+        logger.info("test throttle stats: now optimize");
+        indicesAdmin().prepareForceMerge("test_throttle_stats_index").get();
+        flush("test_throttle_stats_index");
+        logger.info("test throttle stats: test done");
     }

     public void testSimpleStats() throws Exception {
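
A note on the pattern: the old version polled the stats by hand on one indexing thread and tracked its own five-minute deadline; the new version delegates that to ESTestCase.assertBusy, which re-runs the assertion until it passes or the timeout expires. A simplified, self-contained sketch of those retry semantics (an illustration only — the real assertBusy in ESTestCase uses an incremental backoff rather than a fixed sleep):

import java.util.concurrent.TimeUnit;

final class BusyAssert {

    interface CheckedRunnable {
        void run() throws Exception;
    }

    // Re-run the assertion until it stops throwing, or give up when the deadline passes.
    static void assertBusy(CheckedRunnable assertion, long timeout, TimeUnit unit) throws Exception {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        while (true) {
            try {
                assertion.run();
                return; // assertion passed
            } catch (AssertionError | Exception e) {
                if (System.nanoTime() >= deadline) {
                    throw e; // out of time: surface the last failure
                }
                Thread.sleep(500); // fixed poll interval, for the sketch only
            }
        }
    }
}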

server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java

Lines changed: 25 additions & 10 deletions

@@ -119,7 +119,15 @@ public Tuple<ClusterState, ClusterStateAckListener> executeTask(
         ClusterState clusterState
     ) {
         return new Tuple<>(
-            updateDataStreamOptions(clusterState, modifyOptionsTask.getDataStreamNames(), modifyOptionsTask.getOptions()),
+            ClusterState.builder(clusterState)
+                .putProjectMetadata(
+                    updateDataStreamOptions(
+                        clusterState.projectState(modifyOptionsTask.projectId).metadata(),
+                        modifyOptionsTask.getDataStreamNames(),
+                        modifyOptionsTask.getOptions()
+                    )
+                )
+                .build(),
             modifyOptionsTask
         );
     }
@@ -195,6 +203,7 @@ public void removeLifecycle(
      * Submits the task to set the provided data stream options to the requested data streams.
      */
     public void setDataStreamOptions(
+        final ProjectId projectId,
         final List<String> dataStreamNames,
         DataStreamOptions options,
         TimeValue ackTimeout,
@@ -203,7 +212,7 @@ public void setDataStreamOptions(
     ) {
         updateOptionsTaskQueue.submitTask(
             "set-data-stream-options",
-            new UpdateOptionsTask(dataStreamNames, options, ackTimeout, listener),
+            new UpdateOptionsTask(projectId, dataStreamNames, options, ackTimeout, listener),
             masterTimeout
         );
     }
@@ -212,14 +221,15 @@ public void setDataStreamOptions(
      * Submits the task to remove the data stream options from the requested data streams.
      */
     public void removeDataStreamOptions(
+        ProjectId projectId,
         List<String> dataStreamNames,
         TimeValue ackTimeout,
         TimeValue masterTimeout,
         ActionListener<AcknowledgedResponse> listener
     ) {
         updateOptionsTaskQueue.submitTask(
             "delete-data-stream-options",
-            new UpdateOptionsTask(dataStreamNames, null, ackTimeout, listener),
+            new UpdateOptionsTask(projectId, dataStreamNames, null, ackTimeout, listener),
             masterTimeout
         );
     }
@@ -308,18 +318,17 @@ ProjectMetadata updateDataLifecycle(ProjectMetadata project, List<String> dataSt
      * Creates an updated cluster state in which the requested data streams have the data stream options provided.
      * Visible for testing.
      */
-    ClusterState updateDataStreamOptions(
-        ClusterState currentState,
+    ProjectMetadata updateDataStreamOptions(
+        ProjectMetadata project,
         List<String> dataStreamNames,
         @Nullable DataStreamOptions dataStreamOptions
     ) {
-        Metadata metadata = currentState.metadata();
-        Metadata.Builder builder = Metadata.builder(metadata);
+        ProjectMetadata.Builder builder = ProjectMetadata.builder(project);
         for (var dataStreamName : dataStreamNames) {
-            var dataStream = validateDataStream(metadata.getProject(), dataStreamName);
+            var dataStream = validateDataStream(project, dataStreamName);
             builder.put(dataStream.copy().setDataStreamOptions(dataStreamOptions).build());
         }
-        return ClusterState.builder(currentState).metadata(builder.build()).build();
+        return builder.build();
     }

     /**
@@ -525,21 +534,27 @@ public DataStreamLifecycle getDataLifecycle() {
      * A cluster state update task that consists of the cluster state request and the listeners that need to be notified upon completion.
      */
     static class UpdateOptionsTask extends AckedBatchedClusterStateUpdateTask {
-
+        ProjectId projectId;
         private final List<String> dataStreamNames;
         private final DataStreamOptions options;

         UpdateOptionsTask(
+            ProjectId projectId,
             List<String> dataStreamNames,
             @Nullable DataStreamOptions options,
             TimeValue ackTimeout,
             ActionListener<AcknowledgedResponse> listener
         ) {
             super(ackTimeout, listener);
+            this.projectId = projectId;
             this.dataStreamNames = dataStreamNames;
             this.options = options;
         }

+        public ProjectId getProjectId() {
+            return projectId;
+        }
+
         public List<String> getDataStreamNames() {
             return dataStreamNames;
         }
server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java

Lines changed: 10 additions & 5 deletions

@@ -446,34 +446,39 @@ public void testUpdateLifecycle() {
     }

     public void testUpdateDataStreamOptions() {
+        final var projectId = randomProjectIdOrDefault();
         String dataStream = randomAlphaOfLength(5);
         // we want the data stream options to be non-empty, so we can see the removal in action
         DataStreamOptions dataStreamOptions = randomValueOtherThan(
             DataStreamOptions.EMPTY,
             DataStreamOptionsTests::randomDataStreamOptions
         );
-        ClusterState before = DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStream, 2)), List.of());
+        ProjectMetadata before = DataStreamTestHelper.getClusterStateWithDataStreams(
+            projectId,
+            List.of(new Tuple<>(dataStream, 2)),
+            List.of()
+        ).metadata().getProject(projectId);
         MetadataDataStreamsService service = new MetadataDataStreamsService(
             mock(ClusterService.class),
             mock(IndicesService.class),
             DataStreamGlobalRetentionSettings.create(ClusterSettings.createBuiltInClusterSettings())
         );

         // Ensure no data stream options are stored
-        DataStream updatedDataStream = before.metadata().getProject().dataStreams().get(dataStream);
+        DataStream updatedDataStream = before.dataStreams().get(dataStream);
         assertNotNull(updatedDataStream);
         assertThat(updatedDataStream.getDataStreamOptions(), equalTo(DataStreamOptions.EMPTY));

         // Set non-empty data stream options
-        ClusterState after = service.updateDataStreamOptions(before, List.of(dataStream), dataStreamOptions);
-        updatedDataStream = after.metadata().getProject().dataStreams().get(dataStream);
+        ProjectMetadata after = service.updateDataStreamOptions(before, List.of(dataStream), dataStreamOptions);
+        updatedDataStream = after.dataStreams().get(dataStream);
         assertNotNull(updatedDataStream);
         assertThat(updatedDataStream.getDataStreamOptions(), equalTo(dataStreamOptions));
         before = after;

         // Remove data stream options
         after = service.updateDataStreamOptions(before, List.of(dataStream), null);
-        updatedDataStream = after.metadata().getProject().dataStreams().get(dataStream);
+        updatedDataStream = after.dataStreams().get(dataStream);
         assertNotNull(updatedDataStream);
         assertThat(updatedDataStream.getDataStreamOptions(), equalTo(DataStreamOptions.EMPTY));
     }

server/src/test/java/org/elasticsearch/index/codec/tsdb/TsdbDocValueBwcTests.java

Lines changed: 19 additions & 4 deletions

@@ -235,8 +235,15 @@ private IndexWriterConfig getTimeSeriesIndexWriterConfig(String hostnameField, S

     // A hacky way to figure out whether doc values format is written in what version. Need to use reflection, because
     // PerFieldDocValuesFormat hides the doc values formats it wraps.
-    private static void assertOldDocValuesFormatVersion(DirectoryReader reader) throws NoSuchFieldException, IllegalAccessException,
-        IOException {
+    private void assertOldDocValuesFormatVersion(DirectoryReader reader) throws NoSuchFieldException, IllegalAccessException, IOException {
+        if (System.getSecurityManager() != null) {
+            // With jvm version 24 entitlements are used and security manager is no longer used.
+            // Making this assertion work with security manager requires granting the entire test codebase privileges to use
+            // suppressAccessChecks and accessDeclaredMembers. This is undesired from a security manager perspective.
+            logger.info("not asserting doc values format version, because security manager is used");
+            return;
+        }
+
         for (var leafReaderContext : reader.leaves()) {
             var leaf = (SegmentReader) leafReaderContext.reader();
             var dvReader = leaf.getDocValuesReader();
@@ -248,8 +255,16 @@ private static void assertOldDocValuesFormatVersion(DirectoryReader reader) thro
         }
     }

-    private static void assertNewDocValuesFormatVersion(DirectoryReader reader) throws NoSuchFieldException, IllegalAccessException,
-        IOException, ClassNotFoundException {
+    private void assertNewDocValuesFormatVersion(DirectoryReader reader) throws NoSuchFieldException, IllegalAccessException, IOException,
+        ClassNotFoundException {
+        if (System.getSecurityManager() != null) {
+            // With jvm version 24 entitlements are used and security manager is no longer used.
+            // Making this assertion work with security manager requires granting the entire test codebase privileges to use
+            // suppressAccessChecks and accessDeclaredMembers. This is undesired from a security manager perspective.
+            logger.info("not asserting doc values format version, because security manager is used");
+            return;
+        }
+
         for (var leafReaderContext : reader.leaves()) {
             var leaf = (SegmentReader) leafReaderContext.reader();
             var dvReader = leaf.getDocValuesReader();

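For context on why those two permission names come up: the assertion reads a private field of the wrapped doc values format via reflection (as the comment at the top of the method explains), and doing that under a security manager needs two separate grants. A generic, self-contained illustration of that kind of access — the class and field names here are made up for the example, not taken from the test:

import java.lang.reflect.Field;

final class ReflectivePeek {

    // Under a security manager, reflective access to another class's private field typically needs:
    //   - RuntimePermission "accessDeclaredMembers" for getDeclaredField(...)
    //   - ReflectPermission "suppressAccessChecks" for setAccessible(true)
    static Object readPrivateField(Object target, String fieldName) throws ReflectiveOperationException {
        Field field = target.getClass().getDeclaredField(fieldName);
        field.setAccessible(true);
        return field.get(target);
    }
}
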
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/telemetry/VerifierMetricsTests.java

Lines changed: 25 additions & 0 deletions

@@ -441,6 +441,31 @@ public void testKeep() {
         assertEquals(0, rename(c));
     }

+    public void testCategorize() {
+        Counters c = esql("""
+            from employees
+            | keep emp_no, languages, gender
+            | where languages is null or emp_no <= 10030
+            | STATS COUNT() BY CATEGORIZE(gender)""");
+        assertEquals(0, dissect(c));
+        assertEquals(0, eval(c));
+        assertEquals(0, grok(c));
+        assertEquals(0, limit(c));
+        assertEquals(0, sort(c));
+        assertEquals(1L, stats(c));
+        assertEquals(1L, where(c));
+        assertEquals(0, enrich(c));
+        assertEquals(0, mvExpand(c));
+        assertEquals(0, show(c));
+        assertEquals(0, row(c));
+        assertEquals(1L, from(c));
+        assertEquals(0, drop(c));
+        assertEquals(1L, keep(c));
+        assertEquals(0, rename(c));
+        assertEquals(1, function("count", c));
+        assertEquals(1, function("categorize", c));
+    }
+
     private long dissect(Counters c) {
         return c.get(FPREFIX + DISSECT);
     }

x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java

Lines changed: 6 additions & 4 deletions

@@ -629,18 +629,20 @@ public void cancelled() {
     /**
      * Handle the Token Response from the OpenID Connect Provider. If successful, extract the (yet not validated) Id Token
      * and access token and call the provided listener.
+     * (Package private for testing purposes)
      */
-    private static void handleTokenResponse(HttpResponse httpResponse, ActionListener<Tuple<AccessToken, JWT>> tokensListener) {
+    static void handleTokenResponse(HttpResponse httpResponse, ActionListener<Tuple<AccessToken, JWT>> tokensListener) {
         try {
             final HttpEntity entity = httpResponse.getEntity();
             final Header encodingHeader = entity.getContentEncoding();
             final Header contentHeader = entity.getContentType();
-            if (ContentType.parse(contentHeader.getValue()).getMimeType().equals("application/json") == false) {
+            final String contentHeaderValue = contentHeader == null ? null : ContentType.parse(contentHeader.getValue()).getMimeType();
+            if (contentHeaderValue == null || contentHeaderValue.equals("application/json") == false) {
                 tokensListener.onFailure(
                     new IllegalStateException(
                         "Unable to parse Token Response. Content type was expected to be "
                             + "[application/json] but was ["
-                            + contentHeader.getValue()
+                            + contentHeaderValue
                             + "]"
                     )
                 );
@@ -688,7 +690,7 @@ private static void handleTokenResponse(HttpResponse httpResponse, ActionListene
         } catch (Exception e) {
             tokensListener.onFailure(
                 new ElasticsearchSecurityException(
-                    "Failed to exchange code for Id Token using the Token Endpoint. " + "Unable to parse Token Response",
+                    "Failed to exchange code for Id Token using the Token Endpoint. Unable to parse Token Response",
                     e
                 )
             );
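
Making handleTokenResponse package-private (as the new Javadoc note says) lets a test in org.elasticsearch.xpack.security.authc.oidc feed it a token response whose entity carries no Content-Type header — the case that used to end in the NPE mentioned in the changelog entry above. A hypothetical test sketch of that path; the test name, listener wiring, and the usual Apache HttpComponents plus ESTestCase/Hamcrest helpers are assumptions, not part of this commit:

public void testMissingContentTypeHeaderIsRejected() {
    HttpResponse response = new BasicHttpResponse(new BasicStatusLine(HttpVersion.HTTP_1_1, 200, "OK"));
    // ByteArrayEntity leaves the Content-Type unset, so entity.getContentType() returns null here.
    response.setEntity(new ByteArrayEntity("{}".getBytes(StandardCharsets.UTF_8)));

    AtomicReference<Exception> failure = new AtomicReference<>();
    OpenIdConnectAuthenticator.handleTokenResponse(
        response,
        ActionListener.wrap(tokens -> fail("expected a failure for a missing Content-Type header"), failure::set)
    );

    // With the null guard in place, the listener reports a descriptive IllegalStateException instead of an NPE.
    assertThat(failure.get(), instanceOf(IllegalStateException.class));
    assertThat(failure.get().getMessage(), containsString("[application/json]"));
}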

0 commit comments