
Commit 3cb2bc4

Merge branch 'main' into carlosdelest/fix-synonyms-ci-tests-timeout

2 parents: bbe4c9b + 64ae0ae

File tree

39 files changed: +1394 -1106 lines changed


docs/changelog/114271.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 114271
+summary: "[ES|QL] Skip validating remote cluster index names in parser"
+area: ES|QL
+type: bug
+issues: []

docs/changelog/114784.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 114784
+summary: "[ES|QL] make named parameter for identifier and pattern snapshot"
+area: ES|QL
+type: bug
+issues: []

docs/changelog/114854.yaml

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+pr: 114854
+summary: Adding deprecation warnings for rrf using rank and `sub_searches`
+area: Search
+type: deprecation
+issues: []
+deprecation:
+  title: Adding deprecation warnings for rrf using rank and `sub_searches`
+  area: REST API
+  details: The search API parameter `sub_searches` will no longer be supported and will be removed in future releases. Similarly, `rrf` can only be used through the specified `retriever` and no longer through the `rank` parameter.
+  impact: Requests specifying rrf through `rank` and/or `sub_searches` elements will be disallowed in a future version. Users should instead utilize the new `retriever` parameter.
docs/changelog/114869.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 114869
+summary: Standardize error code when bulk body is invalid
+area: CRUD
+type: bug
+issues: []

docs/changelog/114924.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 114924
+summary: Reducing error-level stack trace logging for normal events in `GeoIpDownloader`
+area: Ingest Node
+type: bug
+issues: []

modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java

Lines changed: 9 additions & 3 deletions
@@ -11,7 +11,6 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
@@ -139,11 +138,18 @@ void updateDatabases() throws IOException {
         if (geoipIndex != null) {
             logger.trace("The {} index is not null", GeoIpDownloader.DATABASES_INDEX);
             if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) {
-                throw new ElasticsearchException("not all primary shards of [" + DATABASES_INDEX + "] index are active");
+                logger.debug(
+                    "Not updating geoip database because not all primary shards of the [" + DATABASES_INDEX + "] index are active."
+                );
+                return;
             }
             var blockException = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, geoipIndex.getWriteIndex().getName());
             if (blockException != null) {
-                throw blockException;
+                logger.debug(
+                    "Not updating geoip database because there is a write block on the " + geoipIndex.getWriteIndex().getName() + " index",
+                    blockException
+                );
+                return;
             }
         }
         if (eagerDownloadSupplier.get() || atLeastOneGeoipProcessorSupplier.get()) {
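The hunk above replaces thrown exceptions with debug logging plus an early return when the .geoip_databases index is not ready. A standalone sketch of that guard-clause pattern follows; the class name and the boolean/exception parameters are hypothetical stand-ins for the cluster-state checks, not the actual Elasticsearch code:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class GuardClauseSketch {
    private static final Logger logger = LogManager.getLogger(GuardClauseSketch.class);

    // Hypothetical parameters standing in for the routing-table and block checks in updateDatabases().
    void updateDatabases(boolean allPrimaryShardsActive, Exception writeBlock) {
        if (allPrimaryShardsActive == false) {
            // Previously this condition threw; a routine "not ready yet" state is now only logged.
            logger.debug("Not updating geoip database because not all primary shards are active.");
            return;
        }
        if (writeBlock != null) {
            // The block exception is kept as log context instead of being rethrown.
            logger.debug("Not updating geoip database because the index is write-blocked", writeBlock);
            return;
        }
        // ... proceed with the download ...
    }
}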

modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java

Lines changed: 11 additions & 23 deletions
@@ -9,7 +9,6 @@
 
 package org.elasticsearch.ingest.geoip;
 
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
@@ -25,11 +24,9 @@
 import org.elasticsearch.action.index.TransportIndexAction;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.ReferenceDocs;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.reindex.BulkByScrollResponse;
@@ -583,37 +580,28 @@ void processDatabase(Map<String, Object> databaseInfo) {
         assertFalse(it.hasNext());
     }
 
-    public void testUpdateDatabasesWriteBlock() {
+    public void testUpdateDatabasesWriteBlock() throws IOException {
+        /*
+         * Here we make sure that we bail out before making an httpClient request if there is a write block on the .geoip_databases index
+         */
         ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()));
         var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName();
         state = ClusterState.builder(state)
             .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK))
             .build();
         when(clusterService.state()).thenReturn(state);
-        var e = expectThrows(ClusterBlockException.class, () -> geoIpDownloader.updateDatabases());
-        assertThat(
-            e.getMessage(),
-            equalTo(
-                "index ["
-                    + geoIpIndex
-                    + "] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, "
-                    + "index has read-only-allow-delete block; for more information, see "
-                    + ReferenceDocs.FLOOD_STAGE_WATERMARK
-                    + "];"
-            )
-        );
+        geoIpDownloader.updateDatabases();
         verifyNoInteractions(httpClient);
     }
 
-    public void testUpdateDatabasesIndexNotReady() {
+    public void testUpdateDatabasesIndexNotReady() throws IOException {
+        /*
+         * Here we make sure that we bail out before making an httpClient request if there are unallocated shards on the .geoip_databases
+         * index
+         */
         ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()), true);
-        var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName();
-        state = ClusterState.builder(state)
-            .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK))
-            .build();
         when(clusterService.state()).thenReturn(state);
-        var e = expectThrows(ElasticsearchException.class, () -> geoIpDownloader.updateDatabases());
-        assertThat(e.getMessage(), equalTo("not all primary shards of [.geoip_databases] index are active"));
+        geoIpDownloader.updateDatabases();
         verifyNoInteractions(httpClient);
     }
 
modules/reindex/src/test/java/org/elasticsearch/reindex/BulkByScrollParallelizationHelperTests.java

Lines changed: 2 additions & 2 deletions
@@ -15,16 +15,16 @@
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
+import java.util.Collections;
 
-import static java.util.Collections.emptyList;
 import static org.elasticsearch.reindex.BulkByScrollParallelizationHelper.sliceIntoSubRequests;
 import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
 import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchSourceBuilder;
 
 public class BulkByScrollParallelizationHelperTests extends ESTestCase {
     public void testSliceIntoSubRequests() throws IOException {
         SearchRequest searchRequest = randomSearchRequest(
-            () -> randomSearchSourceBuilder(() -> null, () -> null, () -> null, () -> null, () -> emptyList(), () -> null, () -> null)
+            () -> randomSearchSourceBuilder(() -> null, () -> null, () -> null, Collections::emptyList, () -> null, () -> null)
         );
         if (searchRequest.source() != null) {
             // Clear the slice builder if there is one set. We can't call sliceIntoSubRequests if it is.
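The test change above swaps the `() -> emptyList()` lambda for the equivalent `Collections::emptyList` method reference (and drops one supplier argument to match the updated `randomSearchSourceBuilder` signature merged from main). A minimal, self-contained illustration of that equivalence, using a generic `Supplier` rather than the test's actual types:

import java.util.Collections;
import java.util.List;
import java.util.function.Supplier;

public class MethodReferenceSketch {
    public static void main(String[] args) {
        // Both suppliers produce the same immutable empty list; the method
        // reference simply avoids wrapping the call in an extra lambda.
        Supplier<List<Object>> lambda = () -> Collections.emptyList();
        Supplier<List<Object>> reference = Collections::emptyList;
        System.out.println(lambda.get().equals(reference.get())); // prints: true
    }
}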

muted-tests.yml

Lines changed: 0 additions & 3 deletions
@@ -80,9 +80,6 @@ tests:
 - class: org.elasticsearch.xpack.restart.CoreFullClusterRestartIT
   method: testSnapshotRestore {cluster=UPGRADED}
   issue: https://github.com/elastic/elasticsearch/issues/111799
-- class: org.elasticsearch.xpack.inference.InferenceRestIT
-  method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results}
-  issue: https://github.com/elastic/elasticsearch/issues/111999
 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT
   issue: https://github.com/elastic/elasticsearch/issues/112147
 - class: org.elasticsearch.smoketest.WatcherYamlRestIT

Lines changed: 33 additions & 9 deletions
@@ -9,6 +9,8 @@
 
 package org.elasticsearch.http;
 
+import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.entity.ContentType;
 import org.elasticsearch.action.bulk.IncrementalBulkService;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
@@ -19,24 +21,30 @@
 import org.elasticsearch.xcontent.json.JsonXContent;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 import java.util.Map;
 
+import static org.elasticsearch.rest.RestStatus.BAD_REQUEST;
 import static org.elasticsearch.rest.RestStatus.OK;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0)
-public class IncrementalBulkRestIT extends HttpSmokeTestCase {
+public class BulkRestIT extends HttpSmokeTestCase {
 
     @Override
     protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
         return Settings.builder()
             .put(super.nodeSettings(nodeOrdinal, otherSettings))
-            .put(IncrementalBulkService.INCREMENTAL_BULK.getKey(), true)
+            .put(IncrementalBulkService.INCREMENTAL_BULK.getKey(), seventyFivePercentOfTheTime())
            .build();
    }

+    private static boolean seventyFivePercentOfTheTime() {
+        return (randomBoolean() && randomBoolean()) == false;
+    }
+
     public void testBulkUriMatchingDoesNotMatchBulkCapabilitiesApi() throws IOException {
         Request request = new Request("GET", "/_capabilities?method=GET&path=%2F_bulk&capabilities=failure_store_status&pretty");
         Response response = getRestClient().performRequest(request);
@@ -51,6 +59,26 @@ public void testBulkMissingBody() throws IOException {
         assertThat(responseException.getMessage(), containsString("request body is required"));
     }
 
+    public void testBulkInvalidIndexNameString() throws IOException {
+        Request request = new Request("POST", "/_bulk");
+
+        byte[] bytes1 = "{\"create\":{\"_index\":\"".getBytes(StandardCharsets.UTF_8);
+        byte[] bytes2 = new byte[] { (byte) 0xfe, (byte) 0xfe, (byte) 0xff, (byte) 0xff };
+        byte[] bytes3 = "\",\"_id\":\"1\"}}\n{\"field\":1}\n\r\n".getBytes(StandardCharsets.UTF_8);
+        byte[] bulkBody = new byte[bytes1.length + bytes2.length + bytes3.length];
+        System.arraycopy(bytes1, 0, bulkBody, 0, bytes1.length);
+        System.arraycopy(bytes2, 0, bulkBody, bytes1.length, bytes2.length);
+        System.arraycopy(bytes3, 0, bulkBody, bytes1.length + bytes2.length, bytes3.length);
+
+        request.setEntity(new ByteArrayEntity(bulkBody, ContentType.APPLICATION_JSON));
+
+        ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
+        assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus()));
+        assertThat(responseException.getMessage(), containsString("could not parse bulk request body"));
+        assertThat(responseException.getMessage(), containsString("json_parse_exception"));
+        assertThat(responseException.getMessage(), containsString("Invalid UTF-8"));
+    }
+
     public void testBulkRequestBodyImproperlyTerminated() throws IOException {
         Request request = new Request(randomBoolean() ? "POST" : "PUT", "/_bulk");
         // missing final line of the bulk body. cannot process
@@ -61,10 +89,10 @@ public void testBulkRequestBodyImproperlyTerminated() throws IOException {
         );
         ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request));
         assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode());
-        assertThat(responseException.getMessage(), containsString("could not parse bulk request body"));
+        assertThat(responseException.getMessage(), containsString("The bulk request must be terminated by a newline"));
     }
 
-    public void testIncrementalBulk() throws IOException {
+    public void testBulkRequest() throws IOException {
         Request createRequest = new Request("PUT", "/index_name");
         createRequest.setJsonEntity("""
             {
@@ -81,7 +109,6 @@ public void testIncrementalBulk() throws IOException {
 
         Request firstBulkRequest = new Request("POST", "/index_name/_bulk");
 
-        // index documents for the rollup job
         String bulkBody = "{\"index\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n"
             + "{\"field\":1}\n"
             + "{\"index\":{\"_index\":\"index_name\",\"_id\":\"2\"}}\n"
@@ -113,7 +140,6 @@ public void testBulkWithIncrementalDisabled() throws IOException {
 
         Request firstBulkRequest = new Request("POST", "/index_name/_bulk");
 
-        // index documents for the rollup job
         String bulkBody = "{\"index\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n"
             + "{\"field\":1}\n"
             + "{\"index\":{\"_index\":\"index_name\",\"_id\":\"2\"}}\n"
@@ -137,7 +163,7 @@ public void testBulkWithIncrementalDisabled() throws IOException {
         }
     }
 
-    public void testIncrementalMalformed() throws IOException {
+    public void testMalformedActionLineBulk() throws IOException {
         Request createRequest = new Request("PUT", "/index_name");
         createRequest.setJsonEntity("""
             {
@@ -154,7 +180,6 @@ public void testIncrementalMalformed() throws IOException {
 
         Request bulkRequest = new Request("POST", "/index_name/_bulk");
 
-        // index documents for the rollup job
         final StringBuilder bulk = new StringBuilder();
         bulk.append("{\"index\":{\"_index\":\"index_name\"}}\n");
         bulk.append("{\"field\":1}\n");
@@ -170,7 +195,6 @@ public void testIncrementalMalformed() throws IOException {
     private static void sendLargeBulk() throws IOException {
         Request bulkRequest = new Request("POST", "/index_name/_bulk");
 
-        // index documents for the rollup job
         final StringBuilder bulk = new StringBuilder();
         bulk.append("{\"delete\":{\"_index\":\"index_name\",\"_id\":\"1\"}}\n");
         int updates = 0;