
Commit 1a2e208

Merge branch 'main' into api-remove-index-block
2 parents 15461ad + 2b8d9df commit 1a2e208


182 files changed (+6067, −3490 lines)


distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/MachineDependentHeapTests.java

Lines changed: 17 additions & 1 deletion
@@ -10,6 +10,7 @@
 package org.elasticsearch.server.cli;
 
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.FeatureFlag;
 import org.elasticsearch.test.ESTestCase;
 import org.hamcrest.Matcher;
 
@@ -55,7 +56,8 @@ public void testMasterOnlyOptions() throws Exception {
         assertHeapOptions(64, containsInAnyOrder("-Xmx31744m", "-Xms31744m"), "master");
     }
 
-    public void testMlOnlyOptions() throws Exception {
+    public void testMlOnlyOptions_new() throws Exception {
+        assumeTrue("feature flag must be enabled for new memory computation", new FeatureFlag("new_ml_memory_computation").isEnabled());
         assertHeapOptions(1, containsInAnyOrder("-Xmx272m", "-Xms272m"), "ml");
         assertHeapOptions(4, containsInAnyOrder("-Xmx1092m", "-Xms1092m"), "ml");
         assertHeapOptions(32, containsInAnyOrder("-Xmx5460m", "-Xms5460m"), "ml");
@@ -65,6 +67,20 @@ public void testMlOnlyOptions() throws Exception {
         assertHeapOptions(263, containsInAnyOrder("-Xmx21228m", "-Xms21228m"), "ml");
     }
 
+    public void testMlOnlyOptions_old() throws Exception {
+        assumeTrue(
+            "feature flag must be disabled for old memory computation",
+            new FeatureFlag("new_ml_memory_computation").isEnabled() == false
+        );
+        assertHeapOptions(1, containsInAnyOrder("-Xmx408m", "-Xms408m"), "ml");
+        assertHeapOptions(4, containsInAnyOrder("-Xmx1636m", "-Xms1636m"), "ml");
+        assertHeapOptions(32, containsInAnyOrder("-Xmx8192m", "-Xms8192m"), "ml");
+        assertHeapOptions(64, containsInAnyOrder("-Xmx11468m", "-Xms11468m"), "ml");
+        // We'd never see a node this big in Cloud, but this assertion proves that the 31GB absolute maximum
+        // eventually kicks in (because 0.4 * 16 + 0.1 * (263 - 16) > 31)
+        assertHeapOptions(263, containsInAnyOrder("-Xmx31744m", "-Xms31744m"), "ml");
+    }
+
     public void testDataNodeOptions() throws Exception {
         assertHeapOptions(1, containsInAnyOrder("-Xmx512m", "-Xms512m"), "data");
         assertHeapOptions(8, containsInAnyOrder("-Xmx4096m", "-Xms4096m"), "data");
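
The inline comment in testMlOnlyOptions_old only spells out the over-16 GB branch of the old heuristic. As a minimal, hedged sketch (the class name LegacyMlHeapSketch is invented for illustration; the production logic lives in MachineDependentHeap and rounds slightly differently, as the 1 GB and 4 GB expectations show), the assertions are consistent with allocating 40% of the first 16 GB of machine memory plus 10% of the remainder, capped at an absolute 31 GB:

class LegacyMlHeapSketch {
    // 40% of the first 16 GB plus 10% of the rest, capped at 31 GB, expressed in megabytes.
    static long heapMb(double machineMemoryGb) {
        double heapGb = machineMemoryGb <= 16
            ? 0.4 * machineMemoryGb
            : 0.4 * 16 + 0.1 * (machineMemoryGb - 16);
        return Math.round(Math.min(heapGb, 31.0) * 1024);
    }

    public static void main(String[] args) {
        // 32 GB -> 8192m and 263 GB -> 31744m match the assertions above exactly;
        // 64 GB -> ~11469m versus the expected 11468m, so the real code rounds down somewhere.
        // The 263 GB case hits the cap because 0.4 * 16 + 0.1 * (263 - 16) = 31.1 > 31.
        System.out.println(heapMb(32) + "m " + heapMb(64) + "m " + heapMb(263) + "m");
    }
}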

docs/changelog/129302.yaml

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+pr: 129302
+summary: Move HTTP content aggregation from Netty into `RestController`
+area: Network
+type: enhancement
+issues:
+ - 120746

docs/changelog/129606.yaml

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
+pr: 129606
+summary: Release FORK in tech preview
+area: ES|QL
+type: feature
+issues: []
+highlight:
+  title: Release FORK in tech preview
+  body: |-
+    Fork is a foundational building block that allows multiple branches of execution.
+    Conceptually, fork is:
+    - a bifurcation of the stream, with all data going to each fork branch, followed by
+    - a merge of the branches, enhanced with a discriminator column called FORK:
+
+    Example:
+
+    [source,yaml]
+    ----------------------------
+    FROM test
+    | FORK
+       ( WHERE content:"fox" )
+       ( WHERE content:"dog" )
+    | SORT _fork
+    ----------------------------
+
+    The FORK command adds a discriminator column called `_fork`:
+
+    [source,yaml]
+    ----------------------------
+    | id  | content   | _fork |
+    |-----|-----------|-------|
+    | 3   | brown fox | fork1 |
+    | 4   | white dog | fork2 |
+    ----------------------------
+
+  notable: true

docs/reference/query-languages/esql/_snippets/functions/layout/categorize.md

Lines changed: 0 additions & 6 deletions
Some generated files are not rendered by default.
Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 * [`BUCKET`](../../functions-operators/grouping-functions.md#esql-bucket)
-* [preview] [`CATEGORIZE`](../../functions-operators/grouping-functions.md#esql-categorize)
+* [`CATEGORIZE`](../../functions-operators/grouping-functions.md#esql-categorize)

docs/reference/query-languages/esql/kibana/definition/functions/categorize.json

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default.

modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java

Lines changed: 14 additions & 11 deletions
@@ -17,6 +17,7 @@
 import org.elasticsearch.ingest.geoip.stats.CacheStats;
 import org.elasticsearch.test.ESTestCase;
 
+import java.nio.file.Path;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
@@ -140,23 +141,25 @@ public void testPurgeCacheEntriesForDatabase() {
         GeoIpCache cache = new GeoIpCache(100);
         ProjectId projectId1 = randomUniqueProjectId();
         ProjectId projectId2 = randomUniqueProjectId();
-        String databasePath1 = "path/to/db1";
-        String databasePath2 = "path/to/db2";
+        // Turn the path strings into Paths to ensure that we always use the canonical string representation (this string literal does not
+        // round-trip when converting to a Path and back again on Windows):
+        Path databasePath1 = PathUtils.get("path/to/db1");
+        Path databasePath2 = PathUtils.get("path/to/db2");
         String ip1 = "127.0.0.1";
         String ip2 = "127.0.0.2";
 
         AbstractResponse response = mock(AbstractResponse.class);
-        cache.putIfAbsent(projectId1, ip1, databasePath1, ip -> response); // cache miss
-        cache.putIfAbsent(projectId1, ip2, databasePath1, ip -> response); // cache miss
-        cache.putIfAbsent(projectId2, ip1, databasePath1, ip -> response); // cache miss
-        cache.putIfAbsent(projectId1, ip1, databasePath2, ip -> response); // cache miss
-        cache.purgeCacheEntriesForDatabase(projectId1, PathUtils.get(databasePath1));
+        cache.putIfAbsent(projectId1, ip1, databasePath1.toString(), ip -> response); // cache miss
+        cache.putIfAbsent(projectId1, ip2, databasePath1.toString(), ip -> response); // cache miss
+        cache.putIfAbsent(projectId2, ip1, databasePath1.toString(), ip -> response); // cache miss
+        cache.putIfAbsent(projectId1, ip1, databasePath2.toString(), ip -> response); // cache miss
+        cache.purgeCacheEntriesForDatabase(projectId1, databasePath1);
         // should have purged entries for projectId1 and databasePath1...
-        assertNull(cache.get(projectId1, ip1, databasePath1));
-        assertNull(cache.get(projectId1, ip2, databasePath1));
+        assertNull(cache.get(projectId1, ip1, databasePath1.toString()));
+        assertNull(cache.get(projectId1, ip2, databasePath1.toString()));
         // ...but left the one for projectId2...
-        assertSame(response, cache.get(projectId2, ip1, databasePath1));
+        assertSame(response, cache.get(projectId2, ip1, databasePath1.toString()));
         // ...and for databasePath2:
-        assertSame(response, cache.get(projectId1, ip1, databasePath2));
+        assertSame(response, cache.get(projectId1, ip1, databasePath2.toString()));
     }
 }
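
The new comment in this hunk is about Windows path separators: the literal "path/to/db1" does not survive a round trip through a Path, so mixing the raw string with a Path-derived string would produce mismatched cache keys. A minimal, hedged illustration using plain java.nio (the test itself goes through Elasticsearch's PathUtils, but the effect on a Windows default filesystem is the same; the class name PathRoundTripDemo is invented here):

import java.nio.file.Path;
import java.nio.file.Paths;

class PathRoundTripDemo {
    public static void main(String[] args) {
        String literal = "path/to/db1";
        Path path = Paths.get(literal);
        // On Windows this prints "path\to\db1", so literal.equals(path.toString()) is false.
        // A cache entry keyed by the raw literal could then never be found (or purged) through
        // a key derived from the Path, which is why the test now builds the Path once and
        // consistently uses its toString() form.
        System.out.println(path + " equals literal? " + literal.equals(path.toString()));
    }
}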

modules/ingest-otel/src/main/java/org/elasticsearch/ingest/otel/NormalizeForStreamPlugin.java

Lines changed: 6 additions & 1 deletion
@@ -9,6 +9,7 @@
 
 package org.elasticsearch.ingest.otel;
 
+import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.ingest.Processor;
 import org.elasticsearch.plugins.IngestPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -19,6 +20,10 @@ public class NormalizeForStreamPlugin extends Plugin implements IngestPlugin {
 
     @Override
     public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
-        return Map.of(NormalizeForStreamProcessor.TYPE, new NormalizeForStreamProcessor.Factory());
+        if (DataStream.LOGS_STREAM_FEATURE_FLAG) {
+            return Map.of(NormalizeForStreamProcessor.TYPE, new NormalizeForStreamProcessor.Factory());
+        } else {
+            return Map.of();
+        }
     }
 }

modules/ingest-otel/src/yamlRestTest/java/org/elasticsearch/ingest/otel/IngestOtelClientYamlTestSuiteIT.java

Lines changed: 5 additions & 1 deletion
@@ -13,6 +13,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.FeatureFlag;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 import org.junit.ClassRule;
@@ -24,7 +25,10 @@ public IngestOtelClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate tes
     }
 
     @ClassRule
-    public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("ingest-otel").build();
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("ingest-otel")
+        .feature(FeatureFlag.LOGS_STREAM)
+        .build();
 
     @Override
     protected String getTestRestCluster() {

modules/repository-s3/qa/third-party/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java

Lines changed: 2 additions & 1 deletion
@@ -18,6 +18,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
 
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.metadata.ProjectId;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.blobstore.OptionalBytesReference;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -147,7 +148,7 @@ public long absoluteTimeInMillis() {
         // construct our own repo instance so we can inject a threadpool that allows to control the passage of time
         try (
             var repository = new S3Repository(
-                randomProjectIdOrDefault(),
+                ProjectId.DEFAULT,
                 node().injector().getInstance(RepositoriesService.class).repository(TEST_REPO_NAME).getMetadata(),
                 xContentRegistry(),
                 node().injector().getInstance(PluginsService.class).filterPlugins(S3RepositoryPlugin.class).findFirst().get().getService(),
