Skip to content

Commit f2afd19

Browse files
Merge branch 'main' into consider-aborting-merges-while-enqueued
2 parents f47a419 + c173522 commit f2afd19

File tree

52 files changed

+3327
-2146
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

52 files changed

+3327
-2146
lines changed

distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/MachineDependentHeapTests.java

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
package org.elasticsearch.server.cli;
1111

1212
import org.elasticsearch.common.settings.Settings;
13+
import org.elasticsearch.common.util.FeatureFlag;
1314
import org.elasticsearch.test.ESTestCase;
1415
import org.hamcrest.Matcher;
1516

@@ -55,7 +56,8 @@ public void testMasterOnlyOptions() throws Exception {
5556
assertHeapOptions(64, containsInAnyOrder("-Xmx31744m", "-Xms31744m"), "master");
5657
}
5758

58-
public void testMlOnlyOptions() throws Exception {
59+
public void testMlOnlyOptions_new() throws Exception {
60+
assumeTrue("feature flag must be enabled for new memory computation", new FeatureFlag("new_ml_memory_computation").isEnabled());
5961
assertHeapOptions(1, containsInAnyOrder("-Xmx272m", "-Xms272m"), "ml");
6062
assertHeapOptions(4, containsInAnyOrder("-Xmx1092m", "-Xms1092m"), "ml");
6163
assertHeapOptions(32, containsInAnyOrder("-Xmx5460m", "-Xms5460m"), "ml");
@@ -65,6 +67,20 @@ public void testMlOnlyOptions() throws Exception {
6567
assertHeapOptions(263, containsInAnyOrder("-Xmx21228m", "-Xms21228m"), "ml");
6668
}
6769

70+
public void testMlOnlyOptions_old() throws Exception {
71+
assumeTrue(
72+
"feature flag must be disabled for old memory computation",
73+
new FeatureFlag("new_ml_memory_computation").isEnabled() == false
74+
);
75+
assertHeapOptions(1, containsInAnyOrder("-Xmx408m", "-Xms408m"), "ml");
76+
assertHeapOptions(4, containsInAnyOrder("-Xmx1636m", "-Xms1636m"), "ml");
77+
assertHeapOptions(32, containsInAnyOrder("-Xmx8192m", "-Xms8192m"), "ml");
78+
assertHeapOptions(64, containsInAnyOrder("-Xmx11468m", "-Xms11468m"), "ml");
79+
// We'd never see a node this big in Cloud, but this assertion proves that the 31GB absolute maximum
80+
// eventually kicks in (because 0.4 * 16 + 0.1 * (263 - 16) > 31)
81+
assertHeapOptions(263, containsInAnyOrder("-Xmx31744m", "-Xms31744m"), "ml");
82+
}
83+
6884
public void testDataNodeOptions() throws Exception {
6985
assertHeapOptions(1, containsInAnyOrder("-Xmx512m", "-Xms512m"), "data");
7086
assertHeapOptions(8, containsInAnyOrder("-Xmx4096m", "-Xms4096m"), "data");

docs/changelog/129606.yaml

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
pr: 129606
2+
summary: Release FORK in tech preview
3+
area: ES|QL
4+
type: feature
5+
issues: []
6+
highlight:
7+
title: Release FORK in tech preview
8+
body: |-
9+
Fork is a foundational building block that allows multiple branches of execution.
10+
Conceptually, fork is:
11+
- a bifurcation of the stream, with all data going to each fork branch, followed by
12+
- a merge of the branches, enhanced with a discriminator column called FORK:
13+
14+
Example:
15+
16+
[source,yaml]
17+
----------------------------
18+
FROM test
19+
| FORK
20+
( WHERE content:"fox" )
21+
( WHERE content:"dog" )
22+
| SORT _fork
23+
----------------------------
24+
25+
The FORK command adds a discriminator column called `_fork`:
26+
27+
[source,yaml]
28+
----------------------------
29+
| id | content | _fork |
30+
|-----|-----------|-------|
31+
| 3 | brown fox | fork1 |
32+
| 4 | white dog | fork2 |
33+
----------------------------
34+
35+
notable: true

docs/reference/query-languages/esql/_snippets/functions/layout/categorize.md

Lines changed: 0 additions & 6 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
* [`BUCKET`](../../functions-operators/grouping-functions.md#esql-bucket)
2-
* [preview] [`CATEGORIZE`](../../functions-operators/grouping-functions.md#esql-categorize)
2+
* [`CATEGORIZE`](../../functions-operators/grouping-functions.md#esql-categorize)

docs/reference/query-languages/esql/kibana/definition/functions/categorize.json

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpCacheTests.java

Lines changed: 14 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
import org.elasticsearch.ingest.geoip.stats.CacheStats;
1818
import org.elasticsearch.test.ESTestCase;
1919

20+
import java.nio.file.Path;
2021
import java.util.concurrent.atomic.AtomicInteger;
2122
import java.util.concurrent.atomic.AtomicLong;
2223
import java.util.function.Function;
@@ -140,23 +141,25 @@ public void testPurgeCacheEntriesForDatabase() {
140141
GeoIpCache cache = new GeoIpCache(100);
141142
ProjectId projectId1 = randomUniqueProjectId();
142143
ProjectId projectId2 = randomUniqueProjectId();
143-
String databasePath1 = "path/to/db1";
144-
String databasePath2 = "path/to/db2";
144+
// Turn the path strings into Paths to ensure that we always use the canonical string representation (this string literal does not
145+
// round-trip when converting to a Path and back again on Windows):
146+
Path databasePath1 = PathUtils.get("path/to/db1");
147+
Path databasePath2 = PathUtils.get("path/to/db2");
145148
String ip1 = "127.0.0.1";
146149
String ip2 = "127.0.0.2";
147150

148151
AbstractResponse response = mock(AbstractResponse.class);
149-
cache.putIfAbsent(projectId1, ip1, databasePath1, ip -> response); // cache miss
150-
cache.putIfAbsent(projectId1, ip2, databasePath1, ip -> response); // cache miss
151-
cache.putIfAbsent(projectId2, ip1, databasePath1, ip -> response); // cache miss
152-
cache.putIfAbsent(projectId1, ip1, databasePath2, ip -> response); // cache miss
153-
cache.purgeCacheEntriesForDatabase(projectId1, PathUtils.get(databasePath1));
152+
cache.putIfAbsent(projectId1, ip1, databasePath1.toString(), ip -> response); // cache miss
153+
cache.putIfAbsent(projectId1, ip2, databasePath1.toString(), ip -> response); // cache miss
154+
cache.putIfAbsent(projectId2, ip1, databasePath1.toString(), ip -> response); // cache miss
155+
cache.putIfAbsent(projectId1, ip1, databasePath2.toString(), ip -> response); // cache miss
156+
cache.purgeCacheEntriesForDatabase(projectId1, databasePath1);
154157
// should have purged entries for projectId1 and databasePath1...
155-
assertNull(cache.get(projectId1, ip1, databasePath1));
156-
assertNull(cache.get(projectId1, ip2, databasePath1));
158+
assertNull(cache.get(projectId1, ip1, databasePath1.toString()));
159+
assertNull(cache.get(projectId1, ip2, databasePath1.toString()));
157160
// ...but left the one for projectId2...
158-
assertSame(response, cache.get(projectId2, ip1, databasePath1));
161+
assertSame(response, cache.get(projectId2, ip1, databasePath1.toString()));
159162
// ...and for databasePath2:
160-
assertSame(response, cache.get(projectId1, ip1, databasePath2));
163+
assertSame(response, cache.get(projectId1, ip1, databasePath2.toString()));
161164
}
162165
}

modules/streams/build.gradle

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
/*
2+
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
3+
* or more contributor license agreements. Licensed under the "Elastic License
4+
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
5+
* Public License v 1"; you may not use this file except in compliance with, at
6+
* your election, the "Elastic License 2.0", the "GNU Affero General Public
7+
* License v3.0 only", or the "Server Side Public License, v 1".
8+
*/
9+
10+
apply plugin: 'elasticsearch.test-with-dependencies'
11+
apply plugin: 'elasticsearch.internal-cluster-test'
12+
apply plugin: 'elasticsearch.internal-yaml-rest-test'
13+
apply plugin: 'elasticsearch.internal-java-rest-test'
14+
apply plugin: 'elasticsearch.yaml-rest-compat-test'
15+
16+
esplugin {
17+
description = 'The module adds support for the wired streams functionality including logs ingest'
18+
classname = 'org.elasticsearch.rest.streams.StreamsPlugin'
19+
}
20+
21+
restResources {
22+
restApi {
23+
include '_common', 'streams'
24+
}
25+
}
26+
27+
configurations {
28+
basicRestSpecs {
29+
attributes {
30+
attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE)
31+
}
32+
}
33+
}
34+
35+
artifacts {
36+
basicRestSpecs(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test"))
37+
}
38+
39+
dependencies {
40+
testImplementation project(path: ':test:test-clusters')
41+
}
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
/*
2+
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
3+
* or more contributor license agreements. Licensed under the "Elastic License
4+
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
5+
* Public License v 1"; you may not use this file except in compliance with, at
6+
* your election, the "Elastic License 2.0", the "GNU Affero General Public
7+
* License v3.0 only", or the "Server Side Public License, v 1".
8+
*/
9+
10+
package org.elasticsearch.rest.streams;
11+
12+
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
13+
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
14+
import org.elasticsearch.action.support.master.AcknowledgedResponse;
15+
import org.elasticsearch.cluster.metadata.ProjectId;
16+
import org.elasticsearch.cluster.metadata.ProjectMetadata;
17+
import org.elasticsearch.cluster.metadata.StreamsMetadata;
18+
import org.elasticsearch.plugins.Plugin;
19+
import org.elasticsearch.rest.streams.logs.LogsStreamsActivationToggleAction;
20+
import org.elasticsearch.test.ESIntegTestCase;
21+
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
22+
import org.elasticsearch.test.transport.MockTransportService;
23+
24+
import java.io.IOException;
25+
import java.util.Collection;
26+
import java.util.List;
27+
import java.util.concurrent.ExecutionException;
28+
29+
import static org.hamcrest.Matchers.is;
30+
31+
public class TestToggleIT extends ESIntegTestCase {
32+
33+
@Override
34+
protected Collection<Class<? extends Plugin>> nodePlugins() {
35+
return List.of(StreamsPlugin.class, MockTransportService.TestPlugin.class);
36+
}
37+
38+
public void testLogStreamToggle() throws IOException, ExecutionException, InterruptedException {
39+
boolean[] testParams = new boolean[] { true, false, true };
40+
for (boolean enable : testParams) {
41+
doLogStreamToggleTest(enable);
42+
}
43+
}
44+
45+
private void doLogStreamToggleTest(boolean enable) throws IOException, ExecutionException, InterruptedException {
46+
LogsStreamsActivationToggleAction.Request request = new LogsStreamsActivationToggleAction.Request(
47+
TEST_REQUEST_TIMEOUT,
48+
TEST_REQUEST_TIMEOUT,
49+
enable
50+
);
51+
52+
AcknowledgedResponse acknowledgedResponse = client().execute(LogsStreamsActivationToggleAction.INSTANCE, request).get();
53+
ElasticsearchAssertions.assertAcked(acknowledgedResponse);
54+
55+
ClusterStateRequest state = new ClusterStateRequest(TEST_REQUEST_TIMEOUT);
56+
ClusterStateResponse clusterStateResponse = client().admin().cluster().state(state).get();
57+
ProjectMetadata projectMetadata = clusterStateResponse.getState().metadata().getProject(ProjectId.DEFAULT);
58+
59+
assertThat(projectMetadata.<StreamsMetadata>custom(StreamsMetadata.TYPE).isLogsEnabled(), is(enable));
60+
}
61+
62+
}
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
/*
2+
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
3+
* or more contributor license agreements. Licensed under the "Elastic License
4+
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
5+
* Public License v 1"; you may not use this file except in compliance with, at
6+
* your election, the "Elastic License 2.0", the "GNU Affero General Public
7+
* License v3.0 only", or the "Server Side Public License, v 1".
8+
*/
9+
10+
module org.elasticsearch.rest.root {
11+
requires org.elasticsearch.server;
12+
requires org.elasticsearch.xcontent;
13+
requires org.apache.lucene.core;
14+
requires org.elasticsearch.base;
15+
requires org.apache.logging.log4j;
16+
17+
exports org.elasticsearch.rest.streams;
18+
exports org.elasticsearch.rest.streams.logs;
19+
}
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
/*
2+
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
3+
* or more contributor license agreements. Licensed under the "Elastic License
4+
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
5+
* Public License v 1"; you may not use this file except in compliance with, at
6+
* your election, the "Elastic License 2.0", the "GNU Affero General Public
7+
* License v3.0 only", or the "Server Side Public License, v 1".
8+
*/
9+
10+
package org.elasticsearch.rest.streams;
11+
12+
import org.elasticsearch.cluster.metadata.DataStream;
13+
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
14+
import org.elasticsearch.cluster.node.DiscoveryNodes;
15+
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
16+
import org.elasticsearch.common.settings.ClusterSettings;
17+
import org.elasticsearch.common.settings.IndexScopedSettings;
18+
import org.elasticsearch.common.settings.Settings;
19+
import org.elasticsearch.common.settings.SettingsFilter;
20+
import org.elasticsearch.features.NodeFeature;
21+
import org.elasticsearch.plugins.ActionPlugin;
22+
import org.elasticsearch.plugins.Plugin;
23+
import org.elasticsearch.rest.RestController;
24+
import org.elasticsearch.rest.RestHandler;
25+
import org.elasticsearch.rest.streams.logs.LogsStreamsActivationToggleAction;
26+
import org.elasticsearch.rest.streams.logs.RestSetLogStreamsEnabledAction;
27+
import org.elasticsearch.rest.streams.logs.RestStreamsStatusAction;
28+
import org.elasticsearch.rest.streams.logs.StreamsStatusAction;
29+
import org.elasticsearch.rest.streams.logs.TransportLogsStreamsToggleActivation;
30+
import org.elasticsearch.rest.streams.logs.TransportStreamsStatusAction;
31+
32+
import java.util.Collections;
33+
import java.util.List;
34+
import java.util.function.Predicate;
35+
import java.util.function.Supplier;
36+
37+
/**
38+
* This plugin provides the Streams feature which builds upon data streams to
39+
* provide the user with a more "batteries included" experience for ingesting large
40+
* streams of data, such as logs.
41+
*/
42+
public class StreamsPlugin extends Plugin implements ActionPlugin {
43+
44+
@Override
45+
public List<RestHandler> getRestHandlers(
46+
Settings settings,
47+
NamedWriteableRegistry namedWriteableRegistry,
48+
RestController restController,
49+
ClusterSettings clusterSettings,
50+
IndexScopedSettings indexScopedSettings,
51+
SettingsFilter settingsFilter,
52+
IndexNameExpressionResolver indexNameExpressionResolver,
53+
Supplier<DiscoveryNodes> nodesInCluster,
54+
Predicate<NodeFeature> clusterSupportsFeature
55+
) {
56+
if (DataStream.LOGS_STREAM_FEATURE_FLAG) {
57+
return List.of(new RestSetLogStreamsEnabledAction(), new RestStreamsStatusAction());
58+
}
59+
return Collections.emptyList();
60+
}
61+
62+
@Override
63+
public List<ActionHandler> getActions() {
64+
return List.of(
65+
new ActionHandler(LogsStreamsActivationToggleAction.INSTANCE, TransportLogsStreamsToggleActivation.class),
66+
new ActionHandler(StreamsStatusAction.INSTANCE, TransportStreamsStatusAction.class)
67+
);
68+
}
69+
}

0 commit comments

Comments
 (0)