Skip to content

Commit a8bf1ff

Browse files
Merge branch 'main' into parker/es819-doc-value-version-0-bwc-test
2 parents a74b6c9 + 81ff1a7 commit a8bf1ff

File tree

62 files changed

+1522
-255
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

62 files changed

+1522
-255
lines changed

build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@
4848
import java.util.Arrays;
4949
import java.util.List;
5050
import java.util.Optional;
51+
import java.util.concurrent.TimeUnit;
5152

5253
import javax.inject.Inject;
5354

@@ -199,7 +200,11 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo
199200
try {
200201
// we are very generous here, as the upload can take
201202
// a long time depending on its size
202-
pb.start().waitFor(30, java.util.concurrent.TimeUnit.MINUTES);
203+
long timeoutSec = calculateUploadWaitTimeoutSeconds(uploadFile);
204+
boolean completedInTime = pb.start().waitFor(timeoutSec, TimeUnit.SECONDS);
205+
if (completedInTime == false) {
206+
System.out.println("Timed out waiting for buildkite artifact upload after " + timeoutSec + " seconds");
207+
}
203208
} catch (InterruptedException e) {
204209
System.out.println("Failed to upload buildkite artifact " + e.getMessage());
205210
}
@@ -304,5 +309,14 @@ private static String calculateArchivePath(Path path, Path projectPath) {
304309
}
305310
return archivePath;
306311
}
312+
313+
private static long calculateUploadWaitTimeoutSeconds(File file) {
314+
long fileSizeBytes = file.length();
315+
long fileSizeMB = fileSizeBytes / (1024 * 1024);
316+
317+
// Allocate 4 seconds per MB (assumes ~250 KB/s upload speed)
318+
// with min 10 seconds and max 30 minutes
319+
return Math.max(10, Math.min(1800, fileSizeMB * 4));
320+
}
307321
}
308322
}

build-tools/src/main/java/org/elasticsearch/gradle/testclusters/MockApmServer.java

Lines changed: 29 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,11 @@
99

1010
package org.elasticsearch.gradle.testclusters;
1111

12+
import com.fasterxml.jackson.databind.JsonNode;
1213
import com.fasterxml.jackson.databind.ObjectMapper;
1314
import com.fasterxml.jackson.databind.node.ObjectNode;
15+
import com.fasterxml.jackson.databind.util.LRUMap;
16+
import com.fasterxml.jackson.databind.util.LookupCache;
1417
import com.sun.net.httpserver.HttpExchange;
1518
import com.sun.net.httpserver.HttpHandler;
1619
import com.sun.net.httpserver.HttpServer;
@@ -28,7 +31,9 @@
2831
import java.io.InputStreamReader;
2932
import java.io.OutputStream;
3033
import java.net.InetSocketAddress;
34+
import java.util.ArrayList;
3135
import java.util.Arrays;
36+
import java.util.List;
3237
import java.util.regex.Pattern;
3338
import java.util.stream.Collectors;
3439

@@ -48,6 +53,8 @@
4853
public class MockApmServer {
4954
private static final Logger logger = Logging.getLogger(MockApmServer.class);
5055
private static final org.slf4j.Logger log = LoggerFactory.getLogger(MockApmServer.class);
56+
private static final LookupCache<String, String> transactionCache = new LRUMap(16, 16);
57+
5158
private final Pattern metricFilter;
5259
private final Pattern transactionFilter;
5360
private final Pattern transactionExcludesFilter;
@@ -136,22 +143,28 @@ private void logFiltered(InputStream body) throws IOException {
136143
ObjectMapper mapper = new ObjectMapper();
137144
try (BufferedReader reader = new BufferedReader(new InputStreamReader(body))) {
138145
String line;
139-
String tier = null;
140-
String node = null;
146+
String nodeMetadata = null;
147+
148+
List<JsonNode> spans = new ArrayList<>();
141149

142150
while ((line = reader.readLine()) != null) {
143151
var jsonNode = mapper.readTree(line);
144152

145153
if (jsonNode.has("metadata")) {
146-
node = jsonNode.path("metadata").path("service").path("node").path("configured_name").asText(null);
147-
tier = jsonNode.path("metadata").path("labels").path("node_tier").asText(null);
154+
nodeMetadata = jsonNode.path("metadata").path("service").path("node").path("configured_name").asText(null);
155+
var tier = jsonNode.path("metadata").path("labels").path("node_tier").asText(null);
156+
nodeMetadata += tier != null ? "/" + tier : "";
157+
148158
} else if (transactionFilter != null && jsonNode.has("transaction")) {
149159
var transaction = jsonNode.get("transaction");
150160
var name = transaction.get("name").asText();
151161
if (transactionFilter.matcher(name).matches()
152162
&& (transactionExcludesFilter == null || transactionExcludesFilter.matcher(name).matches() == false)) {
153-
logger.lifecycle("Transaction [{}/{}]: {}", node, tier, transaction);
163+
transactionCache.put(transaction.get("id").asText(), name);
164+
logger.lifecycle("Transaction {} [{}]: {}", name, nodeMetadata, transaction);
154165
}
166+
} else if (jsonNode.has("span")) {
167+
spans.add(jsonNode.get("span")); // make sure to record all transactions first
155168
} else if (metricFilter != null && jsonNode.has("metricset")) {
156169
var metricset = jsonNode.get("metricset");
157170
var samples = (ObjectNode) metricset.get("samples");
@@ -161,10 +174,20 @@ private void logFiltered(InputStream body) throws IOException {
161174
}
162175
}
163176
if (samples.isEmpty() == false) {
164-
logger.lifecycle("Metricset [{}/{}]", node, tier, metricset);
177+
logger.lifecycle("Metricset [{}]: {}", nodeMetadata, metricset);
165178
}
166179
}
167180
}
181+
182+
// emit only spans for previously matched transactions using the transaction cache
183+
for (var span : spans) {
184+
var name = span.get("name").asText();
185+
var transactionId = span.get("transaction_id").asText();
186+
var transactionName = transactionCache.get(transactionId);
187+
if (transactionName != null) {
188+
logger.lifecycle("Span {} of {} [{}]: {}", name, transactionName, nodeMetadata, span);
189+
}
190+
}
168191
}
169192
}
170193
}

distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,10 @@ class APMJvmOptions {
8181
"metrics_interval", "120s",
8282
"breakdown_metrics", "false",
8383
"central_config", "false",
84-
"transaction_sample_rate", "0.2"
84+
"transaction_sample_rate", "0.2",
85+
// Don't collect stacktraces for spans, typically these are of little use as
86+
// always pointing to APMTracer.stopTrace invoked from TaskManager
87+
"stack_trace_limit", "0"
8588
);
8689
// end::noformat
8790

docs/changelog/136951.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 136951
2+
summary: Support different downsampling methods through ILM
3+
area: "ILM+SLM"
4+
type: enhancement
5+
issues: []

docs/changelog/137222.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
pr: 137222
2+
summary: "[Sentinel One] Add `manage`, `create_index`, `read`, `index`, `write`, `delete`, permission for third-party agent indices in the `Kibana system` to support the threat event data stream."
3+
area: Authorization
4+
type: enhancement
5+
issues:
6+
- 240901

docs/changelog/137375.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
pr: 137375
2+
summary: Allow opting out of force-merging on a cloned index in ILM's searchable snapshot
3+
action
4+
area: ILM+SLM
5+
type: enhancement
6+
issues: []

docs/changelog/137394.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
pr: 137394
2+
summary: Fix dropped ignore above fields
3+
area: Mapping
4+
type: bug
5+
issues:
6+
- 137360

docs/changelog/137399.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 137399
2+
summary: Allow allocating clones over low watermark
3+
area: Allocation
4+
type: bug
5+
issues: []

docs/reference/elasticsearch/index-lifecycle-actions/ilm-downsample.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,13 @@ To use the `downsample` action in the `hot` phase, the `rollover` action **must*
1919

2020
`fixed_interval`
2121
: (Required, string) The [fixed time interval](docs-content://manage-data/lifecycle/rollup/understanding-groups.md#rollup-understanding-group-intervals) into which the data will be downsampled.
22+
2223
`force_merge_index` {applies_to}`stack: ga 9.3`
2324
: (Optional, boolean) When true, the downsampled index will be [force merged](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-forcemerge) to one [segment](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-segments). Defaults to `true`.
2425

26+
`sampling_method` {applies_to}`stack: ga 9.3`
27+
: (Optional, string) The sampling method that will be used to sample metrics; there are two methods available: `aggregate` and
28+
`last_value`. Defaults to `aggregate`.
2529

2630
## Example [ilm-downsample-ex]
2731

docs/reference/elasticsearch/index-lifecycle-actions/ilm-searchable-snapshot.md

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,11 @@ By default, this snapshot is deleted by the [delete action](/reference/elasticse
4646

4747
This force merging occurs in the phase that the index is in **prior** to the `searchable_snapshot` action. For example, if using a `searchable_snapshot` action in the `hot` phase, the force merge will be performed on the hot nodes. If using a `searchable_snapshot` action in the `cold` phase, the force merge will be performed on whatever tier the index is **prior** to the `cold` phase (either `hot` or `warm`).
4848

49+
`force_merge_on_clone` {applies_to}`stack: ga 9.2.1`
50+
: (Optional, Boolean) By default, if `force_merge_index` is `true`, the index will first be cloned with 0 replicas and the force-merge will be performed on the clone before the searchable snapshot is created. This avoids performing the force-merge redundantly on replica shards, as the snapshot operation only uses primary shards. Setting this option to `false` will skip the clone step and perform the force-merge directly on the managed index. Defaults to `true`.
51+
4952
`total_shards_per_node`
50-
: The maximum number of shards (replicas and primaries) that will be allocated to a single node for the searchable snapshot index. Defaults to unbounded.
53+
: (Optional, Integer) The maximum number of shards (replicas and primaries) that will be allocated to a single node for the searchable snapshot index. Defaults to unbounded.
5154

5255

5356
## Examples [ilm-searchable-snapshot-ex]

0 commit comments

Comments
 (0)