
Commit bf9cf1d

Merge branch 'main' into transport/avoid_validating_newer_branches
2 parents af85081 + 26c629f

File tree: 113 files changed, +1800 / -800 lines


build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java

Lines changed: 18 additions & 1 deletion
@@ -52,6 +52,23 @@ public void apply(Project project) {
         // Spotless resolves required dependencies from project repositories, so we need maven central
         project.getRepositories().mavenCentral();
 
+        // we cannot update to a newer spotless plugin version yet but we want to use the latest eclipse formatter to be compatible
+        // with latest java versions
+        project.getConfigurations().matching(it -> it.getName().startsWith("spotless")).configureEach(conf -> {
+            project.getDependencies().constraints(constraints -> {
+                constraints.add(conf.getName(), "org.eclipse.jdt:org.eclipse.jdt.core:3.42.0", dependencyConstraint -> {
+                    dependencyConstraint.because(
+                        "We want to use a recent version of the Eclipse formatter libraries to support latest Java"
+                    );
+                });
+                constraints.add(conf.getName(), "org.eclipse.jdt:ecj:3.42.0", dependencyConstraint -> {
+                    dependencyConstraint.because(
+                        "We want to use a recent version of the Eclipse formatter libraries to support latest Java"
+                    );
+                });
+            });
+        });
+
         project.getExtensions().getByType(SpotlessExtension.class).java(java -> {
             File elasticsearchWorkspace = Util.locateElasticsearchWorkspace(project.getGradle());
             String importOrderPath = "build-conventions/elastic.importorder";

@@ -74,7 +91,7 @@ public void apply(Project project) {
             // When running build benchmarks we alter the source in some scenarios.
             // The gradle-profiler unfortunately does not generate compliant formatted
            // sources so we ignore that altered file when running build benchmarks
-            if(Boolean.getBoolean("BUILD_PERFORMANCE_TEST") && project.getPath().equals(":server")) {
+            if (Boolean.getBoolean("BUILD_PERFORMANCE_TEST") && project.getPath().equals(":server")) {
                java.targetExclude("src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java");
            }
        });
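
For readers unfamiliar with Gradle dependency constraints, the sketch below condenses the mechanism used in this change: a constraint pins the version of a module inside every matching configuration without adding a direct dependency on it, so Spotless still pulls in the Eclipse formatter itself and only its version is forced. This is a hypothetical standalone plugin for illustration, not code from this commit, and the class name is made up.

```java
import org.gradle.api.Plugin;
import org.gradle.api.Project;

// Hypothetical plugin, shown only to illustrate the constraint pattern above.
public class EclipseFormatterConstraintPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        project.getConfigurations()
            // lazily match the configurations Spotless creates for its own classpath
            .matching(conf -> conf.getName().startsWith("spotless"))
            .configureEach(conf -> project.getDependencies().constraints(constraints ->
                // a constraint only affects resolution if the module is already requested
                // (here transitively by the Spotless plugin); it never adds a new dependency
                constraints.add(conf.getName(), "org.eclipse.jdt:org.eclipse.jdt.core:3.42.0",
                    c -> c.because("pin the Eclipse formatter to a Java-compatible release"))
            ));
    }
}
```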

build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java

Lines changed: 3 additions & 0 deletions
@@ -113,6 +113,9 @@ private void registerInternalDistributionResolutions(List<DistributionResolution
         String versionProperty = System.getProperty("tests.bwc.main.version");
         // We use this phony version as a placeholder for the real version
         if (distribution.getVersion().equals("0.0.0")) {
+            if (versionProperty == null) {
+                throw new GradleException("System property 'tests.bwc.main.version' expected for building bwc version.");
+            }
             BwcVersions.UnreleasedVersionInfo unreleasedVersionInfo = new BwcVersions.UnreleasedVersionInfo(
                 Version.fromString(versionProperty),
                 "main",

build-tools/src/main/java/org/elasticsearch/gradle/util/OsUtils.java

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ private OsUtils() {}
      * This method returns true if the given version of the JDK is known to be incompatible
      */
     public static boolean jdkIsIncompatibleWithOS(Version version) {
-        return version.onOrBefore("8.10.4") && isUbuntu2404OrLater();
+        return version.after("0.0.0") && version.onOrBefore("8.10.4") && isUbuntu2404OrLater();
     }
 
     private static boolean isUbuntu2404OrLater() {
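
The extra `version.after("0.0.0")` clause keeps the `0.0.0` placeholder version (used elsewhere in this commit as a stand-in for the real bwc version) from being reported as incompatible, while genuinely old versions in the affected range still match. A small illustration of the boolean logic, assuming `Version` here resolves to the same `org.elasticsearch.gradle.Version` class whose string-accepting `fromString`, `after`, and `onOrBefore` helpers appear in these diffs; the OS check from `jdkIsIncompatibleWithOS` is omitted.

```java
import org.elasticsearch.gradle.Version;

// Illustration only, not commit code: how the new guard treats the "0.0.0" placeholder
// versus a real old version.
public class PlaceholderVersionCheck {
    public static void main(String[] args) {
        Version placeholder = Version.fromString("0.0.0");  // phony placeholder version
        Version realOld = Version.fromString("8.10.3");     // a version in the affected range

        // Old predicate: the placeholder also satisfies onOrBefore("8.10.4")
        System.out.println(placeholder.onOrBefore("8.10.4"));                                // true

        // New predicate: after("0.0.0") excludes the placeholder but keeps real old versions
        System.out.println(placeholder.after("0.0.0") && placeholder.onOrBefore("8.10.4"));  // false
        System.out.println(realOld.after("0.0.0") && realOld.onOrBefore("8.10.4"));          // true
    }
}
```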

build.gradle

Lines changed: 5 additions & 10 deletions
@@ -338,18 +338,13 @@ allprojects {
   tasks.register('resolveAllDependencies', ResolveAllDependencies) {
     def ignoredPrefixes = [DistributionDownloadPlugin.ES_DISTRO_CONFIG_PREFIX, "jdbcDriver"]
     configs = project.configurations.matching { config -> ignoredPrefixes.any { config.name.startsWith(it) } == false }
+
     if (project.path == ':') {
       resolveJavaToolChain = true
-
-      // ensure we have best possible caching of bwc builds
-      dependsOn ":distribution:bwc:major1:buildBwcLinuxTar"
-      dependsOn ":distribution:bwc:major2:buildBwcLinuxTar"
-      dependsOn ":distribution:bwc:major3:buildBwcLinuxTar"
-      dependsOn ":distribution:bwc:major4:buildBwcLinuxTar"
-      dependsOn ":distribution:bwc:minor1:buildBwcLinuxTar"
-      dependsOn ":distribution:bwc:minor2:buildBwcLinuxTar"
-      dependsOn ":distribution:bwc:minor3:buildBwcLinuxTar"
-      dependsOn ":distribution:bwc:minor4:buildBwcLinuxTar"
+    }
+    // ensure we have best possible caching of bwc builds
+    if(project.path.startsWith(":distribution:bwc:")) {
+      dependsOn project.tasks.matching { it.name == 'buildBwcLinuxTar' }
     }
     if (project.path.contains("fixture")) {
       dependsOn tasks.withType(ComposePull)
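
The replacement avoids hard-coding task paths in the root project: each `:distribution:bwc:*` project now wires its own `resolveAllDependencies` task to whatever `buildBwcLinuxTar` task it registers, via a live `matching {}` collection. Roughly the same wiring expressed as a Java convention plugin, shown only as a hypothetical sketch; this commit keeps the logic in `build.gradle`.

```java
import org.gradle.api.Plugin;
import org.gradle.api.Project;

// Hypothetical convention plugin sketching the same pattern as the build.gradle change.
public class BwcResolveCachingPlugin implements Plugin<Project> {
    @Override
    public void apply(Project project) {
        if (project.getPath().startsWith(":distribution:bwc:")) {
            project.getTasks().named("resolveAllDependencies").configure(task ->
                // matching() is evaluated lazily, so the dependency is picked up even if
                // buildBwcLinuxTar is registered later, and it is a no-op if it never is.
                task.dependsOn(project.getTasks().matching(t -> t.getName().equals("buildBwcLinuxTar")))
            );
        }
    }
}
```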

docs/changelog/134673.yaml

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+pr: 134673
+summary: Gracefully shutdown model deployment when node is removed from assignment
+  routing
+area: Machine Learning
+type: bug
+issues: []

docs/changelog/135244.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 135244
+summary: Add reload listener to `SslProfile`
+area: TLS
+type: enhancement
+issues: []

docs/changelog/135342.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 135342
+summary: Add 'profile' support for knn query on HNSW with early termination
+area: Vector Search
+type: enhancement
+issues: []

docs/changelog/135401.yaml

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+pr: 135401
+summary: "Adds an OTLP metrics endpoint (`_otlp/v1/metrics`) as tech preview"
+area: TSDB
+type: enhancement
+issues: []

docs/reference/aggregations/search-aggregations-change-point-aggregation.md

Lines changed: 3 additions & 16 deletions
@@ -2,29 +2,24 @@
 navigation_title: "Change point"
 mapped_pages:
   - https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-change-point-aggregation.html
+applies_to:
+  stack: preview 9.0, ga 9.2
+  serverless: ga
 ---
 
 # Change point aggregation [search-aggregations-change-point-aggregation]
 
-
-::::{warning}
-This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features.
-::::
-
-
 A sibling pipeline that detects, spikes, dips, and change points in a metric. Given a distribution of values provided by the sibling multi-bucket aggregation, this aggregation indicates the bucket of any spike or dip and/or the bucket at which the largest change in the distribution of values, if they are statistically significant.
 
 ::::{tip}
 It is recommended to use the change point aggregation to detect changes in time-based data, however, you can use any metric to create buckets.
 ::::
 
-
 ## Parameters [change-point-agg-syntax]
 
 `buckets_path`
 : (Required, string) Path to the buckets that contain one set of values in which to detect a change point. There must be at least 22 bucketed values. Fewer than 1,000 is preferred. For syntax, see [`buckets_path` Syntax](/reference/aggregations/pipeline.md#buckets-path-syntax).
 
-
 ## Syntax [_syntax_11]
 
 A `change_point` aggregation looks like this in isolation:

@@ -39,8 +34,6 @@ A `change_point` aggregation looks like this in isolation:
 
 1. The buckets containing the values to test against.
 
-
-
 ## Response body [change-point-agg-response]
 
 `bucket`

@@ -54,7 +47,6 @@ A `change_point` aggregation looks like this in isolation:
 `doc_count`
 : (number) The document count of the bucket.
 
-
 `type`
 : (object) The found change point type and its related values. Possible types:
 

@@ -67,7 +59,6 @@ A `change_point` aggregation looks like this in isolation:
 * `trend_change`: there is an overall trend change occurring at this point
 
 
-
 ## Example [_example_7]
 
 The following example uses the Kibana sample data logs data set.

@@ -103,7 +94,6 @@ GET kibana_sample_data_logs/_search
 3. The change point detection aggregation configuration object.
 4. The path of the aggregation values to detect change points. In this case, the input of the change point aggregation is the value of `avg` which is a sibling aggregation of `date`.
 
-
 The request returns a response that is similar to the following:
 
 ```js

@@ -130,6 +120,3 @@ The request returns a response that is similar to the following:
 4. Type of change found.
 5. The `p_value` indicates how extreme the change is; lower values indicate greater change.
 6. The specific bucket where the change occurs (indexing starts at `0`).
-
-
-
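
Since the diff only shows fragments of the documented example, here is a hedged, self-contained sketch of the request shape the page describes: a `date_histogram` named `date` with an `avg` sub-aggregation, and a sibling `change_point` aggregation whose `buckets_path` points at that average. The host, interval, and field names are assumptions for illustration; it uses plain `java.net.http`, not an Elasticsearch client.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch of a change_point search against the Kibana sample data logs index,
// assuming a local unsecured cluster on http://localhost:9200.
public class ChangePointSearchExample {
    public static void main(String[] args) throws Exception {
        String body = """
            {
              "size": 0,
              "aggs": {
                "date": {
                  "date_histogram": { "field": "@timestamp", "fixed_interval": "1d" },
                  "aggs": { "avg": { "avg": { "field": "bytes" } } }
                },
                "change_points_avg": {
                  "change_point": { "buckets_path": "date>avg" }
                }
              }
            }
            """;

        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/kibana_sample_data_logs/_search"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());
    }
}
```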

docs/reference/enrich-processor/inference-processor.md

Lines changed: 14 additions & 14 deletions
@@ -127,8 +127,8 @@ Classification configuration for inference.
 * `deberta_v2`: Use for DeBERTa v2 and v3-style models
 * `mpnet`: Use for MPNet-style models
 * `roberta`: Use for RoBERTa-style and BART-style models
-* [preview] `xlm_roberta`: Use for XLMRoBERTa-style models
-* [preview] `bert_ja`: Use for BERT-style models trained for the Japanese language.
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `xlm_roberta`: Use for XLMRoBERTa-style models
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `bert_ja`: Use for BERT-style models trained for the Japanese language.
 
 ::::{dropdown} Properties of tokenization
 `bert`

@@ -191,8 +191,8 @@ Classification configuration for inference.
 * `deberta_v2`: Use for DeBERTa v2 and v3-style models
 * `mpnet`: Use for MPNet-style models
 * `roberta`: Use for RoBERTa-style and BART-style models
-* [preview] `xlm_roberta`: Use for XLMRoBERTa-style models
-* [preview] `bert_ja`: Use for BERT-style models trained for the Japanese language.
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `xlm_roberta`: Use for XLMRoBERTa-style models
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `bert_ja`: Use for BERT-style models trained for the Japanese language.
 
 ::::{dropdown} Properties of tokenization
 `bert`

@@ -271,8 +271,8 @@ Regression configuration for inference.
 * `deberta_v2`: Use for DeBERTa v2 and v3-style models
 * `mpnet`: Use for MPNet-style models
 * `roberta`: Use for RoBERTa-style and BART-style models
-* [preview] `xlm_roberta`: Use for XLMRoBERTa-style models
-* [preview] `bert_ja`: Use for BERT-style models trained for the Japanese language.
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `xlm_roberta`: Use for XLMRoBERTa-style models
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `bert_ja`: Use for BERT-style models trained for the Japanese language.
 
 ::::{dropdown} Properties of tokenization
 `bert`

@@ -353,8 +353,8 @@ Regression configuration for inference.
 * `deberta_v2`: Use for DeBERTa v2 and v3-style models
 * `mpnet`: Use for MPNet-style models
 * `roberta`: Use for RoBERTa-style and BART-style models
-* [preview] `xlm_roberta`: Use for XLMRoBERTa-style models
-* [preview] `bert_ja`: Use for BERT-style models trained for the Japanese language.
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `xlm_roberta`: Use for XLMRoBERTa-style models
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `bert_ja`: Use for BERT-style models trained for the Japanese language.
 
 ::::{dropdown} Properties of tokenization
 `bert`

@@ -417,8 +417,8 @@ Regression configuration for inference.
 * `deberta_v2`: Use for DeBERTa v2 and v3-style models
 * `mpnet`: Use for MPNet-style models
 * `roberta`: Use for RoBERTa-style and BART-style models
-* [preview] `xlm_roberta`: Use for XLMRoBERTa-style models
-* [preview] `bert_ja`: Use for BERT-style models trained for the Japanese language.
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `xlm_roberta`: Use for XLMRoBERTa-style models
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `bert_ja`: Use for BERT-style models trained for the Japanese language.
 
 ::::{dropdown} Properties of tokenization
 `bert`

@@ -504,8 +504,8 @@ Regression configuration for inference.
 * `deberta_v2`: Use for DeBERTa v2 and v3-style models
 * `mpnet`: Use for MPNet-style models
 * `roberta`: Use for RoBERTa-style and BART-style models
-* [preview] `xlm_roberta`: Use for XLMRoBERTa-style models
-* [preview] `bert_ja`: Use for BERT-style models trained for the Japanese language.
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `xlm_roberta`: Use for XLMRoBERTa-style models
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `bert_ja`: Use for BERT-style models trained for the Japanese language.
 Refer to [Properties of `tokenizaton`](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-ml-put-trained-model) to review the properties of the `tokenization` object.
 ::::
 

@@ -527,8 +527,8 @@ Refer to [Properties of `tokenizaton`](https://www.elastic.co/docs/api/doc/elast
 * `deberta_v2`: Use for DeBERTa v2 and v3-style models
 * `mpnet`: Use for MPNet-style models
 * `roberta`: Use for RoBERTa-style and BART-style models
-* [preview] `xlm_roberta`: Use for XLMRoBERTa-style models
-* [preview] `bert_ja`: Use for BERT-style models trained for the Japanese language.
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `xlm_roberta`: Use for XLMRoBERTa-style models
+* {applies_to}`stack: preview` {applies_to}`serverless: preview` `bert_ja`: Use for BERT-style models trained for the Japanese language.
 
 ::::{dropdown} Properties of tokenization
 `bert`
