
Commit 3178553 (merge)
2 parents: cbc8039 + 8bcecb9

57 files changed: +3211 −285 lines


benchmarks/build.gradle (1 addition, 0 deletions)

```diff
@@ -47,6 +47,7 @@ dependencies {
     api(project(':x-pack:plugin:core'))
     api(project(':x-pack:plugin:esql'))
     api(project(':x-pack:plugin:esql:compute'))
+    api(project(':x-pack:plugin:mapper-exponential-histogram'))
     implementation project(path: ':libs:native')
     implementation project(path: ':libs:simdvec')
     implementation project(path: ':libs:exponential-histogram')
```

benchmarks/src/main/java/org/elasticsearch/benchmark/exponentialhistogram/ExponentialHistogramMergeBench.java (50 additions, 3 deletions)

```diff
@@ -9,11 +9,15 @@
 
 package org.elasticsearch.benchmark.exponentialhistogram;
 
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.exponentialhistogram.BucketIterator;
 import org.elasticsearch.exponentialhistogram.ExponentialHistogram;
 import org.elasticsearch.exponentialhistogram.ExponentialHistogramCircuitBreaker;
 import org.elasticsearch.exponentialhistogram.ExponentialHistogramGenerator;
 import org.elasticsearch.exponentialhistogram.ExponentialHistogramMerger;
+import org.elasticsearch.xpack.exponentialhistogram.CompressedExponentialHistogram;
+import org.elasticsearch.xpack.exponentialhistogram.IndexWithCount;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
 import org.openjdk.jmh.annotations.Fork;
@@ -27,6 +31,8 @@
 import org.openjdk.jmh.annotations.Threads;
 import org.openjdk.jmh.annotations.Warmup;
 
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.ThreadLocalRandom;
@@ -47,6 +53,9 @@ public class ExponentialHistogramMergeBench {
     @Param({ "0.01", "0.1", "0.25", "0.5", "1.0", "2.0" })
     double mergedHistoSizeFactor;
 
+    @Param({ "array-backed", "compressed" })
+    String histoImplementation;
+
     Random random;
     ExponentialHistogramMerger histoMerger;
 
@@ -81,16 +90,54 @@ public void setUp() {
                 bucketIndex += 1 + random.nextInt(bucketCount) % (Math.max(1, bucketCount / dataPointSize));
                 generator.add(Math.pow(1.001, bucketIndex));
             }
-            toMerge[i] = generator.getAndClear();
-            cnt = getBucketCount(toMerge[i]);
+            ExponentialHistogram histogram = generator.getAndClear();
+            cnt = getBucketCount(histogram);
             if (cnt < dataPointSize) {
-                throw new IllegalArgumentException("Expected bucket count to be " + dataPointSize + ", but was " + cnt);
+                throw new IllegalStateException("Expected bucket count to be " + dataPointSize + ", but was " + cnt);
+            }
+
+            if ("array-backed".equals(histoImplementation)) {
+                toMerge[i] = histogram;
+            } else if ("compressed".equals(histoImplementation)) {
+                toMerge[i] = asCompressedHistogram(histogram);
+            } else {
+                throw new IllegalArgumentException("Unknown implementation: " + histoImplementation);
             }
         }
 
         index = 0;
     }
 
+    private ExponentialHistogram asCompressedHistogram(ExponentialHistogram histogram) {
+        List<IndexWithCount> negativeBuckets = new ArrayList<>();
+        List<IndexWithCount> positiveBuckets = new ArrayList<>();
+
+        BucketIterator it = histogram.negativeBuckets().iterator();
+        while (it.hasNext()) {
+            negativeBuckets.add(new IndexWithCount(it.peekIndex(), it.peekCount()));
+            it.advance();
+        }
+        it = histogram.positiveBuckets().iterator();
+        while (it.hasNext()) {
+            positiveBuckets.add(new IndexWithCount(it.peekIndex(), it.peekCount()));
+            it.advance();
+        }
+
+        long totalCount = histogram.zeroBucket().count() + histogram.negativeBuckets().valueCount() + histogram.positiveBuckets()
+            .valueCount();
+        BytesStreamOutput histoBytes = new BytesStreamOutput();
+        try {
+            CompressedExponentialHistogram.writeHistogramBytes(histoBytes, histogram.scale(), negativeBuckets, positiveBuckets);
+            CompressedExponentialHistogram result = new CompressedExponentialHistogram();
+            BytesRef data = histoBytes.bytes().toBytesRef();
+            result.reset(histogram.zeroBucket().zeroThreshold(), totalCount, data);
+            return result;
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+
+    }
+
     private static int getBucketCount(ExponentialHistogram histo) {
         int cnt = 0;
         for (BucketIterator it : List.of(histo.negativeBuckets().iterator(), histo.positiveBuckets().iterator())) {
```
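The new `compressed` parameter value wraps each generated histogram in a `CompressedExponentialHistogram` before merging, so the benchmark also measures the cost of decoding the serialized representation. The `BucketIterator` pattern used in `asCompressedHistogram` above (read the current bucket with `peekIndex()`/`peekCount()`, move on with `advance()`) is how bucket lists are walked in general; a minimal sketch of the same idiom, where `totalBucketValueCount` is a hypothetical helper name and not part of this commit:

```java
import org.elasticsearch.exponentialhistogram.BucketIterator;
import org.elasticsearch.exponentialhistogram.ExponentialHistogram;

import java.util.List;

class HistogramUtil {
    // Hypothetical helper (not from this commit): sums the value counts of all
    // negative and positive buckets by walking each BucketIterator in turn.
    static long totalBucketValueCount(ExponentialHistogram histo) {
        long total = 0;
        for (BucketIterator it : List.of(histo.negativeBuckets().iterator(), histo.positiveBuckets().iterator())) {
            while (it.hasNext()) {
                total += it.peekCount(); // count of the bucket the iterator currently points at
                it.advance();
            }
        }
        return total;
    }
}
```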

benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/TSDBDocValuesMergeBenchmark.java (1 addition, 1 deletion)

```diff
@@ -258,7 +258,7 @@ private static IndexWriterConfig createIndexWriterConfig(boolean optimizedMergeE
         );
         config.setLeafSorter(DataStream.TIMESERIES_LEAF_READERS_SORTER);
         config.setMergePolicy(new LogByteSizeMergePolicy());
-        var docValuesFormat = new ES819TSDBDocValuesFormat(4096, optimizedMergeEnabled);
+        var docValuesFormat = new ES819TSDBDocValuesFormat(4096, 512, optimizedMergeEnabled);
         config.setCodec(new Elasticsearch900Lucene101Codec() {
 
             @Override
```

build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestClustersPlugin.java (7 additions, 6 deletions)

```diff
@@ -34,12 +34,13 @@ public void apply(Project project) {
                 || buildParams.getBwcVersions().unreleasedInfo(version) == null
         );
 
-        if (shouldConfigureTestClustersWithOneProcessor()) {
-            NamedDomainObjectContainer<ElasticsearchCluster> testClusters = (NamedDomainObjectContainer<ElasticsearchCluster>) project
-                .getExtensions()
-                .getByName(TestClustersPlugin.EXTENSION_NAME);
-            testClusters.configureEach(elasticsearchCluster -> elasticsearchCluster.setting("node.processors", "1"));
-        }
+        NamedDomainObjectContainer<ElasticsearchCluster> testClusters = (NamedDomainObjectContainer<ElasticsearchCluster>) project
+            .getExtensions()
+            .getByName(TestClustersPlugin.EXTENSION_NAME);
+        // Limit the number of allocated processors for all nodes in the cluster to 2 by default.
+        // This is to ensure that the tests run consistently across different environments.
+        String processorCount = shouldConfigureTestClustersWithOneProcessor() ? "1" : "2";
+        testClusters.configureEach(elasticsearchCluster -> elasticsearchCluster.setting("node.processors", processorCount));
     }
 
     private boolean shouldConfigureTestClustersWithOneProcessor() {
```
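With this change every test cluster gets an explicit `node.processors` value by default (`1` on constrained environments, `2` otherwise), rather than only the one-processor case being configured. A build that needed a different value could presumably override it through the same container API; a hypothetical sketch (the `javaRestTest` cluster name and the value `4` are illustrative, not from this commit):

```java
// Hypothetical per-project override: look up the same testClusters container
// and raise the processor count for one named cluster.
NamedDomainObjectContainer<ElasticsearchCluster> testClusters = (NamedDomainObjectContainer<ElasticsearchCluster>) project
    .getExtensions()
    .getByName(TestClustersPlugin.EXTENSION_NAME);
testClusters.named("javaRestTest").configure(cluster -> cluster.setting("node.processors", "4"));
```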

build-tools/src/main/java/org/elasticsearch/gradle/LazyFileOutputStream.java (36 additions, 27 deletions)

```diff
@@ -3,8 +3,7 @@
  * or more contributor license agreements. Licensed under the "Elastic License
  * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
  * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public License v3.0 only", or the "Server Side Public License, v 1".
  */
 
 package org.elasticsearch.gradle;
@@ -18,48 +17,58 @@
  * An outputstream to a File that is lazily opened on the first write.
  */
 class LazyFileOutputStream extends OutputStream {
-    private OutputStream delegate;
+    private final File file;
+    private volatile OutputStream delegate;
+    private volatile boolean initialized = false;
+    private final Object lock = new Object();
 
     LazyFileOutputStream(File file) {
-        // use an initial dummy delegate to avoid doing a conditional on every write
-        this.delegate = new OutputStream() {
-            private void bootstrap() throws IOException {
-                file.getParentFile().mkdirs();
-                delegate = new FileOutputStream(file);
-            }
-
-            @Override
-            public void write(int b) throws IOException {
-                bootstrap();
-                delegate.write(b);
-            }
-
-            @Override
-            public void write(byte b[], int off, int len) throws IOException {
-                bootstrap();
-                delegate.write(b, off, len);
-            }
+        this.file = file;
+    }
 
-            @Override
-            public void write(byte b[]) throws IOException {
-                bootstrap();
-                delegate.write(b);
+    private void ensureInitialized() throws IOException {
+        if (initialized == false) {
+            synchronized (lock) {
+                if (initialized == false) {
+                    file.getParentFile().mkdirs();
+                    delegate = new FileOutputStream(file);
+                    initialized = true;
+                }
             }
-        };
+        }
     }
 
     @Override
     public void write(int b) throws IOException {
+        ensureInitialized();
         delegate.write(b);
     }
 
     @Override
     public void write(byte b[], int off, int len) throws IOException {
+        ensureInitialized();
         delegate.write(b, off, len);
     }
 
+    @Override
+    public void write(byte b[]) throws IOException {
+        ensureInitialized();
+        delegate.write(b);
+    }
+
     @Override
     public void close() throws IOException {
-        delegate.close();
+        synchronized (lock) {
+            if (initialized && delegate != null) {
+                delegate.close();
+            }
+        }
+    }
+
+    @Override
+    public void flush() throws IOException {
+        if (initialized && delegate != null) {
+            delegate.flush();
+        }
     }
 }
```
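The rewrite drops the dummy-delegate trick (an anonymous `OutputStream` that bootstrapped the real file stream on first use) in favor of double-checked locking: the `volatile` `initialized` flag keeps the common write path lock-free, while the `synchronized` block guarantees the `FileOutputStream` is created exactly once even under concurrent writes. A minimal generic sketch of the same idiom, where `LazyValue` and its supplier are illustrative names rather than anything from this commit:

```java
import java.util.function.Supplier;

// Sketch of the double-checked locking idiom used above. The volatile read
// makes the fast path cheap once initialization has happened; the second
// check inside the lock stops two racing threads from both initializing.
class LazyValue<T> {
    private final Supplier<T> supplier;
    private final Object lock = new Object();
    private volatile boolean initialized = false;
    private T value; // written once, under the lock, before the volatile flag is set

    LazyValue(Supplier<T> supplier) {
        this.supplier = supplier;
    }

    T get() {
        if (initialized == false) {          // fast path: a single volatile read
            synchronized (lock) {
                if (initialized == false) {  // re-check: another thread may have won the race
                    value = supplier.get();
                    initialized = true;      // publish: ordered after the value assignment
                }
            }
        }
        return value;
    }
}
```

Writing `value` before the volatile store to `initialized` is what makes the unsynchronized fast-path read safe: the volatile write/read pair establishes the happens-before edge that publishes `value`.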

docs/changelog/133018.yaml (5 additions, 0 deletions)

```diff
@@ -0,0 +1,5 @@
+pr: 133018
+summary: Add ordinal range encode for tsid
+area: TSDB
+type: enhancement
+issues: []
```

docs/changelog/133113.yaml (5 additions, 0 deletions)

```diff
@@ -0,0 +1,5 @@
+pr: 133113
+summary: Limit the depth of a filter
+area: Infra/REST API
+type: enhancement
+issues: []
```

docs/changelog/133188.yaml (6 additions, 0 deletions)

```diff
@@ -0,0 +1,6 @@
+pr: 133188
+summary: Don't fail search if bottom doc can't be formatted
+area: Search
+type: bug
+issues:
+ - 125321
```

docs/changelog/133193.yaml (5 additions, 0 deletions)

```diff
@@ -0,0 +1,5 @@
+pr: 133193
+summary: Fix offset handling in Murmur3Hasher
+area: Infra/Core
+type: bug
+issues: []
```

docs/release-notes/breaking-changes.md (1 addition, 1 deletion)

```diff
@@ -111,7 +111,7 @@ Existing `repository-s3` configurations may no longer be compatible. Notable dif
 - AWS SDK v2 requires the use of the V4 signature algorithm, therefore, the `s3.client.${CLIENT_NAME}.signer_override` setting is deprecated and no longer has any effect.
 - AWS SDK v2 does not support the `log-delivery-write` canned ACL.
 - AWS SDK v2 counts 4xx responses differently in its metrics reporting.
-- AWS SDK v2 always uses the regional STS endpoint, whereas AWS SDK v2 could use either a regional endpoint or the global `https://sts.amazonaws.com` one.
+- AWS SDK v2 always uses the regional STS endpoint, whereas AWS SDK v1 could use either a regional endpoint or the global `https://sts.amazonaws.com` one.
 
 **Action:**
 Test the upgrade in a non-production environment. Adapt your configuration to the new SDK functionality. This includes, but may not be limited to, the following items:
```
