Skip to content

Commit 6a57a30

Browse files
authored
Merge branch 'main' into esql-inference-rerank-test-service-score-update
2 parents f0fa8d0 + d773b74 commit 6a57a30

File tree

66 files changed

+3062
-409
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

66 files changed

+3062
-409
lines changed

benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeConstantIntegerBenchmark.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@
4141
@State(Scope.Benchmark)
4242
public class DecodeConstantIntegerBenchmark {
4343
private static final int SEED = 17;
44-
private static final int BLOCK_SIZE = 128;
4544

4645
@Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" })
4746
private int bitsPerValue;
@@ -59,12 +58,12 @@ public void setupInvocation() throws IOException {
5958

6059
@Setup(Level.Trial)
6160
public void setupTrial() throws IOException {
62-
decode.setupTrial(new ConstantIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE));
61+
decode.setupTrial(new ConstantIntegerSupplier(SEED, bitsPerValue, decode.getBlockSize()));
6362
}
6463

6564
@Benchmark
6665
public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException {
6766
decode.benchmark(bh);
68-
metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize());
67+
metrics.recordOperation(decode.getBlockSize(), decode.getEncodedSize());
6968
}
7069
}
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.benchmark.index.codec.tsdb;

import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.CounterWithResetsSupplier;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/**
 * Benchmark for decoding counter-with-resets data patterns.
 *
 * <p>Parameterized by resetProbability to test how reset frequency affects
 * decoding performance. Lower probability means longer monotonic runs between
 * resets, while higher probability creates more frequent jumps back to zero.
 */
@Fork(value = 1)
@Warmup(iterations = 3)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
@State(Scope.Benchmark)
public class DecodeCounterWithResetsBenchmark {
    // Fixed seed keeps the generated data identical across runs, so results are comparable.
    private static final int SEED = 17;

    @Param({ "0.01", "0.02", "0.05" })
    private double resetProbability;

    // Shared decode harness; the block size is owned by the harness rather than duplicated here.
    private final AbstractTSDBCodecBenchmark decode = new DecodeBenchmark();

    /** Per-invocation reset of the decode harness (fresh input for every benchmark call). */
    @Setup(Level.Invocation)
    public void setupInvocation() throws IOException {
        decode.setupInvocation();
    }

    /** Per-trial setup: builds the counter-with-resets data supplier for the current parameter. */
    @Setup(Level.Trial)
    public void setupTrial() throws IOException {
        var supplier = CounterWithResetsSupplier.builder(SEED, decode.getBlockSize()).withResetProbability(resetProbability).build();
        decode.setupTrial(supplier);
    }

    /** Decodes one block and records throughput metrics for it. */
    @Benchmark
    public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException {
        decode.benchmark(bh);
        metrics.recordOperation(decode.getBlockSize(), decode.getEncodedSize());
    }
}

benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeDecreasingIntegerBenchmark.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@
4141
@State(Scope.Benchmark)
4242
public class DecodeDecreasingIntegerBenchmark {
4343
private static final int SEED = 17;
44-
private static final int BLOCK_SIZE = 128;
4544

4645
@Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" })
4746
private int bitsPerValue;
@@ -59,12 +58,12 @@ public void setupInvocation() throws IOException {
5958

6059
@Setup(Level.Trial)
6160
public void setupTrial() throws IOException {
62-
decode.setupTrial(new DecreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE));
61+
decode.setupTrial(new DecreasingIntegerSupplier(SEED, bitsPerValue, decode.getBlockSize()));
6362
}
6463

6564
@Benchmark
6665
public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException {
6766
decode.benchmark(bh);
68-
metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize());
67+
metrics.recordOperation(decode.getBlockSize(), decode.getEncodedSize());
6968
}
7069
}
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.benchmark.index.codec.tsdb;

import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.GaugeLikeSupplier;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/**
 * Benchmark for decoding gauge-like data patterns.
 *
 * <p>Parameterized by varianceRatio to test how different fluctuation intensities
 * affect decoding performance. Lower variance means values stay closer to the baseline,
 * while higher variance creates more volatile oscillations.
 */
@Fork(value = 1)
@Warmup(iterations = 3)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
@State(Scope.Benchmark)
public class DecodeGaugeLikeBenchmark {
    // Fixed seed keeps the generated data identical across runs, so results are comparable.
    private static final int SEED = 17;

    @Param({ "0.05", "0.1", "0.2" })
    private double varianceRatio;

    // Shared decode harness; the block size is owned by the harness rather than duplicated here.
    private final AbstractTSDBCodecBenchmark decode = new DecodeBenchmark();

    /** Per-invocation reset of the decode harness (fresh input for every benchmark call). */
    @Setup(Level.Invocation)
    public void setupInvocation() throws IOException {
        decode.setupInvocation();
    }

    /** Per-trial setup: builds the gauge-like data supplier for the current variance parameter. */
    @Setup(Level.Trial)
    public void setupTrial() throws IOException {
        var supplier = GaugeLikeSupplier.builder(SEED, decode.getBlockSize()).withVarianceRatio(varianceRatio).build();
        decode.setupTrial(supplier);
    }

    /** Decodes one block and records throughput metrics for it. */
    @Benchmark
    public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException {
        decode.benchmark(bh);
        metrics.recordOperation(decode.getBlockSize(), decode.getEncodedSize());
    }
}
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.benchmark.index.codec.tsdb;

import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.GcdFriendlySupplier;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/**
 * Benchmark for decoding GCD-friendly data patterns.
 *
 * <p>Parameterized by GCD value to test how the GCD compression stage handles
 * different divisors: 1 (no GCD benefit), small primes (7, 127), powers of 2
 * (64, 1024), and common values (100, 1000).
 */
@Fork(value = 1)
@Warmup(iterations = 3)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
@State(Scope.Benchmark)
public class DecodeGcdFriendlyBenchmark {
    // Fixed seed keeps the generated data identical across runs, so results are comparable.
    private static final int SEED = 17;

    @Param({ "1", "7", "64", "100", "127", "1000", "1024" })
    private long gcd;

    // Shared decode harness; the block size is owned by the harness rather than duplicated here.
    private final AbstractTSDBCodecBenchmark decode = new DecodeBenchmark();

    /** Per-invocation reset of the decode harness (fresh input for every benchmark call). */
    @Setup(Level.Invocation)
    public void setupInvocation() throws IOException {
        decode.setupInvocation();
    }

    /** Per-trial setup: builds the GCD-friendly data supplier for the current divisor parameter. */
    @Setup(Level.Trial)
    public void setupTrial() throws IOException {
        var supplier = GcdFriendlySupplier.builder(SEED, decode.getBlockSize()).withGcd(gcd).build();
        decode.setupTrial(supplier);
    }

    /** Decodes one block and records throughput metrics for it. */
    @Benchmark
    public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException {
        decode.benchmark(bh);
        metrics.recordOperation(decode.getBlockSize(), decode.getEncodedSize());
    }
}

benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/DecodeIncreasingIntegerBenchmark.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,6 @@
4141
@State(Scope.Benchmark)
4242
public class DecodeIncreasingIntegerBenchmark {
4343
private static final int SEED = 17;
44-
private static final int BLOCK_SIZE = 128;
4544

4645
@Param({ "1", "4", "8", "9", "16", "17", "24", "25", "32", "33", "40", "48", "56", "57", "64" })
4746
private int bitsPerValue;
@@ -59,12 +58,12 @@ public void setupInvocation() throws IOException {
5958

6059
@Setup(Level.Trial)
6160
public void setupTrial() throws IOException {
62-
decode.setupTrial(new IncreasingIntegerSupplier(SEED, bitsPerValue, BLOCK_SIZE));
61+
decode.setupTrial(new IncreasingIntegerSupplier(SEED, bitsPerValue, decode.getBlockSize()));
6362
}
6463

6564
@Benchmark
6665
public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException {
6766
decode.benchmark(bh);
68-
metrics.recordOperation(BLOCK_SIZE, decode.getEncodedSize());
67+
metrics.recordOperation(decode.getBlockSize(), decode.getEncodedSize());
6968
}
7069
}
Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.benchmark.index.codec.tsdb;

import org.elasticsearch.benchmark.index.codec.tsdb.internal.AbstractTSDBCodecBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.DecodeBenchmark;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.LowCardinalitySupplier;
import org.elasticsearch.benchmark.index.codec.tsdb.internal.ThroughputMetrics;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/**
 * Benchmark for decoding low cardinality data patterns.
 *
 * <p>Parameterized by number of distinct values and Zipf skew to test how
 * the decoder handles data with limited value diversity. Higher skew means
 * the most frequent value dominates more strongly.
 */
@Fork(value = 1)
@Warmup(iterations = 3)
@Measurement(iterations = 5)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
@State(Scope.Benchmark)
public class DecodeLowCardinalityBenchmark {
    // Fixed seed keeps the generated data identical across runs, so results are comparable.
    private static final int SEED = 17;

    @Param({ "5", "10" })
    private int distinctValues;

    @Param({ "1", "2", "3" })
    private double skew;

    // Shared decode harness; the block size is owned by the harness rather than duplicated here.
    private final AbstractTSDBCodecBenchmark decode = new DecodeBenchmark();

    /** Per-invocation reset of the decode harness (fresh input for every benchmark call). */
    @Setup(Level.Invocation)
    public void setupInvocation() throws IOException {
        decode.setupInvocation();
    }

    /** Per-trial setup: builds the low-cardinality data supplier for the current parameter pair. */
    @Setup(Level.Trial)
    public void setupTrial() throws IOException {
        var supplier = LowCardinalitySupplier.builder(SEED, decode.getBlockSize())
            .withDistinctValues(distinctValues)
            .withSkew(skew)
            .build();
        decode.setupTrial(supplier);
    }

    /** Decodes one block and records throughput metrics for it. */
    @Benchmark
    public void throughput(Blackhole bh, ThroughputMetrics metrics) throws IOException {
        decode.benchmark(bh);
        metrics.recordOperation(decode.getBlockSize(), decode.getEncodedSize());
    }
}

0 commit comments

Comments
 (0)