Skip to content

Commit 8fe6adf

Browse files
Merge branch 'main' into sort_other_numeric_types
2 parents 67f92b7 + 90dcccf commit 8fe6adf

File tree

381 files changed

+3176
-899
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

381 files changed

+3176
-899
lines changed
Lines changed: 196 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,196 @@
1+
/*
2+
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
3+
* or more contributor license agreements. Licensed under the "Elastic License
4+
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
5+
* Public License v 1"; you may not use this file except in compliance with, at
6+
* your election, the "Elastic License 2.0", the "GNU Affero General Public
7+
* License v3.0 only", or the "Server Side Public License, v 1".
8+
*/
9+
10+
package org.elasticsearch.benchmark.index.codec.tsdb;
11+
12+
import org.apache.lucene.analysis.standard.StandardAnalyzer;
13+
import org.apache.lucene.codecs.DocValuesFormat;
14+
import org.apache.lucene.document.Document;
15+
import org.apache.lucene.document.SortedDocValuesField;
16+
import org.apache.lucene.document.SortedNumericDocValuesField;
17+
import org.apache.lucene.document.SortedSetDocValuesField;
18+
import org.apache.lucene.index.IndexWriter;
19+
import org.apache.lucene.index.IndexWriterConfig;
20+
import org.apache.lucene.index.LogByteSizeMergePolicy;
21+
import org.apache.lucene.search.Sort;
22+
import org.apache.lucene.search.SortField;
23+
import org.apache.lucene.search.SortedNumericSortField;
24+
import org.apache.lucene.store.Directory;
25+
import org.apache.lucene.store.FSDirectory;
26+
import org.apache.lucene.util.BytesRef;
27+
import org.elasticsearch.cluster.metadata.DataStream;
28+
import org.elasticsearch.common.logging.LogConfigurator;
29+
import org.elasticsearch.index.codec.Elasticsearch900Lucene101Codec;
30+
import org.elasticsearch.index.codec.tsdb.es819.ES819TSDBDocValuesFormat;
31+
import org.openjdk.jmh.annotations.Benchmark;
32+
import org.openjdk.jmh.annotations.BenchmarkMode;
33+
import org.openjdk.jmh.annotations.Fork;
34+
import org.openjdk.jmh.annotations.Level;
35+
import org.openjdk.jmh.annotations.Measurement;
36+
import org.openjdk.jmh.annotations.Mode;
37+
import org.openjdk.jmh.annotations.OutputTimeUnit;
38+
import org.openjdk.jmh.annotations.Param;
39+
import org.openjdk.jmh.annotations.Scope;
40+
import org.openjdk.jmh.annotations.Setup;
41+
import org.openjdk.jmh.annotations.State;
42+
import org.openjdk.jmh.annotations.TearDown;
43+
import org.openjdk.jmh.annotations.Threads;
44+
import org.openjdk.jmh.annotations.Warmup;
45+
import org.openjdk.jmh.profile.AsyncProfiler;
46+
import org.openjdk.jmh.runner.Runner;
47+
import org.openjdk.jmh.runner.RunnerException;
48+
import org.openjdk.jmh.runner.options.Options;
49+
import org.openjdk.jmh.runner.options.OptionsBuilder;
50+
51+
import java.io.IOException;
52+
import java.nio.file.Files;
53+
import java.util.Random;
54+
import java.util.concurrent.ExecutorService;
55+
import java.util.concurrent.Executors;
56+
import java.util.concurrent.TimeUnit;
57+
58+
@BenchmarkMode(Mode.SampleTime)
59+
@OutputTimeUnit(TimeUnit.MILLISECONDS)
60+
@State(Scope.Benchmark)
61+
@Fork(1)
62+
@Threads(1)
63+
@Warmup(iterations = 0)
64+
@Measurement(iterations = 1)
65+
public class TSDBDocValuesMergeBenchmark {
66+
67+
static {
68+
// For Elasticsearch900Lucene101Codec:
69+
LogConfigurator.loadLog4jPlugins();
70+
LogConfigurator.configureESLogging();
71+
LogConfigurator.setNodeName("test");
72+
}
73+
74+
@Param("20431204")
75+
private int nDocs;
76+
77+
@Param("1000")
78+
private int deltaTime;
79+
80+
@Param("42")
81+
private int seed;
82+
83+
private static final String TIMESTAMP_FIELD = "@timestamp";
84+
private static final String HOSTNAME_FIELD = "host.name";
85+
private static final long BASE_TIMESTAMP = 1704067200000L;
86+
87+
private IndexWriter indexWriterWithoutOptimizedMerge;
88+
private IndexWriter indexWriterWithOptimizedMerge;
89+
private ExecutorService executorService;
90+
91+
public static void main(String[] args) throws RunnerException {
92+
final Options options = new OptionsBuilder().include(TSDBDocValuesMergeBenchmark.class.getSimpleName())
93+
.addProfiler(AsyncProfiler.class)
94+
.build();
95+
96+
new Runner(options).run();
97+
}
98+
99+
@Setup(Level.Trial)
100+
public void setup() throws IOException {
101+
executorService = Executors.newSingleThreadExecutor();
102+
103+
final Directory tempDirectoryWithoutDocValuesSkipper = FSDirectory.open(Files.createTempDirectory("temp1-"));
104+
final Directory tempDirectoryWithDocValuesSkipper = FSDirectory.open(Files.createTempDirectory("temp2-"));
105+
106+
indexWriterWithoutOptimizedMerge = createIndex(tempDirectoryWithoutDocValuesSkipper, false);
107+
indexWriterWithOptimizedMerge = createIndex(tempDirectoryWithDocValuesSkipper, true);
108+
}
109+
110+
private IndexWriter createIndex(final Directory directory, final boolean optimizedMergeEnabled) throws IOException {
111+
final var iwc = createIndexWriterConfig(optimizedMergeEnabled);
112+
long counter1 = 0;
113+
long counter2 = 10_000_000;
114+
long[] gauge1Values = new long[] { 2, 4, 6, 8, 10, 12, 14, 16 };
115+
long[] gauge2Values = new long[] { -2, -4, -6, -8, -10, -12, -14, -16 };
116+
int numHosts = 1000;
117+
String[] tags = new String[] { "tag_1", "tag_2", "tag_3", "tag_4", "tag_5", "tag_6", "tag_7", "tag_8" };
118+
119+
final Random random = new Random(seed);
120+
IndexWriter indexWriter = new IndexWriter(directory, iwc);
121+
for (int i = 0; i < nDocs; i++) {
122+
final Document doc = new Document();
123+
124+
final int batchIndex = i / numHosts;
125+
final String hostName = "host-" + batchIndex;
126+
// Slightly vary the timestamp in each document
127+
final long timestamp = BASE_TIMESTAMP + ((i % numHosts) * deltaTime) + random.nextInt(0, deltaTime);
128+
129+
doc.add(new SortedDocValuesField(HOSTNAME_FIELD, new BytesRef(hostName)));
130+
doc.add(new SortedNumericDocValuesField(TIMESTAMP_FIELD, timestamp));
131+
doc.add(new SortedNumericDocValuesField("counter_1", counter1++));
132+
doc.add(new SortedNumericDocValuesField("counter_2", counter2++));
133+
doc.add(new SortedNumericDocValuesField("gauge_1", gauge1Values[i % gauge1Values.length]));
134+
doc.add(new SortedNumericDocValuesField("gauge_2", gauge2Values[i % gauge1Values.length]));
135+
int numTags = tags.length % (i + 1);
136+
for (int j = 0; j < numTags; j++) {
137+
doc.add(new SortedSetDocValuesField("tags", new BytesRef(tags[j])));
138+
}
139+
140+
indexWriter.addDocument(doc);
141+
}
142+
indexWriter.commit();
143+
return indexWriter;
144+
}
145+
146+
@Benchmark
147+
public void forceMergeWithoutOptimizedMerge() throws IOException {
148+
forceMerge(indexWriterWithoutOptimizedMerge);
149+
}
150+
151+
@Benchmark
152+
public void forceMergeWithOptimizedMerge() throws IOException {
153+
forceMerge(indexWriterWithOptimizedMerge);
154+
}
155+
156+
private void forceMerge(final IndexWriter indexWriter) throws IOException {
157+
indexWriter.forceMerge(1);
158+
}
159+
160+
@TearDown(Level.Trial)
161+
public void tearDown() {
162+
if (executorService != null) {
163+
executorService.shutdown();
164+
try {
165+
if (executorService.awaitTermination(30, TimeUnit.SECONDS) == false) {
166+
executorService.shutdownNow();
167+
}
168+
} catch (InterruptedException e) {
169+
executorService.shutdownNow();
170+
Thread.currentThread().interrupt();
171+
}
172+
}
173+
}
174+
175+
private static IndexWriterConfig createIndexWriterConfig(boolean optimizedMergeEnabled) {
176+
var config = new IndexWriterConfig(new StandardAnalyzer());
177+
// NOTE: index sort config matching LogsDB's sort order
178+
config.setIndexSort(
179+
new Sort(
180+
new SortField(HOSTNAME_FIELD, SortField.Type.STRING, false),
181+
new SortedNumericSortField(TIMESTAMP_FIELD, SortField.Type.LONG, true)
182+
)
183+
);
184+
config.setLeafSorter(DataStream.TIMESERIES_LEAF_READERS_SORTER);
185+
config.setMergePolicy(new LogByteSizeMergePolicy());
186+
var docValuesFormat = new ES819TSDBDocValuesFormat(4096, optimizedMergeEnabled);
187+
config.setCodec(new Elasticsearch900Lucene101Codec() {
188+
189+
@Override
190+
public DocValuesFormat getDocValuesFormatForField(String field) {
191+
return docValuesFormat;
192+
}
193+
});
194+
return config;
195+
}
196+
}

distribution/docker/build.gradle

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -448,8 +448,8 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
448448
// is functional.
449449
if (base == DockerBase.IRON_BANK) {
450450
Map<String, String> buildArgsMap = [
451-
'BASE_REGISTRY': 'docker.elastic.co',
452-
'BASE_IMAGE' : 'ubi9/ubi',
451+
'BASE_REGISTRY': 'docker.io',
452+
'BASE_IMAGE' : 'redhat/ubi9',
453453
'BASE_TAG' : 'latest'
454454
]
455455

docs/changelog/125403.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 125403
2+
summary: First step optimizing tsdb doc values codec merging
3+
area: Codec
4+
type: enhancement
5+
issues: []

docs/reference/query-languages/esql/esql-commands.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -802,6 +802,8 @@ RENAME old_name1 AS new_name1[, ..., old_nameN AS new_nameN]
802802

803803
The `RENAME` processing command renames one or more columns. If a column with the new name already exists, it will be replaced by the new column.
804804

805+
A `RENAME` with multiple column renames is equivalent to multiple sequential `RENAME` commands.
806+
805807
**Examples**
806808

807809
```esql
@@ -818,6 +820,15 @@ FROM employees
818820
| RENAME first_name AS fn, last_name AS ln
819821
```
820822

823+
With multiple `RENAME` commands:
824+
825+
```esql
826+
FROM employees
827+
| KEEP first_name, last_name
828+
| RENAME first_name AS fn
829+
| RENAME last_name AS ln
830+
```
831+
821832

822833
## `SORT` [esql-sort]
823834

muted-tests.yml

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -261,9 +261,6 @@ tests:
261261
- class: org.elasticsearch.env.NodeEnvironmentTests
262262
method: testIndexCompatibilityChecks
263263
issue: https://github.com/elastic/elasticsearch/issues/124388
264-
- class: org.elasticsearch.xpack.restart.FullClusterRestartIT
265-
method: testWatcherWithApiKey {cluster=UPGRADED}
266-
issue: https://github.com/elastic/elasticsearch/issues/124159
267264
- class: org.elasticsearch.multiproject.test.CoreWithMultipleProjectsClientYamlTestSuiteIT
268265
method: test {yaml=data_stream/190_failure_store_redirection/Redirect ingest failure in data stream to failure store}
269266
issue: https://github.com/elastic/elasticsearch/issues/124518
@@ -357,9 +354,6 @@ tests:
357354
- class: org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT
358355
method: testSearchWithRandomDisconnects
359356
issue: https://github.com/elastic/elasticsearch/issues/122707
360-
- class: org.elasticsearch.snapshots.SharedClusterSnapshotRestoreIT
361-
method: testDeletionOfFailingToRecoverIndexShouldStopRestore
362-
issue: https://github.com/elastic/elasticsearch/issues/126204
363357
- class: org.elasticsearch.xpack.esql.inference.RerankOperatorTests
364358
method: testSimpleCircuitBreaking
365359
issue: https://github.com/elastic/elasticsearch/issues/124337
@@ -450,6 +444,15 @@ tests:
450444
- class: org.elasticsearch.xpack.security.cli.HttpCertificateCommandTests
451445
method: testGenerateMultipleCertificateWithNewCA
452446
issue: https://github.com/elastic/elasticsearch/issues/126471
447+
- class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT
448+
method: testILMDownsampleRollingRestart
449+
issue: https://github.com/elastic/elasticsearch/issues/126495
450+
- class: org.elasticsearch.xpack.search.AsyncSearchErrorTraceIT
451+
method: testDataNodeLogsStackTraceWhenErrorTraceFalseOrEmpty
452+
issue: https://github.com/elastic/elasticsearch/issues/126357
453+
- class: org.elasticsearch.smoketest.MlWithSecurityIT
454+
method: test {yaml=ml/get_trained_model_stats/Test get stats given trained models}
455+
issue: https://github.com/elastic/elasticsearch/issues/126510
453456

454457
# Examples:
455458
#

rest-api-spec/src/main/resources/rest-api-spec/api/inference.put_eis.json

Lines changed: 0 additions & 35 deletions
This file was deleted.

server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -748,7 +748,7 @@ public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Except
748748

749749
logger.info("--> wait for the index to appear");
750750
// that would mean that recovery process started and failing
751-
waitForIndex("test-idx", TimeValue.timeValueSeconds(10));
751+
safeGet(clusterAdmin().prepareHealth(SAFE_AWAIT_TIMEOUT, "test-idx").execute());
752752

753753
logger.info("--> delete index");
754754
cluster().wipeIndices("test-idx");
@@ -1619,14 +1619,6 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception {
16191619
restoreFut.get();
16201620
}
16211621

1622-
private void waitForIndex(final String index, TimeValue timeout) throws Exception {
1623-
assertBusy(
1624-
() -> assertTrue("Expected index [" + index + "] to exist", indexExists(index)),
1625-
timeout.millis(),
1626-
TimeUnit.MILLISECONDS
1627-
);
1628-
}
1629-
16301622
public void testSnapshotName() throws Exception {
16311623
disableRepoConsistencyCheck("This test does not create any data in the repository");
16321624

server/src/main/java/module-info.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -476,4 +476,5 @@
476476
exports org.elasticsearch.monitor.metrics;
477477
exports org.elasticsearch.plugins.internal.rewriter to org.elasticsearch.inference;
478478
exports org.elasticsearch.lucene.util.automaton;
479+
exports org.elasticsearch.index.codec.perfield;
479480
}

server/src/main/java/org/elasticsearch/ElasticsearchException.java

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -353,7 +353,8 @@ public static ElasticsearchException readException(StreamInput input, int id) th
353353
public static boolean isRegistered(Class<? extends Throwable> exception, TransportVersion version) {
354354
ElasticsearchExceptionHandle elasticsearchExceptionHandle = CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.get(exception);
355355
if (elasticsearchExceptionHandle != null) {
356-
return version.onOrAfter(elasticsearchExceptionHandle.versionAdded);
356+
return version.onOrAfter(elasticsearchExceptionHandle.versionAdded)
357+
|| Arrays.stream(elasticsearchExceptionHandle.patchVersions).anyMatch(version::isPatchFrom);
357358
}
358359
return false;
359360
}
@@ -1983,24 +1984,34 @@ private enum ElasticsearchExceptionHandle {
19831984
183,
19841985
TransportVersions.V_8_16_0
19851986
),
1986-
REMOTE_EXCEPTION(RemoteException.class, RemoteException::new, 184, TransportVersions.REMOTE_EXCEPTION);
1987+
REMOTE_EXCEPTION(
1988+
RemoteException.class,
1989+
RemoteException::new,
1990+
184,
1991+
TransportVersions.REMOTE_EXCEPTION,
1992+
TransportVersions.REMOTE_EXCEPTION_8_19
1993+
);
19871994

19881995
final Class<? extends ElasticsearchException> exceptionClass;
19891996
final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;
19901997
final int id;
1991-
final TransportVersion versionAdded;
1998+
private final TransportVersion versionAdded;
1999+
private final TransportVersion[] patchVersions;
19922000

19932001
<E extends ElasticsearchException> ElasticsearchExceptionHandle(
19942002
Class<E> exceptionClass,
19952003
CheckedFunction<StreamInput, E, IOException> constructor,
19962004
int id,
1997-
TransportVersion versionAdded
2005+
TransportVersion versionAdded,
2006+
TransportVersion... patchVersions
19982007
) {
19992008
// We need the exceptionClass because you can't dig it out of the constructor reliably.
20002009
this.exceptionClass = exceptionClass;
20012010
this.constructor = constructor;
2002-
this.versionAdded = versionAdded;
2011+
20032012
this.id = id;
2013+
this.versionAdded = versionAdded;
2014+
this.patchVersions = patchVersions;
20042015
}
20052016
}
20062017

0 commit comments

Comments
 (0)