4 changes: 4 additions & 0 deletions server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -264,6 +264,10 @@ static TransportVersion def(int id) {
public static final TransportVersion STATE_PARAM_GET_SNAPSHOT = def(9_100_0_00);
public static final TransportVersion PROJECT_ID_IN_SNAPSHOTS_DELETIONS_AND_REPO_CLEANUP = def(9_101_0_00);
public static final TransportVersion CLUSTER_STATE_PROJECTS_SETTINGS = def(9_108_0_00);
/**
* Also used for determining when some new ESQL data types were introduced, as a workaround to then-missing proper versioning of data
* types. Do not backport without updating that workaround as well, otherwise mixed-version clusters may misbehave.
*/
public static final TransportVersion INDEX_SOURCE = def(9_158_0_00);
public static final TransportVersion MAX_HEAP_SIZE_PER_NODE_IN_CLUSTER_INFO = def(9_159_0_00);
public static final TransportVersion TIMESERIES_DEFAULT_LIMIT = def(9_160_0_00);
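
For context on how a constant like INDEX_SOURCE is consumed, here is a minimal sketch of the usual wire-format guard. The writer class and its indexSource field are hypothetical; only the supports() check pattern is taken from this PR:

    // Hypothetical writeTo: emit the new field only when the stream's transport
    // version supports it, so mixed-version clusters keep a compatible wire format.
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        if (out.getTransportVersion().supports(TransportVersions.INDEX_SOURCE)) {
            out.writeOptionalString(indexSource); // hypothetical field gated on INDEX_SOURCE
        }
    }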

This file was deleted.

Large diffs are not rendered by default.

@@ -0,0 +1,76 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

package org.elasticsearch.xpack.esql.core.type;

import org.elasticsearch.Build;
import org.elasticsearch.TransportVersion;

public interface SupportedVersion {
boolean supports(TransportVersion version);

default boolean supportedLocally() {
return supports(TransportVersion.current());
}

SupportedVersion SUPPORTED_ON_ALL_NODES = new SupportedVersion() {
@Override
public boolean supports(TransportVersion version) {
return true;
}

@Override
public String toString() {
return "SupportedOnAllVersions";
}
};

/**
* Types that are actively being built. These types are
* <ul>
* <li>Not returned from Elasticsearch on release builds.</li>
* <li>Not included in generated documentation</li>
* <li>
* Not tested by {@code ErrorsForCasesWithoutExamplesTestCase} subclasses.
* When a function supports a type it includes a test case in its subclass
* of {@code AbstractFunctionTestCase}. If a function does not support
* them, like {@code TO_STRING}, then the tests won't notice. See the class javadoc
* for instructions on adding new types, but that usually involves adding support
* for that type to a handful of functions. Once you've done that you should be
* able to remove your new type from UNDER_CONSTRUCTION and update a few error
* messages.
* </li>
* </ul>
* Snapshot builds treat these as supported. Mixed/multi-cluster tests with older
* nodes just have to be skipped using capabilities, as always.
*/
SupportedVersion UNDER_CONSTRUCTION = new SupportedVersion() {
@Override
public boolean supports(TransportVersion version) {
return Build.current().isSnapshot();
}

@Override
public String toString() {
return "UnderConstruction";
}
};

static SupportedVersion supportedOn(TransportVersion supportedVersion) {
return new SupportedVersion() {
@Override
public boolean supports(TransportVersion version) {
return version.supports(supportedVersion) || Build.current().isSnapshot();
@alex-spies (Contributor, Author) commented on Oct 1, 2025:

This is a subtle change that considers released types supported on snapshot builds, always.

Similarly, under construction types are also always considered supported on snapshot builds.

I thought about making a distinction between "created at version" and "supported from version", but that wouldn't really add anything as we already have capabilities to control the behavior of bwc tests in snapshot builds.

Member replied:

I'm good with just having the supported from version.

It does feel weird to say that released types are always supported on snapshots though.

@alex-spies (Contributor, Author) replied:

I'll add a comment to explain that this is so that we don't break a bunch of existing tests when finally enabling a type.

@alex-spies (Contributor, Author) replied:

Added some javadoc.

Member replied:

👍

This is an interesting compromise that makes tests flow more nicely. We don't discard any backwards compatibility tests that we wrote while working on the new type. That's good. But those tests might be guaranteeing more backwards compatibility than we actually offer in production. That's ok. And it makes it explicit when we disable that backward compatibility. All good.

}

@Override
public String toString() {
return "SupportedOn[" + supportedVersion + "]";
}
};
}
}
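
To illustrate how the pieces above compose, a short hypothetical usage sketch follows; remoteVersion and the choice of INDEX_SOURCE as the cutoff are illustrative and not taken from the PR:

    // A released type declares the transport version it shipped with.
    SupportedVersion denseVectorSupport = SupportedVersion.supportedOn(TransportVersions.INDEX_SOURCE);

    // On a release build this is a plain version check against another node.
    boolean onRemote = denseVectorSupport.supports(remoteVersion);

    // Snapshot builds always report support (see the review thread above), so bwc
    // tests written while developing a type keep running after it is released.
    // Under-construction types are visible on snapshot builds only:
    boolean visibleLocally = SupportedVersion.UNDER_CONSTRUCTION.supportedLocally();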
@@ -10,6 +10,7 @@
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.apache.http.util.EntityUtils;
import org.elasticsearch.Build;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
@@ -46,22 +47,25 @@
import static org.hamcrest.Matchers.any;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;

/**
* Creates indices with all supported fields and fetches values from them.
* Creates indices with all supported fields and fetches values from them to
* confirm that release builds correctly handle data types, even if they were
* introduced in later versions.
* <p>
* Entirely skipped in snapshot builds; data types that are under
* construction are normally tested well enough in spec tests, skipping
* old versions via {@link org.elasticsearch.xpack.esql.action.EsqlCapabilities}.
* <p>
* In a single cluster where all nodes are on a single version this is
* just an "is it plugged in" style smoke test. In a mixed version cluster
* this is testing the behavior of fetching potentially unsupported field
* types. The same is true multi-cluster cases.
* </p>
* types. The same is true for multi-cluster cases.
* <p>
* This isn't trying to test complex interactions with field loading, so we
* load constant field values and have simple mappings.
* </p>
*/
public class AllSupportedFieldsTestCase extends ESRestTestCase {
private static final Logger logger = LogManager.getLogger(FieldExtractorTestCase.class);
@@ -71,6 +75,11 @@ public class AllSupportedFieldsTestCase extends ESRestTestCase {

@ParametersFactory(argumentFormatting = "pref=%s mode=%s")
public static List<Object[]> args() {
if (Build.current().isSnapshot()) {
// We only test behavior in release builds. Snapshot builds will have data types enabled that are still under construction.
return List.of();
}

List<Object[]> args = new ArrayList<>();
for (MappedFieldType.FieldExtractPreference extractPreference : Arrays.asList(
null,
@@ -168,6 +177,7 @@ public void createIndices() throws IOException {
}
}

// TODO: Also add a test for _tsid once we can determine the minimum transport version of all nodes.
public final void testFetchAll() throws IOException {
Map<String, Object> response = esql("""
, _id, _ignored, _index_mode, _score, _source, _version
@@ -204,7 +214,7 @@ public final void testFetchAll() throws IOException {
if (supportedInIndex(type) == false) {
continue;
}
expectedValues = expectedValues.entry(fieldName(type), expectedValue(nodeInfo.version, type));
expectedValues = expectedValues.entry(fieldName(type), expectedValue(type));
}
expectedValues = expectedValues.entry("_id", any(String.class))
.entry("_ignored", nullValue())
@@ -219,6 +229,7 @@
profileLogger.clearProfile();
}

// Tests a workaround and will become obsolete once we can determine the actual minimum transport version of all nodes.
public final void testFetchDenseVector() throws IOException {
Map<String, Object> response;
try {
@@ -400,7 +411,8 @@ private void createAllTypesDoc(RestClient client, String indexName) throws IOException {
client.performRequest(request);
}

private Matcher<?> expectedValue(TransportVersion version, DataType type) throws IOException {
// This will become dependent on the minimum transport version of all nodes once we can determine that.
private Matcher<?> expectedValue(DataType type) {
return switch (type) {
case BOOLEAN -> equalTo(true);
case COUNTER_LONG, LONG, COUNTER_INTEGER, INTEGER, UNSIGNED_LONG, SHORT, BYTE -> equalTo(1);
@@ -419,19 +431,14 @@ private Matcher<?> expectedValue(TransportVersion version, DataType type) throws IOException {
case GEO_SHAPE -> equalTo("POINT (-71.34 41.12)");
case NULL -> nullValue();
case AGGREGATE_METRIC_DOUBLE -> {
if (denseVectorAggMetricDoubleIfFns()) {
// If all versions are new we get null. If any are old, some *might* support aggregate_metric_double
yield nullValue();
}
Matcher<String> expected = equalTo("{\"min\":-302.5,\"max\":702.3,\"sum\":200.0,\"value_count\":25}");
yield anyOf(nullValue(), expected);
// Currently, we cannot tell if all nodes support it or not so we treat it as unsupported.
// TODO: Fix this once we know the node versions.
yield nullValue();
}
case DENSE_VECTOR -> {
if (denseVectorAggMetricDoubleIfFns()) {
// If all versions are new we get null. If any are old, some *might* support dense_vector
yield nullValue();
}
yield anyOf(nullValue(), expectedDenseVector(version));
// Currently, we cannot tell if all nodes support it or not so we treat it as unsupported.
// TODO: Fix this once we know the node versions.
yield nullValue();
}
default -> throw new AssertionError("unsupported field type [" + type + "]");
};
@@ -449,6 +456,7 @@ private Matcher<List<?>> expectedDenseVector(TransportVersion version) {
private static boolean supportedInIndex(DataType t) {
return switch (t) {
// These are supported but implied by the index process.
// TODO: current versions already support _tsid; update this once we can tell whether all nodes support it.
case OBJECT, SOURCE, DOC_DATA_TYPE, TSID_DATA_TYPE,
// Internal only
UNSUPPORTED, PARTIAL_AGG,
@@ -500,7 +508,8 @@ private Map<String, Object> nameToValue(List<String> names, List<?> values) {
return result;
}

private Matcher<String> expectedType(DataType type) throws IOException {
// This will become dependent on the minimum transport version of all nodes once we can determine that.
private Matcher<String> expectedType(DataType type) {
return switch (type) {
case COUNTER_DOUBLE, COUNTER_LONG, COUNTER_INTEGER -> {
if (indexMode == IndexMode.TIME_SERIES) {
Expand All @@ -512,13 +521,9 @@ private Matcher<String> expectedType(DataType type) throws IOException {
case HALF_FLOAT, SCALED_FLOAT, FLOAT -> equalTo("double");
case NULL -> equalTo("keyword");
// Currently unsupported without TS command or KNN function
case AGGREGATE_METRIC_DOUBLE, DENSE_VECTOR -> {
if (denseVectorAggMetricDoubleIfFns()) {
// If all versions are new we get null. If any are old, some *might* support dense_vector
yield equalTo("unsupported");
}
yield either(equalTo("unsupported")).or(equalTo(type.esType()));
}
case AGGREGATE_METRIC_DOUBLE, DENSE_VECTOR ->
// TODO: Fix this once we know the node versions.
equalTo("unsupported");
default -> equalTo(type.esType());
};
}
@@ -1024,7 +1024,7 @@ public void doTest() {
}

private boolean isValidDataType(DataType dataType) {
return UNDER_CONSTRUCTION.get(dataType) == null || UNDER_CONSTRUCTION.get(dataType).isEnabled();
return UNDER_CONSTRUCTION.contains(dataType) == false;
}
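
The UNDER_CONSTRUCTION declaration itself lives in DataType, whose large diff is not rendered above. Judging from the call sites in this PR, its new shape is roughly the following sketch, an assumption rather than the actual code:

    // Hypothetical declaration: a plain Set<DataType> of in-progress types replaces
    // the old Map<DataType, FeatureFlag>. Per the review discussion below, it is
    // currently empty because no types are under construction.
    public static final Set<DataType> UNDER_CONSTRUCTION = Set.of();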

private static void saveJoinTypes(Supplier<Set<DocsV3Support.TypeSignature>> signatures) throws Exception {
@@ -10,7 +10,6 @@
import org.elasticsearch.Build;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.util.FeatureFlag;
import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException;
import org.elasticsearch.xpack.esql.core.expression.Expression;
import org.elasticsearch.xpack.esql.core.expression.MapExpression;
@@ -795,9 +794,9 @@ public static ArgSignature paramWithoutAnnotation(String name) {
* Remove types that are being actively built.
*/
private static String[] removeUnderConstruction(String[] types) {
for (Map.Entry<DataType, FeatureFlag> underConstruction : DataType.UNDER_CONSTRUCTION.entrySet()) {
if (underConstruction.getValue().isEnabled() == false) {
types = Arrays.stream(types).filter(t -> underConstruction.getKey().typeName().equals(t) == false).toArray(String[]::new);
Member commented:

I suppose you could pass the feature flag into the under construction SupportedVersion variant and use the enabled/disabled-ness of the feature flag. I'm wondering whether a createdVersion AND a featureFlag might be the right thing here. It's more work when you make a new data type, but it feels a little more true.

@alex-spies (Contributor, Author) replied:

I guess you could have a feature-flag based override to enable under-construction types outside of SNAPSHOT; e.g. so that folks toying around with in-development data types could enable them on their local cluster (and live with the fact that queries might break in unexpected ways, esp. if connected to a remote cluster that may not enable the same feature flag).

That said, we currently have no under construction types anymore, so that would complicate the system for no gain. But I could leave a comment in place so that future-us will know how to do that.

@alex-spies (Contributor, Author) replied:

Added a comment.
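
A minimal sketch of the feature-flag override discussed in this thread, assuming it were added to SupportedVersion; it is not part of the PR and reuses the FeatureFlag utility whose import this file just dropped:

    // Hypothetical variant: an under-construction type is visible on snapshot builds,
    // or anywhere its feature flag is explicitly enabled (e.g. local experimentation).
    static SupportedVersion underConstruction(FeatureFlag flag) {
        return new SupportedVersion() {
            @Override
            public boolean supports(TransportVersion version) {
                return Build.current().isSnapshot() || flag.isEnabled();
            }

            @Override
            public String toString() {
                return "UnderConstruction[" + flag + "]";
            }
        };
    }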

for (DataType underConstruction : DataType.UNDER_CONSTRUCTION) {
if (underConstruction.supportedVersion().supportedLocally() == false) {
types = Arrays.stream(types).filter(t -> underConstruction.typeName().equals(t) == false).toArray(String[]::new);
}
}
return types;
@@ -418,7 +418,7 @@ public static Stream<DataType> validFunctionParameters() {
// We don't test that functions don't take date_period or time_duration. We should.
return false;
}
if (DataType.UNDER_CONSTRUCTION.containsKey(t)) {
if (DataType.UNDER_CONSTRUCTION.contains(t)) {
/*
* Types under construction aren't checked because we're actively
* adding support for them to functions. That’s *why* they are
@@ -763,19 +763,19 @@ public static void testFunctionInfo() {
for (int i = 0; i < args.size() && i < types.size(); i++) {
typesFromSignature.get(i).add(types.get(i).dataType().esNameIfPossible());
}
if (DataType.UNDER_CONSTRUCTION.containsKey(entry.returnType()) == false) {
if (DataType.UNDER_CONSTRUCTION.contains(entry.returnType()) == false) {
returnFromSignature.add(entry.returnType().esNameIfPossible());
}
}

for (int i = 0; i < args.size(); i++) {
EsqlFunctionRegistry.ArgSignature arg = args.get(i);
Set<String> annotationTypes = Arrays.stream(arg.type())
.filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false)
.filter(t -> DataType.UNDER_CONSTRUCTION.contains(DataType.fromNameOrAlias(t)) == false)
.collect(Collectors.toCollection(TreeSet::new));
Set<String> signatureTypes = typesFromSignature.get(i)
.stream()
.filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false)
.filter(t -> DataType.UNDER_CONSTRUCTION.contains(DataType.fromNameOrAlias(t)) == false)
.collect(Collectors.toCollection(TreeSet::new));
if (signatureTypes.isEmpty()) {
log.info("{}: skipping", arg.name());
@@ -796,7 +796,7 @@ }
}

Set<String> returnTypes = Arrays.stream(description.returnType())
.filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false)
.filter(t -> DataType.UNDER_CONSTRUCTION.contains(DataType.fromNameOrAlias(t)) == false)
.collect(Collectors.toCollection(TreeSet::new));
assertEquals(returnFromSignature, returnTypes);
}
@@ -1060,7 +1060,7 @@ static boolean shouldHideSignature(List<DocsV3Support.Param> argTypes, DataType
return true;
}

for (DataType dt : DataType.UNDER_CONSTRUCTION.keySet()) {
for (DataType dt : DataType.UNDER_CONSTRUCTION) {
if (returnType == dt || argTypes.stream().anyMatch(p -> p.dataType() == dt)) {
return true;
}
@@ -338,7 +338,7 @@ private static void addQueryAsStringTestCases(List<TestCaseSupplier> suppliers)

private static void addStringTestCases(List<TestCaseSupplier> suppliers) {
for (DataType fieldType : DataType.stringTypes()) {
if (DataType.UNDER_CONSTRUCTION.containsKey(fieldType)) {
if (DataType.UNDER_CONSTRUCTION.contains(fieldType)) {
continue;
}
for (TestCaseSupplier.TypedDataSupplier queryDataSupplier : stringCases(fieldType)) {
@@ -55,7 +55,7 @@ private static List<TestCaseSupplier> testCaseSuppliers() {

public static void addStringTestCases(List<TestCaseSupplier> suppliers) {
for (DataType fieldType : DataType.stringTypes()) {
if (DataType.UNDER_CONSTRUCTION.containsKey(fieldType)) {
if (DataType.UNDER_CONSTRUCTION.contains(fieldType)) {
continue;
}
for (TestCaseSupplier.TypedDataSupplier queryDataSupplier : stringCases(fieldType)) {
@@ -64,8 +64,7 @@ public class CaseTests extends AbstractScalarFunctionTestCase {
).collect(Collectors.toList());
if (Build.current().isSnapshot()) {
t.addAll(
DataType.UNDER_CONSTRUCTION.keySet()
.stream()
DataType.UNDER_CONSTRUCTION.stream()
.filter(type -> type != DataType.AGGREGATE_METRIC_DOUBLE && type != DataType.DENSE_VECTOR)
.toList()
);
@@ -1226,7 +1226,7 @@ private void checkMatchFunctionPushDown(
var analyzer = makeAnalyzer("mapping-all-types.json");
// Check for every possible query data type
for (DataType fieldDataType : fieldDataTypes) {
if (DataType.UNDER_CONSTRUCTION.containsKey(fieldDataType)) {
if (DataType.UNDER_CONSTRUCTION.contains(fieldDataType)) {
continue;
}
