From 06a38f3fdc924000099808e6fa07706fd3246c1d Mon Sep 17 00:00:00 2001 From: Przemyslaw Witek Date: Mon, 4 Aug 2025 16:03:19 +0200 Subject: [PATCH] Add SPARKLINE aggregation function --- x-pack/plugin/esql/compute/build.gradle | 22 + .../compute/gen/AggregatorImplementer.java | 2 +- .../SparklineDoubleAggregator.java | 385 ++++++++++++++++ .../aggregation/SparklineFloatAggregator.java | 392 ++++++++++++++++ .../aggregation/SparklineIntAggregator.java | 392 ++++++++++++++++ .../aggregation/SparklineLongAggregator.java | 385 ++++++++++++++++ .../SparklineBooleanAggregatorFunction.java | 169 +++++++ ...lineBooleanAggregatorFunctionSupplier.java | 47 ++ ...lineBooleanGroupingAggregatorFunction.java | 303 +++++++++++++ .../SparklineBytesRefAggregatorFunction.java | 176 ++++++++ ...ineBytesRefAggregatorFunctionSupplier.java | 47 ++ ...ineBytesRefGroupingAggregatorFunction.java | 268 +++++++++++ .../SparklineDoubleAggregatorFunction.java | 210 +++++++++ ...klineDoubleAggregatorFunctionSupplier.java | 47 ++ ...klineDoubleGroupingAggregatorFunction.java | 384 ++++++++++++++++ .../SparklineFloatAggregatorFunction.java | 210 +++++++++ ...rklineFloatAggregatorFunctionSupplier.java | 47 ++ ...rklineFloatGroupingAggregatorFunction.java | 384 ++++++++++++++++ .../SparklineIntAggregatorFunction.java | 209 +++++++++ ...parklineIntAggregatorFunctionSupplier.java | 47 ++ ...parklineIntGroupingAggregatorFunction.java | 383 ++++++++++++++++ .../SparklineLongAggregatorFunction.java | 208 +++++++++ ...arklineLongAggregatorFunctionSupplier.java | 47 ++ ...arklineLongGroupingAggregatorFunction.java | 382 ++++++++++++++++ .../SparklineBooleanAggregator.java | 156 +++++++ .../aggregation/X-SparklineAggregator.java.st | 425 ++++++++++++++++++ .../src/main/resources/sparkline.csv-spec | 119 +++++ .../xpack/esql/action/EsqlCapabilities.java | 7 +- .../function/EsqlFunctionRegistry.java | 2 + .../aggregate/AggregateWritables.java | 1 + .../function/aggregate/Sparkline.java | 169 +++++++ .../SparklineSerializationTests.java | 28 ++ 32 files changed, 6051 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineDoubleAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineFloatAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineIntAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineLongAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefGroupingAggregatorFunction.java create mode 100644 
x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SparklineBooleanAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-SparklineAggregator.java.st create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/sparkline.csv-spec create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sparkline.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SparklineSerializationTests.java diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 435069a20f533..ce4534c28a4c5 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -606,6 +606,28 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/aggregation/ValuesBytesRefAggregator.java" } + File sparklineAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-SparklineAggregator.java.st") + template { + it.properties = intProperties + it.inputFile = sparklineAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/SparklineIntAggregator.java" + } + template { + it.properties = longProperties + it.inputFile = sparklineAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/SparklineLongAggregator.java" + } + template { + it.properties = floatProperties + it.inputFile = sparklineAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/SparklineFloatAggregator.java" + } + template { + it.properties = doubleProperties + it.inputFile = sparklineAggregatorInputFile + it.outputFile = 
"org/elasticsearch/compute/aggregation/SparklineDoubleAggregator.java" + } + File rateAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st") template { it.properties = intProperties diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 2edc567f6e744..96f3a345f3886 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -539,7 +539,7 @@ private MethodSpec addIntermediateInput() { declarationType, requireVoidType(), requireName("combineIntermediate"), - requireArgs( + requireArgsStartsWith( Stream.concat( Stream.of(aggState.declaredType()), // aggState intermediateState.stream().map(IntermediateStateDesc::combineArgType) // intermediate state diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineDoubleAggregator.java new file mode 100644 index 0000000000000..02508a5aa5612 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineDoubleAggregator.java @@ -0,0 +1,385 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +// begin generated imports +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.LongLongHash; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +// end generated imports + +/** + * Aggregates field values for double. + * This class is generated. Edit {@code X-SparklineAggregator.java.st} instead + * of this file. 
+ */ +@Aggregator({ @IntermediateState(name = "values", type = "DOUBLE_BLOCK"), @IntermediateState(name = "timestamps", type = "LONG_BLOCK") }) +@GroupingAggregator +class SparklineDoubleAggregator { + public static SingleState initSingle(BigArrays bigArrays) { + return new SingleState(bigArrays); + } + + public static void combine(SingleState state, double value, long timestamp) { + state.values.add(Tuple.tuple(timestamp, value)); + } + + public static void combineIntermediate(SingleState state, DoubleBlock values, LongBlock timestamps) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getDouble(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toFinal(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext); + } + + public static void combine(GroupingState state, int groupId, double value, long timestamp) { + state.addValue(groupId, value, timestamp); + } + + public static void combineIntermediate(GroupingState state, int groupId, DoubleBlock values, LongBlock timestamps, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getDouble(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + return state.toFinal(ctx.blockFactory(), selected); + } + + public static class SingleState implements AggregatorState { + private final List<Tuple<Long, Double>> values; + + private SingleState(BigArrays bigArrays) { + values = new ArrayList<>(); + } + + // TODO: Why does it have to output all the blocks? Where are these blocks used? + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + BlockFactory blockFactory = driverContext.blockFactory(); + try ( + DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(values.size()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(values.size()); + ) { + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendDouble(values.get(id).v2()); + timestampsBuilder.appendLong(values.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + Block toFinal(BlockFactory blockFactory) { + values.sort(Comparator.comparingLong(Tuple::v1)); + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(values.size())) { + builder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendDouble(values.get(id).v2()); + } + builder.endPositionEntry(); + return builder.build(); + } + } + + @Override + public void close() {} + } + + /** + * Values after the first in each group are collected in a hash, keyed by the pair of groupId and value. + * When emitting the output, we need to iterate the hash one group at a time to build the output block, + * which would require O(N^2). To avoid this, we compute the counts for each group and remap the hash id + * to an array, allowing us to build the output in O(N) instead. 
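+ * <p>A sketch of the keying scheme used below (it mirrors {@code addValue} and
+ * {@code getValue}; illustrative only):
+ * <pre>{@code
+ * hashes.add(groupId, Double.doubleToLongBits(v));           // dedupe on (group, value bits)
+ * double back = Double.longBitsToDouble(hashes.getKey2(id)); // recover the value half of a key
+ * }</pre>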
+ */ + private static class NextValues implements Releasable { + private final BlockFactory blockFactory; + private final LongLongHash hashes; + private int[] selectedCounts = null; + private int[] ids = null; + private long extraMemoryUsed = 0; + + private NextValues(BlockFactory blockFactory) { + this.blockFactory = blockFactory; + this.hashes = new LongLongHash(1, blockFactory.bigArrays()); + } + + void addValue(int groupId, double v) { + hashes.add(groupId, Double.doubleToLongBits(v)); + } + + double getValue(int index) { + return Double.longBitsToDouble(hashes.getKey2(ids[index])); + } + + private void reserveBytesForIntArray(long numElements) { + long adjust = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + numElements * Integer.BYTES); + blockFactory.adjustBreaker(adjust); + extraMemoryUsed += adjust; + } + + private void prepareForEmitting(IntVector selected) { + if (hashes.size() == 0) { + return; + } + /* + * Get a count of all groups less than the maximum selected group. Count + * *downwards* so that we can flip the sign on all of the actually selected + * groups. Negative values in this array are always unselected groups. + */ + int selectedCountsLen = selected.max() + 1; + reserveBytesForIntArray(selectedCountsLen); + this.selectedCounts = new int[selectedCountsLen]; + for (int id = 0; id < hashes.size(); id++) { + int group = (int) hashes.getKey1(id); + if (group < selectedCounts.length) { + selectedCounts[group]--; + } + } + + /* + * Total the selected groups and turn the counts into the start index into a sort-of + * off-by-one running count. It's really the number of values that have been inserted + * into the results before starting on this group. Unselected groups will still + * have negative counts. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will contain 0, 3, -2, 4, 5 + */ + int total = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + int count = -selectedCounts[group]; + selectedCounts[group] = total; + total += count; + } + + /* + * Build a list of ids to insert in order *and* convert the running + * count in selectedCounts[group] into the end index (exclusive) in + * ids for each group. + * Here we use the negative counts to signal that a group hasn't been + * selected and the id containing values for that group is ignored. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will start with 0, 3, -2, 4, 5. + * The counts will end with 3, 4, -2, 5, 9. + */ + reserveBytesForIntArray(total); + + this.ids = new int[total]; + for (int id = 0; id < hashes.size(); id++) { + int group = (int) hashes.getKey1(id); + ids[selectedCounts[group]++] = id; + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(hashes, () -> blockFactory.adjustBreaker(-extraMemoryUsed)); + } + } + + /** + * State for a grouped {@code SPARKLINE} aggregation. This implementation + * emphasizes collect-time performance over result rendering performance. + * The first value in each group is collected in the {@code firstValues} + * array, and subsequent values for each group are collected in {@code nextValues}. 
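+ * <p>Collection sketch with hypothetical values (see {@code addValue} below):
+ * <pre>{@code
+ * state.addValue(0, 1.5, 100L); // first value for group 0: stored in firstValues[0]
+ * state.addValue(0, 2.5, 200L); // differs from firstValues[0]: also recorded in nextValues
+ * state.addValue(0, 1.5, 300L); // equals firstValues[0]: skipped by nextValues
+ * // every (timestamp, value) pair is additionally kept in the values map for ordered output
+ * }</pre>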
+ */ + public static class GroupingState implements GroupingAggregatorState { + private final BlockFactory blockFactory; + DoubleArray firstValues; + private BitArray seen; + private int maxGroupId = -1; + private final NextValues nextValues; + private final Map<Integer, List<Tuple<Long, Double>>> values; + + private GroupingState(DriverContext driverContext) { + this.blockFactory = driverContext.blockFactory(); + boolean success = false; + try { + this.firstValues = driverContext.bigArrays().newDoubleArray(1, false); + this.nextValues = new NextValues(driverContext.blockFactory()); + success = true; + } finally { + if (success == false) { + this.close(); + } + } + values = new HashMap<>(); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try ( + DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(selected.getPositionCount()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(selected.getPositionCount()) + ) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + timestampsBuilder.appendNull(); + continue; + } + + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + List<Tuple<Long, Double>> valuesInGroup = values.get(group); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.appendDouble(valuesInGroup.get(id).v2()); + timestampsBuilder.appendLong(valuesInGroup.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + } + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + void addValue(int groupId, double v, long t) { + if (groupId > maxGroupId) { + firstValues = blockFactory.bigArrays().grow(firstValues, groupId + 1); + firstValues.set(groupId, v); + // We start in untracked mode, assuming every group has a value as an optimization to avoid allocating + // and updating the seen bitset. However, once some groups don't have values, we initialize the seen bitset, + // fill the groups that have values, and begin tracking incoming groups. + if (seen == null && groupId > maxGroupId + 1) { + seen = new BitArray(groupId + 1, blockFactory.bigArrays()); + seen.fill(0, maxGroupId + 1, true); + } + trackGroupId(groupId); + maxGroupId = groupId; + } else if (hasValue(groupId) == false) { + firstValues.set(groupId, v); + trackGroupId(groupId); + } else if (firstValues.get(groupId) != v) { + nextValues.addValue(groupId, v); + } + List<Tuple<Long, Double>> valuesInGroup = values.computeIfAbsent(groupId, g -> new ArrayList<>()); + valuesInGroup.add(Tuple.tuple(t, v)); + } + + @Override + public void enableGroupIdTracking(SeenGroupIds seen) { + // we track the seen values manually + } + + private void trackGroupId(int groupId) { + if (seen != null) { + seen.set(groupId); + } + } + + /** + * Returns true if the group has a value in firstValues; having a value in nextValues is optional. + * Returns false if the group does not have values in either firstValues or nextValues. + */ + private boolean hasValue(int groupId) { + return seen == null || seen.get(groupId); + } + + /** + * Builds a {@link Block} with the values collected for the {@code selected} + * groups, ordered by timestamp within each group. This is the implementation of the final results of the agg. 
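+ * <p>For example, if group 0 collected (200L, 10.0) and (100L, 5.0) while group 1
+ * collected nothing, emitting with both groups selected produces (illustrative shape only):
+ * <pre>{@code
+ * position 0: [5.0, 10.0]   // group 0, ordered by timestamp
+ * position 1: null          // group 1 had no values
+ * }</pre>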
+ */ + Block toFinal(BlockFactory blockFactory, IntVector selected) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(selected.getPositionCount())) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + continue; + } + double firstValue = firstValues.get(group); + final int nextValuesEnd = nextValueCounts != null ? nextValueCounts[group] : nextValuesStart; + + List<Tuple<Long, Double>> valuesInGroup = values.get(group); + valuesInGroup.sort(Comparator.comparingLong(Tuple::v1)); + builder.beginPositionEntry(); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.appendDouble(valuesInGroup.get(id).v2()); + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(seen, firstValues, nextValues); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineFloatAggregator.java new file mode 100644 index 0000000000000..9ddaac5129952 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineFloatAggregator.java @@ -0,0 +1,392 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +// begin generated imports +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.LongLongHash; +import org.elasticsearch.common.util.FloatArray; +import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +// end generated imports + +/** + * Aggregates field values for float. + * This class is generated. Edit {@code X-SparklineAggregator.java.st} instead + * of this file. 
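+ * <p>A minimal usage sketch (illustrative only; {@code driverContext}, {@code selected}
+ * and {@code evalContext} stand in for the real operator plumbing):
+ * <pre>{@code
+ * SparklineFloatAggregator.GroupingState state = SparklineFloatAggregator.initGrouping(driverContext);
+ * SparklineFloatAggregator.combine(state, 0, 2.0f, 200L); // group 0, value 2.0f at t=200
+ * SparklineFloatAggregator.combine(state, 0, 1.0f, 100L); // group 0, value 1.0f at t=100
+ * SparklineFloatAggregator.combine(state, 1, 3.0f, 300L); // group 1
+ * // each selected group becomes one (possibly multi-valued) position, ordered by timestamp
+ * Block block = SparklineFloatAggregator.evaluateFinal(state, selected, evalContext);
+ * }</pre>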
+ */ +@Aggregator({ @IntermediateState(name = "values", type = "FLOAT_BLOCK"), @IntermediateState(name = "timestamps", type = "LONG_BLOCK") }) +@GroupingAggregator +class SparklineFloatAggregator { + public static SingleState initSingle(BigArrays bigArrays) { + return new SingleState(bigArrays); + } + + public static void combine(SingleState state, float value, long timestamp) { + state.values.add(Tuple.tuple(timestamp, value)); + } + + public static void combineIntermediate(SingleState state, FloatBlock values, LongBlock timestamps) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getFloat(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toFinal(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext); + } + + public static void combine(GroupingState state, int groupId, float value, long timestamp) { + state.addValue(groupId, value, timestamp); + } + + public static void combineIntermediate(GroupingState state, int groupId, FloatBlock values, LongBlock timestamps, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getFloat(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + return state.toFinal(ctx.blockFactory(), selected); + } + + public static class SingleState implements AggregatorState { + private final List<Tuple<Long, Float>> values; + + private SingleState(BigArrays bigArrays) { + values = new ArrayList<>(); + } + + // TODO: Why does it have to output all the blocks? Where are these blocks used? + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + BlockFactory blockFactory = driverContext.blockFactory(); + try ( + FloatBlock.Builder builder = blockFactory.newFloatBlockBuilder(values.size()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(values.size()); + ) { + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendFloat(values.get(id).v2()); + timestampsBuilder.appendLong(values.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + Block toFinal(BlockFactory blockFactory) { + values.sort(Comparator.comparingLong(Tuple::v1)); + try (FloatBlock.Builder builder = blockFactory.newFloatBlockBuilder(values.size())) { + builder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendFloat(values.get(id).v2()); + } + builder.endPositionEntry(); + return builder.build(); + } + } + + @Override + public void close() {} + } + + /** + * Values after the first in each group are collected in a hash, keyed by the pair of groupId and value. + * When emitting the output, we need to iterate the hash one group at a time to build the output block, + * which would require O(N^2). To avoid this, we compute the counts for each group and remap the hash id + * to an array, allowing us to build the output in O(N) instead. 
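+ * <p>Sketch of the long encoding used below (group in the top 32 bits, float bits in
+ * the bottom 32; it mirrors {@code addValue} and {@code getValue}):
+ * <pre>{@code
+ * long both = (((long) groupId) << Float.SIZE) | (Float.floatToIntBits(v) & 0xFFFFFFFFL);
+ * int group = (int) (both >>> Float.SIZE);
+ * float back = Float.intBitsToFloat((int) (both & 0xFFFFFFFFL));
+ * }</pre>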
+ */ + private static class NextValues implements Releasable { + private final BlockFactory blockFactory; + private final LongHash hashes; + private int[] selectedCounts = null; + private int[] ids = null; + private long extraMemoryUsed = 0; + + private NextValues(BlockFactory blockFactory) { + this.blockFactory = blockFactory; + this.hashes = new LongHash(1, blockFactory.bigArrays()); + } + + void addValue(int groupId, float v) { + /* + * Encode the groupId and value into a single long - + * the top 32 bits for the group, the bottom 32 for the value. + */ + hashes.add((((long) groupId) << Float.SIZE) | (Float.floatToIntBits(v) & 0xFFFFFFFFL)); + } + + float getValue(int index) { + long both = hashes.get(ids[index]); + return Float.intBitsToFloat((int) (both & 0xFFFFFFFFL)); + } + + private void reserveBytesForIntArray(long numElements) { + long adjust = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + numElements * Integer.BYTES); + blockFactory.adjustBreaker(adjust); + extraMemoryUsed += adjust; + } + + private void prepareForEmitting(IntVector selected) { + if (hashes.size() == 0) { + return; + } + /* + * Get a count of all groups less than the maximum selected group. Count + * *downwards* so that we can flip the sign on all of the actually selected + * groups. Negative values in this array are always unselected groups. + */ + int selectedCountsLen = selected.max() + 1; + reserveBytesForIntArray(selectedCountsLen); + this.selectedCounts = new int[selectedCountsLen]; + for (int id = 0; id < hashes.size(); id++) { + long both = hashes.get(id); + int group = (int) (both >>> Float.SIZE); + if (group < selectedCounts.length) { + selectedCounts[group]--; + } + } + + /* + * Total the selected groups and turn the counts into the start index into a sort-of + * off-by-one running count. It's really the number of values that have been inserted + * into the results before starting on this group. Unselected groups will still + * have negative counts. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will contain 0, 3, -2, 4, 5 + */ + int total = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + int count = -selectedCounts[group]; + selectedCounts[group] = total; + total += count; + } + + /* + * Build a list of ids to insert in order *and* convert the running + * count in selectedCounts[group] into the end index (exclusive) in + * ids for each group. + * Here we use the negative counts to signal that a group hasn't been + * selected and the id containing values for that group is ignored. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will start with 0, 3, -2, 4, 5. + * The counts will end with 3, 4, -2, 5, 9. + */ + reserveBytesForIntArray(total); + + this.ids = new int[total]; + for (int id = 0; id < hashes.size(); id++) { + long both = hashes.get(id); + int group = (int) (both >>> Float.SIZE); + ids[selectedCounts[group]++] = id; + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(hashes, () -> blockFactory.adjustBreaker(-extraMemoryUsed)); + } + } + + /** + * State for a grouped {@code SPARKLINE} aggregation. 
This implementation + * emphasizes collect-time performance over result rendering performance. + * The first value in each group is collected in the {@code firstValues} + * array, and subsequent values for each group are collected in {@code nextValues}. + */ + public static class GroupingState implements GroupingAggregatorState { + private final BlockFactory blockFactory; + FloatArray firstValues; + private BitArray seen; + private int maxGroupId = -1; + private final NextValues nextValues; + private final Map<Integer, List<Tuple<Long, Float>>> values; + + private GroupingState(DriverContext driverContext) { + this.blockFactory = driverContext.blockFactory(); + boolean success = false; + try { + this.firstValues = driverContext.bigArrays().newFloatArray(1, false); + this.nextValues = new NextValues(driverContext.blockFactory()); + success = true; + } finally { + if (success == false) { + this.close(); + } + } + values = new HashMap<>(); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try ( + FloatBlock.Builder builder = blockFactory.newFloatBlockBuilder(selected.getPositionCount()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(selected.getPositionCount()) + ) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + timestampsBuilder.appendNull(); + continue; + } + + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + List<Tuple<Long, Float>> valuesInGroup = values.get(group); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.appendFloat(valuesInGroup.get(id).v2()); + timestampsBuilder.appendLong(valuesInGroup.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + } + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + void addValue(int groupId, float v, long t) { + if (groupId > maxGroupId) { + firstValues = blockFactory.bigArrays().grow(firstValues, groupId + 1); + firstValues.set(groupId, v); + // We start in untracked mode, assuming every group has a value as an optimization to avoid allocating + // and updating the seen bitset. However, once some groups don't have values, we initialize the seen bitset, + // fill the groups that have values, and begin tracking incoming groups. + if (seen == null && groupId > maxGroupId + 1) { + seen = new BitArray(groupId + 1, blockFactory.bigArrays()); + seen.fill(0, maxGroupId + 1, true); + } + trackGroupId(groupId); + maxGroupId = groupId; + } else if (hasValue(groupId) == false) { + firstValues.set(groupId, v); + trackGroupId(groupId); + } else if (firstValues.get(groupId) != v) { + nextValues.addValue(groupId, v); + } + List<Tuple<Long, Float>> valuesInGroup = values.computeIfAbsent(groupId, g -> new ArrayList<>()); + valuesInGroup.add(Tuple.tuple(t, v)); + } + + @Override + public void enableGroupIdTracking(SeenGroupIds seen) { + // we track the seen values manually + } + + private void trackGroupId(int groupId) { + if (seen != null) { + seen.set(groupId); + } + } + + /** + * Returns true if the group has a value in firstValues; having a value in nextValues is optional. + * Returns false if the group does not have values in either firstValues or nextValues. 
+ */ + private boolean hasValue(int groupId) { + return seen == null || seen.get(groupId); + } + + /** + * Builds a {@link Block} with the values collected for the {@code selected} + * groups, ordered by timestamp within each group. This is the implementation of the final results of the agg. + */ + Block toFinal(BlockFactory blockFactory, IntVector selected) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try (FloatBlock.Builder builder = blockFactory.newFloatBlockBuilder(selected.getPositionCount())) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + continue; + } + float firstValue = firstValues.get(group); + final int nextValuesEnd = nextValueCounts != null ? nextValueCounts[group] : nextValuesStart; + + List<Tuple<Long, Float>> valuesInGroup = values.get(group); + valuesInGroup.sort(Comparator.comparingLong(Tuple::v1)); + builder.beginPositionEntry(); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.appendFloat(valuesInGroup.get(id).v2()); + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(seen, firstValues, nextValues); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineIntAggregator.java new file mode 100644 index 0000000000000..f094702e85abe --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineIntAggregator.java @@ -0,0 +1,392 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +// begin generated imports +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.LongLongHash; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +// end generated imports + +/** + * Aggregates field values for int. + * This class is generated. Edit {@code X-SparklineAggregator.java.st} instead + * of this file. 
+ */ +@Aggregator({ @IntermediateState(name = "values", type = "INT_BLOCK"), @IntermediateState(name = "timestamps", type = "LONG_BLOCK") }) +@GroupingAggregator +class SparklineIntAggregator { + public static SingleState initSingle(BigArrays bigArrays) { + return new SingleState(bigArrays); + } + + public static void combine(SingleState state, int value, long timestamp) { + state.values.add(Tuple.tuple(timestamp, value)); + } + + public static void combineIntermediate(SingleState state, IntBlock values, LongBlock timestamps) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getInt(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toFinal(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext); + } + + public static void combine(GroupingState state, int groupId, int value, long timestamp) { + state.addValue(groupId, value, timestamp); + } + + public static void combineIntermediate(GroupingState state, int groupId, IntBlock values, LongBlock timestamps, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getInt(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + return state.toFinal(ctx.blockFactory(), selected); + } + + public static class SingleState implements AggregatorState { + private final List<Tuple<Long, Integer>> values; + + private SingleState(BigArrays bigArrays) { + values = new ArrayList<>(); + } + + // TODO: Why does it have to output all the blocks? Where are these blocks used? + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + BlockFactory blockFactory = driverContext.blockFactory(); + try ( + IntBlock.Builder builder = blockFactory.newIntBlockBuilder(values.size()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(values.size()); + ) { + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendInt(values.get(id).v2()); + timestampsBuilder.appendLong(values.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + Block toFinal(BlockFactory blockFactory) { + values.sort(Comparator.comparingLong(Tuple::v1)); + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(values.size())) { + builder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendInt(values.get(id).v2()); + } + builder.endPositionEntry(); + return builder.build(); + } + } + + @Override + public void close() {} + } + + /** + * Values after the first in each group are collected in a hash, keyed by the pair of groupId and value. + * When emitting the output, we need to iterate the hash one group at a time to build the output block, + * which would require O(N^2). To avoid this, we compute the counts for each group and remap the hash id + * to an array, allowing us to build the output in O(N) instead. 
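+ * <p>Sketch of the long encoding used below; the {@code & 0xFFFFFFFFL} mask keeps
+ * negative values from sign-extending into the group bits:
+ * <pre>{@code
+ * long both = (((long) groupId) << Integer.SIZE) | (v & 0xFFFFFFFFL);
+ * int group = (int) (both >>> Integer.SIZE);  // e.g. groupId 3, v -1 round-trips cleanly
+ * int back = (int) (both & 0xFFFFFFFFL);
+ * }</pre>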
+ */ + private static class NextValues implements Releasable { + private final BlockFactory blockFactory; + private final LongHash hashes; + private int[] selectedCounts = null; + private int[] ids = null; + private long extraMemoryUsed = 0; + + private NextValues(BlockFactory blockFactory) { + this.blockFactory = blockFactory; + this.hashes = new LongHash(1, blockFactory.bigArrays()); + } + + void addValue(int groupId, int v) { + /* + * Encode the groupId and value into a single long - + * the top 32 bits for the group, the bottom 32 for the value. + */ + hashes.add((((long) groupId) << Integer.SIZE) | (v & 0xFFFFFFFFL)); + } + + int getValue(int index) { + long both = hashes.get(ids[index]); + return (int) (both & 0xFFFFFFFFL); + } + + private void reserveBytesForIntArray(long numElements) { + long adjust = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + numElements * Integer.BYTES); + blockFactory.adjustBreaker(adjust); + extraMemoryUsed += adjust; + } + + private void prepareForEmitting(IntVector selected) { + if (hashes.size() == 0) { + return; + } + /* + * Get a count of all groups less than the maximum selected group. Count + * *downwards* so that we can flip the sign on all of the actually selected + * groups. Negative values in this array are always unselected groups. + */ + int selectedCountsLen = selected.max() + 1; + reserveBytesForIntArray(selectedCountsLen); + this.selectedCounts = new int[selectedCountsLen]; + for (int id = 0; id < hashes.size(); id++) { + long both = hashes.get(id); + int group = (int) (both >>> Integer.SIZE); + if (group < selectedCounts.length) { + selectedCounts[group]--; + } + } + + /* + * Total the selected groups and turn the counts into the start index into a sort-of + * off-by-one running count. It's really the number of values that have been inserted + * into the results before starting on this group. Unselected groups will still + * have negative counts. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will contain 0, 3, -2, 4, 5 + */ + int total = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + int count = -selectedCounts[group]; + selectedCounts[group] = total; + total += count; + } + + /* + * Build a list of ids to insert in order *and* convert the running + * count in selectedCounts[group] into the end index (exclusive) in + * ids for each group. + * Here we use the negative counts to signal that a group hasn't been + * selected and the id containing values for that group is ignored. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will start with 0, 3, -2, 4, 5. + * The counts will end with 3, 4, -2, 5, 9. + */ + reserveBytesForIntArray(total); + + this.ids = new int[total]; + for (int id = 0; id < hashes.size(); id++) { + long both = hashes.get(id); + int group = (int) (both >>> Integer.SIZE); + ids[selectedCounts[group]++] = id; + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(hashes, () -> blockFactory.adjustBreaker(-extraMemoryUsed)); + } + } + + /** + * State for a grouped {@code SPARKLINE} aggregation. 
This implementation + * emphasizes collect-time performance over result rendering performance. + * The first value in each group is collected in the {@code firstValues} + * array, and subsequent values for each group are collected in {@code nextValues}. + */ + public static class GroupingState implements GroupingAggregatorState { + private final BlockFactory blockFactory; + IntArray firstValues; + private BitArray seen; + private int maxGroupId = -1; + private final NextValues nextValues; + private final Map<Integer, List<Tuple<Long, Integer>>> values; + + private GroupingState(DriverContext driverContext) { + this.blockFactory = driverContext.blockFactory(); + boolean success = false; + try { + this.firstValues = driverContext.bigArrays().newIntArray(1, false); + this.nextValues = new NextValues(driverContext.blockFactory()); + success = true; + } finally { + if (success == false) { + this.close(); + } + } + values = new HashMap<>(); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try ( + IntBlock.Builder builder = blockFactory.newIntBlockBuilder(selected.getPositionCount()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(selected.getPositionCount()) + ) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + timestampsBuilder.appendNull(); + continue; + } + + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + List<Tuple<Long, Integer>> valuesInGroup = values.get(group); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.appendInt(valuesInGroup.get(id).v2()); + timestampsBuilder.appendLong(valuesInGroup.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + } + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + void addValue(int groupId, int v, long t) { + if (groupId > maxGroupId) { + firstValues = blockFactory.bigArrays().grow(firstValues, groupId + 1); + firstValues.set(groupId, v); + // We start in untracked mode, assuming every group has a value as an optimization to avoid allocating + // and updating the seen bitset. However, once some groups don't have values, we initialize the seen bitset, + // fill the groups that have values, and begin tracking incoming groups. + if (seen == null && groupId > maxGroupId + 1) { + seen = new BitArray(groupId + 1, blockFactory.bigArrays()); + seen.fill(0, maxGroupId + 1, true); + } + trackGroupId(groupId); + maxGroupId = groupId; + } else if (hasValue(groupId) == false) { + firstValues.set(groupId, v); + trackGroupId(groupId); + } else if (firstValues.get(groupId) != v) { + nextValues.addValue(groupId, v); + } + List<Tuple<Long, Integer>> valuesInGroup = values.computeIfAbsent(groupId, g -> new ArrayList<>()); + valuesInGroup.add(Tuple.tuple(t, v)); + } + + @Override + public void enableGroupIdTracking(SeenGroupIds seen) { + // we track the seen values manually + } + + private void trackGroupId(int groupId) { + if (seen != null) { + seen.set(groupId); + } + } + + /** + * Returns true if the group has a value in firstValues; having a value in nextValues is optional. + * Returns false if the group does not have values in either firstValues or nextValues. 
+ */ + private boolean hasValue(int groupId) { + return seen == null || seen.get(groupId); + } + + /** + * Builds a {@link Block} with the values collected for the {@code selected} + * groups, ordered by timestamp within each group. This is the implementation of the final results of the agg. + */ + Block toFinal(BlockFactory blockFactory, IntVector selected) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(selected.getPositionCount())) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + continue; + } + int firstValue = firstValues.get(group); + final int nextValuesEnd = nextValueCounts != null ? nextValueCounts[group] : nextValuesStart; + + List<Tuple<Long, Integer>> valuesInGroup = values.get(group); + valuesInGroup.sort(Comparator.comparingLong(Tuple::v1)); + builder.beginPositionEntry(); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.appendInt(valuesInGroup.get(id).v2()); + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(seen, firstValues, nextValues); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineLongAggregator.java new file mode 100644 index 0000000000000..5eb46770d4f0c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/SparklineLongAggregator.java @@ -0,0 +1,385 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +// begin generated imports +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.LongLongHash; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +// end generated imports + +/** + * Aggregates field values for long. + * This class is generated. Edit {@code X-SparklineAggregator.java.st} instead + * of this file. 
+ */ +@Aggregator({ @IntermediateState(name = "values", type = "LONG_BLOCK"), @IntermediateState(name = "timestamps", type = "LONG_BLOCK") }) +@GroupingAggregator +class SparklineLongAggregator { + public static SingleState initSingle(BigArrays bigArrays) { + return new SingleState(bigArrays); + } + + public static void combine(SingleState state, long value, long timestamp) { + state.values.add(Tuple.tuple(timestamp, value)); + } + + public static void combineIntermediate(SingleState state, LongBlock values, LongBlock timestamps) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getLong(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toFinal(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext); + } + + public static void combine(GroupingState state, int groupId, long value, long timestamp) { + state.addValue(groupId, value, timestamp); + } + + public static void combineIntermediate(GroupingState state, int groupId, LongBlock values, LongBlock timestamps, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getLong(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + return state.toFinal(ctx.blockFactory(), selected); + } + + public static class SingleState implements AggregatorState { + private final List<Tuple<Long, Long>> values; + + private SingleState(BigArrays bigArrays) { + values = new ArrayList<>(); + } + + // TODO: Why does it have to output all the blocks? Where are these blocks used? + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + BlockFactory blockFactory = driverContext.blockFactory(); + try ( + LongBlock.Builder builder = blockFactory.newLongBlockBuilder(values.size()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(values.size()); + ) { + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendLong(values.get(id).v2()); + timestampsBuilder.appendLong(values.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + Block toFinal(BlockFactory blockFactory) { + values.sort(Comparator.comparingLong(Tuple::v1)); + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(values.size())) { + builder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.appendLong(values.get(id).v2()); + } + builder.endPositionEntry(); + return builder.build(); + } + } + + @Override + public void close() {} + } + + /** + * Values after the first in each group are collected in a hash, keyed by the pair of groupId and value. + * When emitting the output, we need to iterate the hash one group at a time to build the output block, + * which would require O(N^2). To avoid this, we compute the counts for each group and remap the hash id + * to an array, allowing us to build the output in O(N) instead. 
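+ * <p>A compact trace of the remap for the worked example in the comments below
+ * (groups 0, 1, 3 and 4 selected with 3, 1, 1 and 4 values; group 2 unselected with 2):
+ * <pre>{@code
+ * // after prepareForEmitting(selected):
+ * // selectedCounts = [3, 4, -2, 5, 9]  // end offsets into ids; negative = unselected
+ * // ids = 9 hash ids laid out as [group 0 x3 | group 1 x1 | group 3 x1 | group 4 x4]
+ * }</pre>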
+ */ + private static class NextValues implements Releasable { + private final BlockFactory blockFactory; + private final LongLongHash hashes; + private int[] selectedCounts = null; + private int[] ids = null; + private long extraMemoryUsed = 0; + + private NextValues(BlockFactory blockFactory) { + this.blockFactory = blockFactory; + this.hashes = new LongLongHash(1, blockFactory.bigArrays()); + } + + void addValue(int groupId, long v) { + hashes.add(groupId, v); + } + + long getValue(int index) { + return hashes.getKey2(ids[index]); + } + + private void reserveBytesForIntArray(long numElements) { + long adjust = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + numElements * Integer.BYTES); + blockFactory.adjustBreaker(adjust); + extraMemoryUsed += adjust; + } + + private void prepareForEmitting(IntVector selected) { + if (hashes.size() == 0) { + return; + } + /* + * Get a count of all groups less than the maximum selected group. Count + * *downwards* so that we can flip the sign on all of the actually selected + * groups. Negative values in this array are always unselected groups. + */ + int selectedCountsLen = selected.max() + 1; + reserveBytesForIntArray(selectedCountsLen); + this.selectedCounts = new int[selectedCountsLen]; + for (int id = 0; id < hashes.size(); id++) { + int group = (int) hashes.getKey1(id); + if (group < selectedCounts.length) { + selectedCounts[group]--; + } + } + + /* + * Total the selected groups and turn the counts into the start index into a sort-of + * off-by-one running count. It's really the number of values that have been inserted + * into the results before starting on this group. Unselected groups will still + * have negative counts. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will contain 0, 3, -2, 4, 5 + */ + int total = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + int count = -selectedCounts[group]; + selectedCounts[group] = total; + total += count; + } + + /* + * Build a list of ids to insert in order *and* convert the running + * count in selectedCounts[group] into the end index (exclusive) in + * ids for each group. + * Here we use the negative counts to signal that a group hasn't been + * selected and the id containing values for that group is ignored. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will start with 0, 3, -2, 4, 5. + * The counts will end with 3, 4, -2, 5, 9. + */ + reserveBytesForIntArray(total); + + this.ids = new int[total]; + for (int id = 0; id < hashes.size(); id++) { + int group = (int) hashes.getKey1(id); + ids[selectedCounts[group]++] = id; + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(hashes, () -> blockFactory.adjustBreaker(-extraMemoryUsed)); + } + } + + /** + * State for a grouped {@code SPARKLINE} aggregation. This implementation + * emphasizes collect-time performance over result rendering performance. + * The first value in each group is collected in the {@code firstValues} + * array, and subsequent values for each group are collected in {@code nextValues}. 
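+ * <p>Sketch of the lazy seen-bitset behaviour with hypothetical values (see
+ * {@code addValue} below):
+ * <pre>{@code
+ * state.addValue(0, 7L, 100L); // group ids arrive densely: no bitset is allocated
+ * state.addValue(1, 8L, 200L); // still dense, still untracked
+ * state.addValue(5, 9L, 300L); // gap (groups 2..4 unseen): seen is allocated, groups 0..1 backfilled
+ * }</pre>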
+     */
+    public static class GroupingState implements GroupingAggregatorState {
+        private final BlockFactory blockFactory;
+        LongArray firstValues;
+        private BitArray seen;
+        private int maxGroupId = -1;
+        private final NextValues nextValues;
+        private final Map<Integer, List<Tuple<Long, Long>>> values;
+
+        private GroupingState(DriverContext driverContext) {
+            this.blockFactory = driverContext.blockFactory();
+            boolean success = false;
+            try {
+                this.firstValues = driverContext.bigArrays().newLongArray(1, false);
+                this.nextValues = new NextValues(driverContext.blockFactory());
+                success = true;
+            } finally {
+                if (success == false) {
+                    this.close();
+                }
+            }
+            values = new HashMap<>();
+        }
+
+        @Override
+        public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) {
+            nextValues.prepareForEmitting(selected);
+            try (
+                LongBlock.Builder builder = blockFactory.newLongBlockBuilder(selected.getPositionCount());
+                LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(selected.getPositionCount())
+            ) {
+                for (int s = 0; s < selected.getPositionCount(); s++) {
+                    int group = selected.getInt(s);
+                    if (group > maxGroupId || hasValue(group) == false) {
+                        builder.appendNull();
+                        timestampsBuilder.appendNull();
+                        continue;
+                    }
+                    builder.beginPositionEntry();
+                    timestampsBuilder.beginPositionEntry();
+                    List<Tuple<Long, Long>> valuesInGroup = values.get(group);
+                    for (int id = 0; id < valuesInGroup.size(); id++) {
+                        builder.appendLong(valuesInGroup.get(id).v2());
+                        timestampsBuilder.appendLong(valuesInGroup.get(id).v1());
+                    }
+                    builder.endPositionEntry();
+                    timestampsBuilder.endPositionEntry();
+                }
+                blocks[offset] = builder.build();
+                blocks[offset + 1] = timestampsBuilder.build();
+            }
+        }
+
+        void addValue(int groupId, long v, long t) {
+            if (groupId > maxGroupId) {
+                firstValues = blockFactory.bigArrays().grow(firstValues, groupId + 1);
+                firstValues.set(groupId, v);
+                // We start in untracked mode, assuming every group has a value, as an optimization to avoid
+                // allocating and updating the seen bitset. Once some groups turn out to have no values, we
+                // initialize the seen bitset, fill in the groups that do have values, and begin tracking
+                // incoming groups.
+                if (seen == null && groupId > maxGroupId + 1) {
+                    seen = new BitArray(groupId + 1, blockFactory.bigArrays());
+                    seen.fill(0, maxGroupId + 1, true);
+                }
+                trackGroupId(groupId);
+                maxGroupId = groupId;
+            } else if (hasValue(groupId) == false) {
+                firstValues.set(groupId, v);
+                trackGroupId(groupId);
+            } else if (firstValues.get(groupId) != v) {
+                nextValues.addValue(groupId, v);
+            }
+            List<Tuple<Long, Long>> valuesInGroup = values.computeIfAbsent(groupId, g -> new ArrayList<>());
+            valuesInGroup.add(Tuple.tuple(t, v));
+        }
+
+        @Override
+        public void enableGroupIdTracking(SeenGroupIds seen) {
+            // we track the seen values manually
+        }
+
+        private void trackGroupId(int groupId) {
+            if (seen != null) {
+                seen.set(groupId);
+            }
+        }
+
+        /**
+         * Returns true if the group has a value in firstValues; having a value in nextValues is optional.
+         * Returns false if the group does not have values in either firstValues or nextValues.
+         */
+        private boolean hasValue(int groupId) {
+            return seen == null || seen.get(groupId);
+        }
+
+        /**
+         * Builds a {@link Block} with the values collected for the {@code selected}
+         * groups, ordered by timestamp. This is the implementation of the final result of the agg.
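+         * For example, a group that collected (t=3, v=30) and (t=1, v=10) emits the
+         * multi-valued position [10, 30]; selected groups with no values emit null.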
+         */
+        Block toFinal(BlockFactory blockFactory, IntVector selected) {
+            nextValues.prepareForEmitting(selected);
+            try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(selected.getPositionCount())) {
+                for (int s = 0; s < selected.getPositionCount(); s++) {
+                    int group = selected.getInt(s);
+                    if (group > maxGroupId || hasValue(group) == false) {
+                        builder.appendNull();
+                        continue;
+                    }
+                    List<Tuple<Long, Long>> valuesInGroup = values.get(group);
+                    valuesInGroup.sort(Comparator.comparingLong(Tuple::v1));
+                    builder.beginPositionEntry();
+                    for (int id = 0; id < valuesInGroup.size(); id++) {
+                        builder.appendLong(valuesInGroup.get(id).v2());
+                    }
+                    builder.endPositionEntry();
+                }
+                return builder.build();
+            }
+        }
+
+        @Override
+        public void close() {
+            Releasables.closeExpectNoException(seen, firstValues, nextValues);
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunction.java
new file mode 100644
index 0000000000000..289bfb19f2ea8
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunction.java
@@ -0,0 +1,169 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.lang.StringBuilder;
+import java.util.List;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.compute.data.BooleanVector;
+import org.elasticsearch.compute.data.ElementType;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link AggregatorFunction} implementation for {@link SparklineBooleanAggregator}.
+ * This class is generated. Edit {@code AggregatorImplementer} instead.
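+ * <p>
+ * Note: unlike the numeric variants, this boolean variant declares a single
+ * {@code values} block as its intermediate state and carries no {@code timestamps} block.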
+ */ +public final class SparklineBooleanAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.BOOLEAN) ); + + private final DriverContext driverContext; + + private final SparklineBooleanAggregator.SingleState state; + + private final List channels; + + public SparklineBooleanAggregatorFunction(DriverContext driverContext, List channels, + SparklineBooleanAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SparklineBooleanAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SparklineBooleanAggregatorFunction(driverContext, channels, SparklineBooleanAggregator.initSingle()); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + } else if (mask.allTrue()) { + addRawInputNotMasked(page); + } else { + addRawInputMasked(page, mask); + } + } + + private void addRawInputMasked(Page page, BooleanVector mask) { + BooleanBlock vBlock = page.getBlock(channels.get(0)); + BooleanVector vVector = vBlock.asVector(); + if (vVector == null) { + addRawBlock(vBlock, mask); + return; + } + addRawVector(vVector, mask); + } + + private void addRawInputNotMasked(Page page) { + BooleanBlock vBlock = page.getBlock(channels.get(0)); + BooleanVector vVector = vBlock.asVector(); + if (vVector == null) { + addRawBlock(vBlock); + return; + } + addRawVector(vVector); + } + + private void addRawVector(BooleanVector vVector) { + for (int valuesPosition = 0; valuesPosition < vVector.getPositionCount(); valuesPosition++) { + boolean vValue = vVector.getBoolean(valuesPosition); + SparklineBooleanAggregator.combine(state, vValue); + } + } + + private void addRawVector(BooleanVector vVector, BooleanVector mask) { + for (int valuesPosition = 0; valuesPosition < vVector.getPositionCount(); valuesPosition++) { + if (mask.getBoolean(valuesPosition) == false) { + continue; + } + boolean vValue = vVector.getBoolean(valuesPosition); + SparklineBooleanAggregator.combine(state, vValue); + } + } + + private void addRawBlock(BooleanBlock vBlock) { + for (int p = 0; p < vBlock.getPositionCount(); p++) { + if (vBlock.isNull(p)) { + continue; + } + int vStart = vBlock.getFirstValueIndex(p); + int vEnd = vStart + vBlock.getValueCount(p); + for (int vOffset = vStart; vOffset < vEnd; vOffset++) { + boolean vValue = vBlock.getBoolean(vOffset); + SparklineBooleanAggregator.combine(state, vValue); + } + } + } + + private void addRawBlock(BooleanBlock vBlock, BooleanVector mask) { + for (int p = 0; p < vBlock.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (vBlock.isNull(p)) { + continue; + } + int vStart = vBlock.getFirstValueIndex(p); + int vEnd = vStart + vBlock.getValueCount(p); + for (int vOffset = vStart; vOffset < vEnd; vOffset++) { + boolean vValue = vBlock.getBoolean(vOffset); + SparklineBooleanAggregator.combine(state, vValue); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valuesUncast = page.getBlock(channels.get(0)); + if 
(valuesUncast.areAllValuesNull()) { + return; + } + BooleanBlock values = (BooleanBlock) valuesUncast; + assert values.getPositionCount() == 1; + SparklineBooleanAggregator.combineIntermediate(state, values); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = SparklineBooleanAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..2fadc90742f4d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanAggregatorFunctionSupplier.java @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link SparklineBooleanAggregator}. + * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. + */ +public final class SparklineBooleanAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public SparklineBooleanAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + return SparklineBooleanAggregatorFunction.intermediateStateDesc(); + } + + @Override + public List groupingIntermediateStateDesc() { + return SparklineBooleanGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public SparklineBooleanAggregatorFunction aggregator(DriverContext driverContext, + List channels) { + return SparklineBooleanAggregatorFunction.create(driverContext, channels); + } + + @Override + public SparklineBooleanGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return SparklineBooleanGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "sparkline of booleans"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..84ccff5a416fe --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBooleanGroupingAggregatorFunction.java @@ -0,0 +1,303 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SparklineBooleanAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. + */ +public final class SparklineBooleanGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.BOOLEAN) ); + + private final SparklineBooleanAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public SparklineBooleanGroupingAggregatorFunction(List channels, + SparklineBooleanAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SparklineBooleanGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new SparklineBooleanGroupingAggregatorFunction(channels, SparklineBooleanAggregator.initGrouping(driverContext.bigArrays()), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + BooleanBlock vBlock = page.getBlock(channels.get(0)); + BooleanVector vVector = vBlock.asVector(); + if (vVector == null) { + if (vBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, vBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, vBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, vBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, vVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, vVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, vVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, BooleanBlock vBlock) { + for (int groupPosition = 0; 
groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (vBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int vStart = vBlock.getFirstValueIndex(valuesPosition); + int vEnd = vStart + vBlock.getValueCount(valuesPosition); + for (int vOffset = vStart; vOffset < vEnd; vOffset++) { + boolean vValue = vBlock.getBoolean(vOffset); + SparklineBooleanAggregator.combine(state, groupId, vValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, BooleanVector vVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + boolean vValue = vVector.getBoolean(valuesPosition); + SparklineBooleanAggregator.combine(state, groupId, vValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + BooleanBlock values = (BooleanBlock) valuesUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineBooleanAggregator.combineIntermediate(state, groupId, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, BooleanBlock vBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (vBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int vStart = vBlock.getFirstValueIndex(valuesPosition); + int vEnd = vStart + vBlock.getValueCount(valuesPosition); + for (int vOffset = vStart; vOffset < vEnd; vOffset++) { + boolean vValue = vBlock.getBoolean(vOffset); + SparklineBooleanAggregator.combine(state, groupId, vValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, BooleanVector vVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + 
groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + boolean vValue = vVector.getBoolean(valuesPosition); + SparklineBooleanAggregator.combine(state, groupId, vValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + BooleanBlock values = (BooleanBlock) valuesUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineBooleanAggregator.combineIntermediate(state, groupId, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanBlock vBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (vBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int vStart = vBlock.getFirstValueIndex(valuesPosition); + int vEnd = vStart + vBlock.getValueCount(valuesPosition); + for (int vOffset = vStart; vOffset < vEnd; vOffset++) { + boolean vValue = vBlock.getBoolean(vOffset); + SparklineBooleanAggregator.combine(state, groupId, vValue); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BooleanVector vVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + boolean vValue = vVector.getBoolean(valuesPosition); + SparklineBooleanAggregator.combine(state, groupId, vValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + BooleanBlock values = (BooleanBlock) valuesUncast; + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + SparklineBooleanAggregator.combineIntermediate(state, groupId, values, valuesPosition); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = SparklineBooleanAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + 
sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunction.java new file mode 100644 index 0000000000000..a1672b6d7e1d8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunction.java @@ -0,0 +1,176 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SparklineBytesRefAggregator}. + * This class is generated. Edit {@code AggregatorImplementer} instead. + */ +public final class SparklineBytesRefAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.BYTES_REF) ); + + private final DriverContext driverContext; + + private final SparklineBytesRefAggregator.SingleState state; + + private final List channels; + + public SparklineBytesRefAggregatorFunction(DriverContext driverContext, List channels, + SparklineBytesRefAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SparklineBytesRefAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SparklineBytesRefAggregatorFunction(driverContext, channels, SparklineBytesRefAggregator.initSingle(driverContext.bigArrays())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + } else if (mask.allTrue()) { + addRawInputNotMasked(page); + } else { + addRawInputMasked(page, mask); + } + } + + private void addRawInputMasked(Page page, BooleanVector mask) { + BytesRefBlock vBlock = page.getBlock(channels.get(0)); + BytesRefVector vVector = vBlock.asVector(); + if (vVector == null) { + addRawBlock(vBlock, mask); + return; + } + addRawVector(vVector, mask); + } + + private void addRawInputNotMasked(Page page) { + BytesRefBlock vBlock = page.getBlock(channels.get(0)); + BytesRefVector vVector = vBlock.asVector(); + if (vVector == null) { + addRawBlock(vBlock); + return; + } + addRawVector(vVector); + } + + private void addRawVector(BytesRefVector vVector) { + BytesRef vScratch = new BytesRef(); + for (int i = 0; 
i < vVector.getPositionCount(); i++) { + BytesRef vValue = vVector.getBytesRef(i, vScratch); + SparklineBytesRefAggregator.combine(state, vValue); + } + } + + private void addRawVector(BytesRefVector vVector, BooleanVector mask) { + BytesRef vScratch = new BytesRef(); + for (int i = 0; i < vVector.getPositionCount(); i++) { + if (mask.getBoolean(i) == false) { + continue; + } + BytesRef vValue = vVector.getBytesRef(i, vScratch); + SparklineBytesRefAggregator.combine(state, vValue); + } + } + + private void addRawBlock(BytesRefBlock vBlock) { + BytesRef vScratch = new BytesRef(); + for (int p = 0; p < vBlock.getPositionCount(); p++) { + if (vBlock.isNull(p)) { + continue; + } + int vStart = vBlock.getFirstValueIndex(p); + int vEnd = vStart + vBlock.getValueCount(p); + for (int vOffset = vStart; vOffset < vEnd; vOffset++) { + BytesRef vValue = vBlock.getBytesRef(vOffset, vScratch); + SparklineBytesRefAggregator.combine(state, vValue); + } + } + } + + private void addRawBlock(BytesRefBlock vBlock, BooleanVector mask) { + BytesRef vScratch = new BytesRef(); + for (int p = 0; p < vBlock.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (vBlock.isNull(p)) { + continue; + } + int vStart = vBlock.getFirstValueIndex(p); + int vEnd = vStart + vBlock.getValueCount(p); + for (int vOffset = vStart; vOffset < vEnd; vOffset++) { + BytesRef vValue = vBlock.getBytesRef(vOffset, vScratch); + SparklineBytesRefAggregator.combine(state, vValue); + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + BytesRefBlock values = (BytesRefBlock) valuesUncast; + assert values.getPositionCount() == 1; + BytesRef scratch = new BytesRef(); + SparklineBytesRefAggregator.combineIntermediate(state, values); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = SparklineBytesRefAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..b52981ad2dc04 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefAggregatorFunctionSupplier.java @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link SparklineBytesRefAggregator}. + * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. + */ +public final class SparklineBytesRefAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public SparklineBytesRefAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + return SparklineBytesRefAggregatorFunction.intermediateStateDesc(); + } + + @Override + public List groupingIntermediateStateDesc() { + return SparklineBytesRefGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public SparklineBytesRefAggregatorFunction aggregator(DriverContext driverContext, + List channels) { + return SparklineBytesRefAggregatorFunction.create(driverContext, channels); + } + + @Override + public SparklineBytesRefGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return SparklineBytesRefGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "sparkline of bytes"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..a50e4e67a8261 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineBytesRefGroupingAggregatorFunction.java @@ -0,0 +1,268 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SparklineBytesRefAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
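+ * <p>
+ * Like the boolean variant, the BytesRef variant keeps a single {@code values}
+ * intermediate block; raw input is routed through
+ * {@code SparklineBytesRefAggregator.wrapAddInput}.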
+ */ +public final class SparklineBytesRefGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.BYTES_REF) ); + + private final SparklineBytesRefAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public SparklineBytesRefGroupingAggregatorFunction(List channels, + SparklineBytesRefAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SparklineBytesRefGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new SparklineBytesRefGroupingAggregatorFunction(channels, SparklineBytesRefAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + BytesRefBlock valuesBlock = page.getBlock(channels.get(0)); + BytesRefVector valuesVector = valuesBlock.asVector(); + if (valuesVector == null) { + if (valuesBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + var addInput = new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock); + } + + @Override + public void close() { + } + }; + return SparklineBytesRefAggregator.wrapAddInput(addInput, state, valuesBlock); + } + var addInput = new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valuesVector); + } + + @Override + public void close() { + } + }; + return SparklineBytesRefAggregator.wrapAddInput(addInput, state, valuesVector); + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition) || values.isNull(groupPosition + positionOffset)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + SparklineBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock 
groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + SparklineBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + BytesRefBlock values = (BytesRefBlock) valuesUncast; + SparklineBytesRefAggregator.combineIntermediate(state, positionOffset, groups, values); + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition) || values.isNull(groupPosition + positionOffset)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + SparklineBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + SparklineBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + BytesRefBlock values = (BytesRefBlock) valuesUncast; + SparklineBytesRefAggregator.combineIntermediate(state, positionOffset, groups, values); + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefBlock values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (values.isNull(groupPosition + positionOffset)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valuesStart = values.getFirstValueIndex(groupPosition + positionOffset); + int valuesEnd = valuesStart + values.getValueCount(groupPosition + positionOffset); + for (int v = valuesStart; v < valuesEnd; v++) { + 
SparklineBytesRefAggregator.combine(state, groupId, values.getBytesRef(v, scratch)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, BytesRefVector values) { + BytesRef scratch = new BytesRef(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + SparklineBytesRefAggregator.combine(state, groupId, values.getBytesRef(groupPosition + positionOffset, scratch)); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + BytesRefBlock values = (BytesRefBlock) valuesUncast; + SparklineBytesRefAggregator.combineIntermediate(state, positionOffset, groups, values); + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = SparklineBytesRefAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunction.java new file mode 100644 index 0000000000000..655e47f7eaafd --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunction.java @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SparklineDoubleAggregator}. + * This class is generated. Edit {@code AggregatorImplementer} instead. 
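+ * <p>
+ * The intermediate state is a pair of blocks, {@code values} (doubles) and
+ * {@code timestamps} (longs), each packing everything collected into a single position.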
+ */ +public final class SparklineDoubleAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.DOUBLE), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final SparklineDoubleAggregator.SingleState state; + + private final List channels; + + public SparklineDoubleAggregatorFunction(DriverContext driverContext, List channels, + SparklineDoubleAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SparklineDoubleAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SparklineDoubleAggregatorFunction(driverContext, channels, SparklineDoubleAggregator.initSingle(driverContext.bigArrays())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + } else if (mask.allTrue()) { + addRawInputNotMasked(page); + } else { + addRawInputMasked(page, mask); + } + } + + private void addRawInputMasked(Page page, BooleanVector mask) { + DoubleBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + DoubleVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + addRawVector(valueVector, timestampVector, mask); + } + + private void addRawInputNotMasked(Page page) { + DoubleBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + DoubleVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + addRawBlock(valueBlock, timestampBlock); + return; + } + addRawVector(valueVector, timestampVector); + } + + private void addRawVector(DoubleVector valueVector, LongVector timestampVector) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineDoubleAggregator.combine(state, valueValue, timestampValue); + } + } + + private void addRawVector(DoubleVector valueVector, LongVector timestampVector, + BooleanVector mask) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + if (mask.getBoolean(valuesPosition) == false) { + continue; + } + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineDoubleAggregator.combine(state, valueValue, timestampValue); + } + } + + private void addRawBlock(DoubleBlock valueBlock, LongBlock timestampBlock) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = 
valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineDoubleAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + private void addRawBlock(DoubleBlock valueBlock, LongBlock timestampBlock, BooleanVector mask) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineDoubleAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + assert values.getPositionCount() == 1; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert timestamps.getPositionCount() == 1; + SparklineDoubleAggregator.combineIntermediate(state, values, timestamps); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = SparklineDoubleAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..abe6b4024968e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleAggregatorFunctionSupplier.java @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link SparklineDoubleAggregator}. + * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. + */ +public final class SparklineDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public SparklineDoubleAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + return SparklineDoubleAggregatorFunction.intermediateStateDesc(); + } + + @Override + public List groupingIntermediateStateDesc() { + return SparklineDoubleGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public SparklineDoubleAggregatorFunction aggregator(DriverContext driverContext, + List channels) { + return SparklineDoubleAggregatorFunction.create(driverContext, channels); + } + + @Override + public SparklineDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return SparklineDoubleGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "sparkline of doubles"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..2d86aa2414e52 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineDoubleGroupingAggregatorFunction.java @@ -0,0 +1,384 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SparklineDoubleAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
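+ * <p>
+ * The grouping intermediate state mirrors the non-grouping layout, with one
+ * {@code values}/{@code timestamps} position per selected group rather than a
+ * single position overall.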
+ */ +public final class SparklineDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.DOUBLE), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final SparklineDoubleAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public SparklineDoubleGroupingAggregatorFunction(List channels, + SparklineDoubleAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SparklineDoubleGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new SparklineDoubleGroupingAggregatorFunction(channels, SparklineDoubleAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + DoubleBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + DoubleVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < 
groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineDoubleAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = 
groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineDoubleAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = 
timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + SparklineDoubleAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = SparklineDoubleAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunction.java new file mode 100644 index 0000000000000..b09471c2d12e8 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunction.java @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
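+// For reference, a minimal sketch of how the non-grouping function below is driven
+// (illustrative only; `driverContext`, `page`, and `mask` are assumed to come from the
+// surrounding Driver, and channels 0 and 1 carry float values and long timestamps):
+//
+//   try (var fn = SparklineFloatAggregatorFunction.create(driverContext, List.of(0, 1))) {
+//     fn.addRawInput(page, mask);          // mask filters positions, e.g. from a WHERE
+//     Block[] out = new Block[1];
+//     fn.evaluateFinal(out, 0, driverContext);
+//   }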
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SparklineFloatAggregator}. + * This class is generated. Edit {@code AggregatorImplementer} instead. + */ +public final class SparklineFloatAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.FLOAT), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final SparklineFloatAggregator.SingleState state; + + private final List channels; + + public SparklineFloatAggregatorFunction(DriverContext driverContext, List channels, + SparklineFloatAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SparklineFloatAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SparklineFloatAggregatorFunction(driverContext, channels, SparklineFloatAggregator.initSingle(driverContext.bigArrays())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + } else if (mask.allTrue()) { + addRawInputNotMasked(page); + } else { + addRawInputMasked(page, mask); + } + } + + private void addRawInputMasked(Page page, BooleanVector mask) { + FloatBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + FloatVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + addRawVector(valueVector, timestampVector, mask); + } + + private void addRawInputNotMasked(Page page) { + FloatBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + FloatVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + addRawBlock(valueBlock, timestampBlock); + return; + } + addRawVector(valueVector, timestampVector); + } + + private void addRawVector(FloatVector valueVector, LongVector timestampVector) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineFloatAggregator.combine(state, valueValue, timestampValue); + 
} + } + + private void addRawVector(FloatVector valueVector, LongVector timestampVector, + BooleanVector mask) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + if (mask.getBoolean(valuesPosition) == false) { + continue; + } + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineFloatAggregator.combine(state, valueValue, timestampValue); + } + } + + private void addRawBlock(FloatBlock valueBlock, LongBlock timestampBlock) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineFloatAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + private void addRawBlock(FloatBlock valueBlock, LongBlock timestampBlock, BooleanVector mask) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineFloatAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + assert values.getPositionCount() == 1; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert timestamps.getPositionCount() == 1; + SparklineFloatAggregator.combineIntermediate(state, values, timestamps); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = SparklineFloatAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + 
}
+
+  @Override
+  public void close() {
+    state.close();
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunctionSupplier.java
new file mode 100644
index 0000000000000..222737ebe4f34
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatAggregatorFunctionSupplier.java
@@ -0,0 +1,47 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.util.List;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link AggregatorFunctionSupplier} implementation for {@link SparklineFloatAggregator}.
+ * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead.
+ */
+public final class SparklineFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier {
+  public SparklineFloatAggregatorFunctionSupplier() {
+  }
+
+  @Override
+  public List<IntermediateStateDesc> nonGroupingIntermediateStateDesc() {
+    return SparklineFloatAggregatorFunction.intermediateStateDesc();
+  }
+
+  @Override
+  public List<IntermediateStateDesc> groupingIntermediateStateDesc() {
+    return SparklineFloatGroupingAggregatorFunction.intermediateStateDesc();
+  }
+
+  @Override
+  public SparklineFloatAggregatorFunction aggregator(DriverContext driverContext,
+      List<Integer> channels) {
+    return SparklineFloatAggregatorFunction.create(driverContext, channels);
+  }
+
+  @Override
+  public SparklineFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext,
+      List<Integer> channels) {
+    return SparklineFloatGroupingAggregatorFunction.create(channels, driverContext);
+  }
+
+  @Override
+  public String describe() {
+    return "sparkline of floats";
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatGroupingAggregatorFunction.java
new file mode 100644
index 0000000000000..59c0b0caeef68
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineFloatGroupingAggregatorFunction.java
@@ -0,0 +1,384 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
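+// A note on the dispatch pattern used throughout these generated classes: Block.asVector()
+// returns null when a block carries nulls or multi-valued positions, so each input page is
+// routed to either a dense vector fast path or a per-position block path. A distilled
+// sketch (illustrative, not generated code):
+//
+//   FloatBlock valueBlock = page.getBlock(channels.get(0));
+//   FloatVector valueVector = valueBlock.asVector();  // null => nulls or multi-values present
+//   if (valueVector != null) {
+//     // dense path: exactly one value per position, no null checks needed
+//   } else {
+//     // block path: skip null positions, iterate getFirstValueIndex() .. + getValueCount()
+//   }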
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SparklineFloatAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. + */ +public final class SparklineFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.FLOAT), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final SparklineFloatAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public SparklineFloatGroupingAggregatorFunction(List channels, + SparklineFloatAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SparklineFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new SparklineFloatGroupingAggregatorFunction(channels, SparklineFloatAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + FloatVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + 
@Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, FloatBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, FloatVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); 
groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineFloatAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, FloatBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, FloatVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int 
valuesPosition = groupPosition + positionOffset; + SparklineFloatAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + SparklineFloatAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = SparklineFloatAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git 
a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunction.java new file mode 100644 index 0000000000000..b0bbeeb5abb0d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunction.java @@ -0,0 +1,209 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SparklineIntAggregator}. + * This class is generated. Edit {@code AggregatorImplementer} instead. + */ +public final class SparklineIntAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.INT), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final SparklineIntAggregator.SingleState state; + + private final List channels; + + public SparklineIntAggregatorFunction(DriverContext driverContext, List channels, + SparklineIntAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SparklineIntAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SparklineIntAggregatorFunction(driverContext, channels, SparklineIntAggregator.initSingle(driverContext.bigArrays())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + } else if (mask.allTrue()) { + addRawInputNotMasked(page); + } else { + addRawInputMasked(page, mask); + } + } + + private void addRawInputMasked(Page page, BooleanVector mask) { + IntBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + IntVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + addRawVector(valueVector, timestampVector, mask); + } + + private void addRawInputNotMasked(Page page) { + IntBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + IntVector valueVector = 
valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + addRawBlock(valueBlock, timestampBlock); + return; + } + addRawVector(valueVector, timestampVector); + } + + private void addRawVector(IntVector valueVector, LongVector timestampVector) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineIntAggregator.combine(state, valueValue, timestampValue); + } + } + + private void addRawVector(IntVector valueVector, LongVector timestampVector, BooleanVector mask) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + if (mask.getBoolean(valuesPosition) == false) { + continue; + } + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineIntAggregator.combine(state, valueValue, timestampValue); + } + } + + private void addRawBlock(IntBlock valueBlock, LongBlock timestampBlock) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineIntAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + private void addRawBlock(IntBlock valueBlock, LongBlock timestampBlock, BooleanVector mask) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineIntAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + assert values.getPositionCount() == 1; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert timestamps.getPositionCount() == 1; + 
SparklineIntAggregator.combineIntermediate(state, values, timestamps);
+  }
+
+  @Override
+  public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
+    state.toIntermediate(blocks, offset, driverContext);
+  }
+
+  @Override
+  public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) {
+    blocks[offset] = SparklineIntAggregator.evaluateFinal(state, driverContext);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getSimpleName()).append("[");
+    sb.append("channels=").append(channels);
+    sb.append("]");
+    return sb.toString();
+  }
+
+  @Override
+  public void close() {
+    state.close();
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunctionSupplier.java
new file mode 100644
index 0000000000000..cf2fdec03be24
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntAggregatorFunctionSupplier.java
@@ -0,0 +1,47 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.util.List;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link AggregatorFunctionSupplier} implementation for {@link SparklineIntAggregator}.
+ * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead.
+ */
+public final class SparklineIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier {
+  public SparklineIntAggregatorFunctionSupplier() {
+  }
+
+  @Override
+  public List<IntermediateStateDesc> nonGroupingIntermediateStateDesc() {
+    return SparklineIntAggregatorFunction.intermediateStateDesc();
+  }
+
+  @Override
+  public List<IntermediateStateDesc> groupingIntermediateStateDesc() {
+    return SparklineIntGroupingAggregatorFunction.intermediateStateDesc();
+  }
+
+  @Override
+  public SparklineIntAggregatorFunction aggregator(DriverContext driverContext,
+      List<Integer> channels) {
+    return SparklineIntAggregatorFunction.create(driverContext, channels);
+  }
+
+  @Override
+  public SparklineIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext,
+      List<Integer> channels) {
+    return SparklineIntGroupingAggregatorFunction.create(channels, driverContext);
+  }
+
+  @Override
+  public String describe() {
+    return "sparkline of ints";
+  }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntGroupingAggregatorFunction.java
new file mode 100644
index 0000000000000..f35da658d1858
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineIntGroupingAggregatorFunction.java
@@ -0,0 +1,383 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
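+// SparklineIntAggregator itself lives outside this hunk; the call sites above pin down its
+// contract: combine(state, value, timestamp) accumulates aligned (value, timestamp) pairs,
+// combineIntermediate re-reads them from the two intermediate blocks, and evaluateFinal
+// renders the per-group result. A hypothetical, simplified single-group state under that
+// reading (illustrative only, not the generated implementation):
+//
+//   class SketchState {
+//     final List<Integer> values = new ArrayList<>();
+//     final List<Long> timestamps = new ArrayList<>();
+//     void combine(int value, long timestamp) {
+//       values.add(value);          // pairs stay index-aligned so the final pass can
+//       timestamps.add(timestamp);  // order the series by timestamp before rendering
+//     }
+//   }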
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SparklineIntAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. + */ +public final class SparklineIntGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.INT), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final SparklineIntAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public SparklineIntGroupingAggregatorFunction(List channels, + SparklineIntAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SparklineIntGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new SparklineIntGroupingAggregatorFunction(channels, SparklineIntAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + IntBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + IntVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + 
addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, IntBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, IntVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = 
groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineIntAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineIntAggregator.combineIntermediate(state, groupId, values, 
timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + SparklineIntAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = SparklineIntAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunction.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunction.java new file mode 100644 index 0000000000000..c7ab91d1f8045 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunction.java @@ -0,0 +1,208 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunction} implementation for {@link SparklineLongAggregator}. + * This class is generated. Edit {@code AggregatorImplementer} instead. + */ +public final class SparklineLongAggregatorFunction implements AggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.LONG), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final DriverContext driverContext; + + private final SparklineLongAggregator.SingleState state; + + private final List channels; + + public SparklineLongAggregatorFunction(DriverContext driverContext, List channels, + SparklineLongAggregator.SingleState state) { + this.driverContext = driverContext; + this.channels = channels; + this.state = state; + } + + public static SparklineLongAggregatorFunction create(DriverContext driverContext, + List channels) { + return new SparklineLongAggregatorFunction(driverContext, channels, SparklineLongAggregator.initSingle(driverContext.bigArrays())); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public void addRawInput(Page page, BooleanVector mask) { + if (mask.allFalse()) { + // Entire page masked away + } else if (mask.allTrue()) { + addRawInputNotMasked(page); + } else { + addRawInputMasked(page, mask); + } + } + + private void addRawInputMasked(Page page, BooleanVector mask) { + LongBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + LongVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + addRawBlock(valueBlock, timestampBlock, mask); + return; + } + addRawVector(valueVector, timestampVector, mask); + } + + private void addRawInputNotMasked(Page page) { + LongBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + LongVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + addRawBlock(valueBlock, timestampBlock); + return; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + 
addRawBlock(valueBlock, timestampBlock); + return; + } + addRawVector(valueVector, timestampVector); + } + + private void addRawVector(LongVector valueVector, LongVector timestampVector) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineLongAggregator.combine(state, valueValue, timestampValue); + } + } + + private void addRawVector(LongVector valueVector, LongVector timestampVector, + BooleanVector mask) { + for (int valuesPosition = 0; valuesPosition < valueVector.getPositionCount(); valuesPosition++) { + if (mask.getBoolean(valuesPosition) == false) { + continue; + } + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineLongAggregator.combine(state, valueValue, timestampValue); + } + } + + private void addRawBlock(LongBlock valueBlock, LongBlock timestampBlock) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineLongAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + private void addRawBlock(LongBlock valueBlock, LongBlock timestampBlock, BooleanVector mask) { + for (int p = 0; p < valueBlock.getPositionCount(); p++) { + if (mask.getBoolean(p) == false) { + continue; + } + if (valueBlock.isNull(p)) { + continue; + } + if (timestampBlock.isNull(p)) { + continue; + } + int valueStart = valueBlock.getFirstValueIndex(p); + int valueEnd = valueStart + valueBlock.getValueCount(p); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(p); + int timestampEnd = timestampStart + timestampBlock.getValueCount(p); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineLongAggregator.combine(state, valueValue, timestampValue); + } + } + } + } + + @Override + public void addIntermediateInput(Page page) { + assert channels.size() == intermediateBlockCount(); + assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + assert values.getPositionCount() == 1; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert timestamps.getPositionCount() == 1; + SparklineLongAggregator.combineIntermediate(state, values, timestamps); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + 
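+ // Writes the intermediate (values, timestamps) block pair starting at blocks[offset].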
state.toIntermediate(blocks, offset, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = SparklineLongAggregator.evaluateFinal(state, driverContext); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..43d6e12cc4cfb --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongAggregatorFunctionSupplier.java @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link SparklineLongAggregator}. + * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. + */ +public final class SparklineLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public SparklineLongAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + return SparklineLongAggregatorFunction.intermediateStateDesc(); + } + + @Override + public List groupingIntermediateStateDesc() { + return SparklineLongGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public SparklineLongAggregatorFunction aggregator(DriverContext driverContext, + List channels) { + return SparklineLongAggregatorFunction.create(driverContext, channels); + } + + @Override + public SparklineLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return SparklineLongGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "sparkline of longs"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..7b6e7f4efd85e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SparklineLongGroupingAggregatorFunction.java @@ -0,0 +1,382 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link SparklineLongAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. + */ +public final class SparklineLongGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("values", ElementType.LONG), + new IntermediateStateDesc("timestamps", ElementType.LONG) ); + + private final SparklineLongAggregator.GroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public SparklineLongGroupingAggregatorFunction(List channels, + SparklineLongAggregator.GroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static SparklineLongGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new SparklineLongGroupingAggregatorFunction(channels, SparklineLongAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + LongBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + LongVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, 
valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, LongBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, LongVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = 
groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineLongAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + SparklineLongAggregator.combineIntermediate(state, groupId, 
values, timestamps, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + SparklineLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + SparklineLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block valuesUncast = page.getBlock(channels.get(0)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + Block timestampsUncast = page.getBlock(channels.get(1)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + assert values.getPositionCount() == timestamps.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + SparklineLongAggregator.combineIntermediate(state, groupId, values, timestamps, valuesPosition); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = SparklineLongAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SparklineBooleanAggregator.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SparklineBooleanAggregator.java new file mode 100644 index 0000000000000..d46af141a29ca --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SparklineBooleanAggregator.java @@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.aggregation; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasables; + +/** + * Aggregates field values for booleans. + */ +@Aggregator({ @IntermediateState(name = "values", type = "BOOLEAN_BLOCK") }) +@GroupingAggregator +class SparklineBooleanAggregator { + public static SingleState initSingle() { + return new SingleState(); + } + + public static void combine(SingleState state, boolean v) { + if (v) { + state.seenTrue = true; + } else { + state.seenFalse = true; + } + } + + public static void combineIntermediate(SingleState state, BooleanBlock values) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.getBoolean(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toBlock(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(BigArrays bigArrays) { + return new GroupingState(bigArrays); + } + + public static void combine(GroupingState state, int groupId, boolean v) { + long index = ((long) groupId) << 1 | (v ? 
1 : 0); + state.values.set(index); + } + + public static void combineIntermediate(GroupingState state, int groupId, BooleanBlock values, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.getBoolean(i)); + } + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + return state.toBlock(ctx.blockFactory(), selected); + } + + public static class SingleState implements AggregatorState { + private boolean seenFalse; + private boolean seenTrue; + + @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory()); + } + + Block toBlock(BlockFactory blockFactory) { + if (seenFalse == false && seenTrue == false) { + return blockFactory.newConstantNullBlock(1); + } + try (BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(2)) { + builder.beginPositionEntry(); + if (seenFalse) { + builder.appendBoolean(false); + } + if (seenTrue) { + builder.appendBoolean(true); + } + builder.endPositionEntry(); + return builder.build(); + } + } + + @Override + public void close() {} + } + + public static class GroupingState implements GroupingAggregatorState { + private final BitArray values; + + private GroupingState(BigArrays bigArrays) { + values = new BitArray(1, bigArrays); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + blocks[offset] = toBlock(driverContext.blockFactory(), selected); + } + + Block toBlock(BlockFactory blockFactory, IntVector selected) { + if (values.size() == 0) { + return blockFactory.newConstantNullBlock(selected.getPositionCount()); + } + try (BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(selected.getPositionCount())) { + for (int s = 0; s < selected.getPositionCount(); s++) { + int selectedGroup = selected.getInt(s); + long index = ((long) selectedGroup) << 1; + boolean seenFalse = values.get(index); + boolean seenTrue = values.get(index | 1); + if (seenFalse) { + if (seenTrue) { + builder.beginPositionEntry(); + builder.appendBoolean(false); + builder.appendBoolean(true); + builder.endPositionEntry(); + } else { + builder.appendBoolean(false); + } + } else { + if (seenTrue) { + builder.appendBoolean(true); + } else { + builder.appendNull(); + } + } + } + return builder.build(); + } + } + + @Override + public void enableGroupIdTracking(SeenGroupIds seen) { + // we don't need to track which values have been seen because we don't do anything special for groups without values + } + + @Override + public void close() { + Releasables.closeExpectNoException(values); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-SparklineAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-SparklineAggregator.java.st new file mode 100644 index 0000000000000..faaa7f1385cbb --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-SparklineAggregator.java.st @@ -0,0 +1,425 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.aggregation; + +// begin generated imports +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.common.util.LongLongHash; +import org.elasticsearch.common.util.$Type$Array; +import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.ann.Aggregator; +import org.elasticsearch.compute.ann.GroupingAggregator; +import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.$Type$Block; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Tuple; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +// end generated imports + +/** + * Aggregates timestamped field values for $type$. + * This class is generated. Edit {@code X-SparklineAggregator.java.st} instead + * of this file. + */ +@Aggregator({ @IntermediateState(name = "values", type = "$TYPE$_BLOCK"), @IntermediateState(name = "timestamps", type = "LONG_BLOCK") }) +@GroupingAggregator +class Sparkline$Type$Aggregator { + public static SingleState initSingle(BigArrays bigArrays) { + return new SingleState(bigArrays); + } + + public static void combine(SingleState state, $type$ value, long timestamp) { + state.values.add(Tuple.tuple(timestamp, value)); + } + + public static void combineIntermediate(SingleState state, $Type$Block values, LongBlock timestamps) { + int start = values.getFirstValueIndex(0); + int end = start + values.getValueCount(0); + for (int i = start; i < end; i++) { + combine(state, values.get$Type$(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(SingleState state, DriverContext driverContext) { + return state.toFinal(driverContext.blockFactory()); + } + + public static GroupingState initGrouping(DriverContext driverContext) { + return new GroupingState(driverContext); + } + + public static void combine(GroupingState state, int groupId, $type$ value, long timestamp) { + state.addValue(groupId, value, timestamp); + } + + public static void combineIntermediate(GroupingState state, int groupId, $Type$Block values, LongBlock timestamps, int valuesPosition) { + int start = values.getFirstValueIndex(valuesPosition); + int end = start + values.getValueCount(valuesPosition); + for (int i = start; i < end; i++) { + combine(state, groupId, values.get$Type$(i), timestamps.getLong(i)); + } + } + + public static Block evaluateFinal(GroupingState state, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + return state.toFinal(ctx.blockFactory(), selected); + } + + public static class SingleState implements AggregatorState { + private final List> values; + + private SingleState(BigArrays bigArrays) { + values = new ArrayList<>(); + } + + // TODO: Why does it have to output all the blocks? Where are these blocks used?
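+ // A sketch of the intermediate layout (descriptive, not a wire-format contract): + // toIntermediate below writes two parallel blocks at blocks[offset] and + // blocks[offset + 1], a single multivalued entry of the collected values and one + // of their timestamps, paired by index, e.g. values=[v0, v1, v2], + // timestamps=[t0, t1, t2]. combineIntermediate above re-pairs them by position + // on the reducing node.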
+ @Override + public void toIntermediate(Block[] blocks, int offset, DriverContext driverContext) { + BlockFactory blockFactory = driverContext.blockFactory(); + try ( + $Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(values.size()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(values.size()); + ) { + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.append$Type$(values.get(id).v2()); + timestampsBuilder.appendLong(values.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + Block toFinal(BlockFactory blockFactory) { + values.sort(Comparator.comparingLong(Tuple::v1)); + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(values.size())) { + builder.beginPositionEntry(); + for (int id = 0; id < values.size(); id++) { + builder.append$Type$(values.get(id).v2()); + } + builder.endPositionEntry(); + return builder.build(); + } + } + + @Override + public void close() {} + } + + /** + * Values after the first in each group are collected in a hash, keyed by the pair of groupId and value. + * When emitting the output, we need to iterate the hash one group at a time to build the output block, + * which would require O(N^2). To avoid this, we compute the counts for each group and remap the hash id + * to an array, allowing us to build the output in O(N) instead. + */ + private static class NextValues implements Releasable { + private final BlockFactory blockFactory; +$if(long||double)$ + private final LongLongHash hashes; +$else$ + private final LongHash hashes; +$endif$ + private int[] selectedCounts = null; + private int[] ids = null; + private long extraMemoryUsed = 0; + + private NextValues(BlockFactory blockFactory) { + this.blockFactory = blockFactory; + this.hashes = new Long$if(long||double)$Long$endif$Hash(1, blockFactory.bigArrays()); + } + + void addValue(int groupId, $type$ v) { +$if(long)$ + hashes.add(groupId, v); +$elseif(double)$ + hashes.add(groupId, Double.doubleToLongBits(v)); +$elseif(int)$ + /* + * Encode the groupId and value into a single long - + * the top 32 bits for the group, the bottom 32 for the value. + */ + hashes.add((((long) groupId) << Integer.SIZE) | (v & 0xFFFFFFFFL)); +$elseif(float)$ + /* + * Encode the groupId and value into a single long - + * the top 32 bits for the group, the bottom 32 for the value. + */ + hashes.add((((long) groupId) << Float.SIZE) | (Float.floatToIntBits(v) & 0xFFFFFFFFL)); +$endif$ + } + + $type$ getValue(int index) { +$if(long)$ + return hashes.getKey2(ids[index]); +$elseif(double)$ + return Double.longBitsToDouble(hashes.getKey2(ids[index])); +$elseif(float)$ + long both = hashes.get(ids[index]); + return Float.intBitsToFloat((int) (both & 0xFFFFFFFFL)); +$elseif(int)$ + long both = hashes.get(ids[index]); + return (int) (both & 0xFFFFFFFFL); +$endif$ + } + + private void reserveBytesForIntArray(long numElements) { + long adjust = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + numElements * Integer.BYTES); + blockFactory.adjustBreaker(adjust); + extraMemoryUsed += adjust; + } + + private void prepareForEmitting(IntVector selected) { + if (hashes.size() == 0) { + return; + } + /* + * Get a count of all groups less than the maximum selected group. 
Count + * *downwards* so that we can flip the sign on all of the actually selected + * groups. Negative values in this array are always unselected groups. + */ + int selectedCountsLen = selected.max() + 1; + reserveBytesForIntArray(selectedCountsLen); + this.selectedCounts = new int[selectedCountsLen]; + for (int id = 0; id < hashes.size(); id++) { +$if(long||double)$ + int group = (int) hashes.getKey1(id); +$elseif(float||int)$ + long both = hashes.get(id); + int group = (int) (both >>> Float.SIZE); +$endif$ + if (group < selectedCounts.length) { + selectedCounts[group]--; + } + } + + /* + * Total the selected groups and turn the counts into the start index into a sort-of + * off-by-one running count. It's really the number of values that have been inserted + * into the results before starting on this group. Unselected groups will still + * have negative counts. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will contain 0, 3, -2, 4, 5 + */ + int total = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + int count = -selectedCounts[group]; + selectedCounts[group] = total; + total += count; + } + + /* + * Build a list of ids to insert in order *and* convert the running + * count in selectedCounts[group] into the end index (exclusive) in + * ids for each group. + * Here we use the negative counts to signal that a group hasn't been + * selected and the id containing values for that group is ignored. + * + * For example, if + * | Group | Value Count | Selected | + * |-------|-------------|----------| + * | 0 | 3 | <- | + * | 1 | 1 | <- | + * | 2 | 2 | | + * | 3 | 1 | <- | + * | 4 | 4 | <- | + * + * Then the total is 9 and the counts array will start with 0, 3, -2, 4, 5. + * The counts will end with 3, 4, -2, 5, 9. + */ + reserveBytesForIntArray(total); + + this.ids = new int[total]; + for (int id = 0; id < hashes.size(); id++) { +$if(long||double)$ + int group = (int) hashes.getKey1(id); +$elseif(float||int)$ + long both = hashes.get(id); + int group = (int) (both >>> Float.SIZE); +$endif$ + ids[selectedCounts[group]++] = id; + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(hashes, () -> blockFactory.adjustBreaker(-extraMemoryUsed)); + } + } + + /** + * State for a grouped {@code SPARKLINE} aggregation. This implementation + * emphasizes collect-time performance over result rendering performance. + * The first value in each group is collected in the {@code firstValues} + * array, and subsequent values for each group are collected in {@code nextValues}. 
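+ * <p> + * Worked example (illustrative): collecting (t=3, v=10), (t=1, v=7) and (t=2, v=9) + * into group 0 stores 10 in {@code firstValues}, routes 7 and 9 to {@code nextValues}, + * and records all three (timestamp, value) pairs in the {@code values} map; + * {@code toFinal} then sorts each group by timestamp and emits [7, 9, 10], + * keeping any duplicates.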
+ */ + public static class GroupingState implements GroupingAggregatorState { + private final BlockFactory blockFactory; + $Type$Array firstValues; + private BitArray seen; + private int maxGroupId = -1; + private final NextValues nextValues; + private final Map>> values; + + private GroupingState(DriverContext driverContext) { + this.blockFactory = driverContext.blockFactory(); + boolean success = false; + try { + this.firstValues = driverContext.bigArrays().new$Type$Array(1, false); + this.nextValues = new NextValues(driverContext.blockFactory()); + success = true; + } finally { + if (success == false) { + this.close(); + } + } + values = new HashMap<>(); + } + + @Override + public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try ( + $Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(selected.getPositionCount()); + LongBlock.Builder timestampsBuilder = blockFactory.newLongBlockBuilder(selected.getPositionCount()) + ) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + timestampsBuilder.appendNull(); + continue; + } + + builder.beginPositionEntry(); + timestampsBuilder.beginPositionEntry(); + List> valuesInGroup = values.get(group); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.append$Type$(valuesInGroup.get(id).v2()); + timestampsBuilder.appendLong(valuesInGroup.get(id).v1()); + } + builder.endPositionEntry(); + timestampsBuilder.endPositionEntry(); + } + blocks[offset] = builder.build(); + blocks[offset + 1] = timestampsBuilder.build(); + } + } + + void addValue(int groupId, $type$ v, long t) { + if (groupId > maxGroupId) { + firstValues = blockFactory.bigArrays().grow(firstValues, groupId + 1); + firstValues.set(groupId, v); + // We start in untracked mode, assuming every group has a value as an optimization to avoid allocating + // and updating the seen bitset. However, once some groups don't have values, we initialize the seen bitset, + // fill the groups that have values, and begin tracking incoming groups. + if (seen == null && groupId > maxGroupId + 1) { + seen = new BitArray(groupId + 1, blockFactory.bigArrays()); + seen.fill(0, maxGroupId + 1, true); + } + trackGroupId(groupId); + maxGroupId = groupId; + } else if (hasValue(groupId) == false) { + firstValues.set(groupId, v); + trackGroupId(groupId); + } else if (firstValues.get(groupId) != v) { + nextValues.addValue(groupId, v); + } + List> valuesInGroup = values.computeIfAbsent(groupId, g -> new ArrayList<>()); + valuesInGroup.add(Tuple.tuple(t, v)); + } + + @Override + public void enableGroupIdTracking(SeenGroupIds seen) { + // we track the seen values manually + } + + private void trackGroupId(int groupId) { + if (seen != null) { + seen.set(groupId); + } + } + + /** + * Returns true if the group has a value in firstValues; having a value in nextValues is optional. + * Returns false if the group does not have values in either firstValues or nextValues. + */ + private boolean hasValue(int groupId) { + return seen == null || seen.get(groupId); + } + + /** + * Builds a {@link Block} with the values collected for the {@code selected} + * groups, in timestamp order and keeping duplicates. This is the implementation + * of the final result of the agg.
+ */ + Block toFinal(BlockFactory blockFactory, IntVector selected) { + nextValues.prepareForEmitting(selected); + /* + * Insert the ids in order. + */ + final int[] nextValueCounts = nextValues.selectedCounts; + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(selected.getPositionCount())) { + int nextValuesStart = 0; + for (int s = 0; s < selected.getPositionCount(); s++) { + int group = selected.getInt(s); + if (group > maxGroupId || hasValue(group) == false) { + builder.appendNull(); + continue; + } + $type$ firstValue = firstValues.get(group); + final int nextValuesEnd = nextValueCounts != null ? nextValueCounts[group] : nextValuesStart; + + List> valuesInGroup = values.get(group); + valuesInGroup.sort(Comparator.comparingLong(Tuple::v1)); + builder.beginPositionEntry(); + for (int id = 0; id < valuesInGroup.size(); id++) { + builder.append$Type$(valuesInGroup.get(id).v2()); + } + builder.endPositionEntry(); + } + return builder.build(); + } + } + + @Override + public void close() { + Releasables.closeExpectNoException(seen, firstValues, nextValues); + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sparkline.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sparkline.csv-spec new file mode 100644 index 0000000000000..b712528b0b1f4 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sparkline.csv-spec @@ -0,0 +1,119 @@ +empNo +required_capability: mv_sort + +FROM employees +| STATS v = MV_SORT(VALUES(emp_no)) +; + +v:integer +[10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017, 10018, 10019, 10020, 10021, 10022, 10023, 10024, 10025, 10026, 10027, 10028, 10029, 10030, 10031, 10032, 10033, 10034, 10035, 10036, 10037, 10038, 10039, 10040, 10041, 10042, 10043, 10044, 10045, 10046, 10047, 10048, 10049, 10050, 10051, 10052, 10053, 10054, 10055, 10056, 10057, 10058, 10059, 10060, 10061, 10062, 10063, 10064, 10065, 10066, 10067, 10068, 10069, 10070, 10071, 10072, 10073, 10074, 10075, 10076, 10077, 10078, 10079, 10080, 10081, 10082, 10083, 10084, 10085, 10086, 10087, 10088, 10089, 10090, 10091, 10092, 10093, 10094, 10095, 10096, 10097, 10098, 10099, 10100] +; + +empNoByHireDate +required_capability: sparkline + +FROM employees +| STATS v = SPARKLINE(emp_no, hire_date) +; + +v:integer +[10009, 10048, 10098, 10076, 10061, 10070, 10013, 10080, 10029, 10064, 10002, 10053, 10066, 10090, 10079, 10001, 10095, 10089, 10003, 10087, 10081, 10004, 10067, 10014, 10033, 10075, 10083, 10018, 10058, 10094, 10078, 10015, 10068, 10025, 10100, 10071, 10060, 10039, 10021, 10065, 10052, 10072, 10088, 10035, 10034, 10099, 10007, 10047, 10063, 10006, 10027, 10010, 10045, 10005, 10038, 10092, 10069, 10041, 10023, 10082, 10096, 10011, 10056, 10086, 10077, 10032, 10074, 10097, 10043, 10037, 10050, 10020, 10059, 10062, 10031, 10028, 10073, 10036, 10057, 10055, 10049, 10046, 10051, 10091, 10012, 10040, 10042, 10017, 10030, 10085, 10044, 10008, 10016, 10054, 10026, 10022, 10084, 10093, 10024, 10019] +; + +empNoByBirthYear +required_capability: mv_sort + +FROM employees +| STATS v = MV_SORT(VALUES(emp_no)) BY birth_year = DATE_EXTRACT("year", BUCKET(birth_date, 1 year)) +| SORT birth_year +| KEEP birth_year, v +; + +birth_year:long | v:integer +1952 | [10009, 10020, 10022, 10063, 10066, 10072, 10076, 10097] +1953 | [10001, 10006, 10011, 10019, 10023, 10026, 10035, 10051, 10059, 10067, 10100] +1954 | [10004, 10018, 10053, 10057, 10058, 10073, 10088, 10096] +1955 | [10005, 
10070, 10074, 10091] +1956 | [10014, 10029, 10033, 10055, 10099] +1957 | [10007, 10054, 10080, 10094] +1958 | [10008, 10017, 10024, 10025, 10030, 10050, 10071] +1959 | [10003, 10015, 10031, 10036, 10039, 10064, 10078, 10083, 10087] +1960 | [10012, 10021, 10032, 10038, 10069, 10075, 10081, 10084] +1961 | [10016, 10052, 10056, 10060, 10062, 10079, 10090, 10098] +1962 | [10027, 10034, 10061, 10068, 10085, 10086] +1963 | [10010, 10013, 10028, 10037, 10065, 10082, 10089] +1964 | [10002, 10077, 10092, 10093] +1965 | 10095 +null | [10040, 10041, 10042, 10043, 10044, 10045, 10046, 10047, 10048, 10049] +; + +countByBirthYearByGender +required_capability: sparkline + +FROM employees +| STATS count_per_year=COUNT(*) BY gender, @timestamp=BUCKET(birth_date, 1 year) +| STATS Count=SUM(count_per_year), Trend=SPARKLINE(count_per_year, @timestamp), Timestamps=MV_SORT(VALUES(@timestamp)) BY gender +| KEEP gender, Count, Trend, Timestamps +; + +# 57 vs 52 (M) and 33 vs 28 (F) because of birth_date being null +gender:keyword | Count:long | Trend:long | Timestamps:datetime +null | 10 | [2, 1, 1, 1, 1, 1, 1, 2] | [1953-01-01T00:00:00.000Z, 1954-01-01T00:00:00.000Z, 1956-01-01T00:00:00.000Z, 1958-01-01T00:00:00.000Z, 1959-01-01T00:00:00.000Z, 1960-01-01T00:00:00.000Z, 1961-01-01T00:00:00.000Z, 1963-01-01T00:00:00.000Z] +M | 57 | [4, 5, 4, 3, 3, 2, 5, 6, 4, 4, 5, 4, 2, 1] | [1952-01-01T00:00:00.000Z, 1953-01-01T00:00:00.000Z, 1954-01-01T00:00:00.000Z, 1955-01-01T00:00:00.000Z, 1956-01-01T00:00:00.000Z, 1957-01-01T00:00:00.000Z, 1958-01-01T00:00:00.000Z, 1959-01-01T00:00:00.000Z, 1960-01-01T00:00:00.000Z, 1961-01-01T00:00:00.000Z, 1962-01-01T00:00:00.000Z, 1963-01-01T00:00:00.000Z, 1964-01-01T00:00:00.000Z, 1965-01-01T00:00:00.000Z] +F | 33 | [4, 4, 3, 1, 1, 2, 1, 2, 3, 3, 1, 1, 2] | [1952-01-01T00:00:00.000Z, 1953-01-01T00:00:00.000Z, 1954-01-01T00:00:00.000Z, 1955-01-01T00:00:00.000Z, 1956-01-01T00:00:00.000Z, 1957-01-01T00:00:00.000Z, 1958-01-01T00:00:00.000Z, 1959-01-01T00:00:00.000Z, 1960-01-01T00:00:00.000Z, 1961-01-01T00:00:00.000Z, 1962-01-01T00:00:00.000Z, 1963-01-01T00:00:00.000Z, 1964-01-01T00:00:00.000Z] +; + +empNoByHireDateByBirthYear +required_capability: sparkline + +FROM employees +| STATS v = SPARKLINE(emp_no, hire_date) BY birth_year = DATE_EXTRACT("year", BUCKET(birth_date, 1 year)) +| SORT birth_year +| KEEP birth_year, v +; + +birth_year:long | v:integer +1952 | [10009, 10076, 10066, 10072, 10063, 10097, 10020, 10022] +1953 | [10001, 10067, 10100, 10035, 10006, 10023, 10011, 10059, 10051, 10026, 10019] +1954 | [10053, 10004, 10018, 10058, 10088, 10096, 10073, 10057] +1955 | [10070, 10005, 10074, 10091] +1956 | [10029, 10014, 10033, 10099, 10055] +1957 | [10080, 10094, 10007, 10054] +1958 | [10025, 10071, 10050, 10017, 10030, 10008, 10024] +1959 | [10064, 10003, 10087, 10083, 10078, 10015, 10039, 10031, 10036] +1960 | [10081, 10075, 10021, 10038, 10069, 10032, 10012, 10084] +1961 | [10098, 10090, 10079, 10060, 10052, 10056, 10062, 10016] +1962 | [10061, 10068, 10034, 10027, 10086, 10085] +1963 | [10013, 10089, 10065, 10010, 10082, 10037, 10028] +1964 | [10002, 10092, 10077, 10093] +1965 | 10095 +null | [10048, 10047, 10045, 10041, 10043, 10049, 10046, 10040, 10042, 10044] +; + +empNoByHireDateByBirthYearWithConcatTrick +required_capability: mv_sort + +FROM employees +| EVAL hire_date_and_emp_no=CONCAT(DATE_FORMAT("yyyy-MM-dd", hire_date), "/", TO_STRING(emp_no)) +| STATS v = MV_SORT(VALUES(hire_date_and_emp_no)) BY birth_year = DATE_EXTRACT("year", BUCKET(birth_date, 1 year)) +| SORT 
birth_year +| KEEP birth_year, v +; + +birth_year:long | v:keyword +1952 | [1985-02-18/10009, 1985-07-09/10076, 1986-02-26/10066, 1988-07-21/10072, 1989-04-08/10063, 1990-09-15/10097, 1991-01-26/10020, 1995-08-22/10022] +1953 | [1986-06-26/10001, 1987-03-04/10067, 1987-09-21/10100, 1988-09-05/10035, 1989-06-02/10006, 1989-12-17/10023, 1990-01-22/10011, 1991-06-26/10059, 1992-10-15/10051, 1995-03-20/10026, 1999-04-30/10019] +1954 | [1986-02-04/10053, 1986-12-01/10004, 1987-04-03/10018, 1987-04-13/10058, 1988-09-02/10088, 1990-01-14/10096, 1991-12-01/10073, 1992-01-15/10057] +1955 | [1985-10-14/10070, 1989-09-12/10005, 1990-08-13/10074, 1992-11-18/10091] +1956 | [1985-11-20/10029, 1987-03-11/10014, 1987-03-18/10033, 1988-10-18/10099, 1992-04-27/10055] +1957 | [1985-11-19/10080, 1987-04-18/10094, 1989-02-10/10007, 1995-03-13/10054] +1958 | [1987-08-17/10025, 1987-10-01/10071, 1990-12-25/10050, 1993-08-03/10017, 1994-02-17/10030, 1994-09-15/10008, 1997-05-19/10024] +1959 | [1985-11-20/10064, 1986-08-28/10003, 1986-09-08/10087, 1987-03-31/10083, 1987-05-26/10078, 1987-07-02/10015, 1988-01-19/10039, 1991-09-01/10031, 1992-01-03/10036] +1960 | [1986-10-30/10081, 1987-03-19/10075, 1988-02-10/10021, 1989-09-20/10038, 1989-11-05/10069, 1990-06-20/10032, 1992-12-18/10012, 1995-12-15/10084] +1961 | [1985-05-13/10098, 1986-03-14/10090, 1986-03-27/10079, 1987-11-02/10060, 1988-05-21/10052, 1990-02-01/10056, 1991-08-30/10062, 1995-01-27/10016] +1962 | [1985-09-17/10061, 1987-08-07/10068, 1988-09-21/10034, 1989-07-07/10027, 1990-02-16/10086, 1994-04-09/10085] +1963 | [1985-10-20/10013, 1986-08-12/10089, 1988-05-18/10065, 1989-08-24/10010, 1990-01-03/10082, 1990-12-05/10037, 1991-10-22/10028] +1964 | [1985-11-21/10002, 1989-09-22/10092, 1990-03-02/10077, 1996-11-05/10093] +1965 | 1986-07-15/10095 +null | [1985-02-24/10048, 1989-03-31/10047, 1989-09-02/10045, 1989-11-12/10041, 1990-10-20/10043, 1992-05-04/10049, 1992-06-20/10046, 1993-02-14/10040, 1993-03-21/10042, 1994-05-21/10044] +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 6a2b112b58deb..301d9fddd937f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -1335,7 +1335,12 @@ public enum Cap { /** * Support correct counting of skipped shards. */ - CORRECT_SKIPPED_SHARDS_COUNT; + CORRECT_SKIPPED_SHARDS_COUNT, + + /** + * Introduce SPARKLINE aggregate function. 
+ */ + SPARKLINE; private final boolean enabled; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 649503b1443d2..0a56344fda697 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; import org.elasticsearch.xpack.esql.expression.function.aggregate.Rate; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sample; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Sparkline; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialExtent; import org.elasticsearch.xpack.esql.expression.function.aggregate.StdDev; @@ -325,6 +326,7 @@ private static FunctionDefinition[][] functions() { def(Sum.class, uni(Sum::new), "sum"), def(Top.class, tri(Top::new), "top"), def(Values.class, uni(Values::new), "values"), + def(Sparkline.class, bi(Sparkline::new), "sparkline"), def(WeightedAvg.class, bi(WeightedAvg::new), "weighted_avg") }, // math new FunctionDefinition[] { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java index 7dfacafbc2c53..d4d05830b5df1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java @@ -31,6 +31,7 @@ public static List getNamedWriteables() { Sum.ENTRY, Top.ENTRY, Values.ENTRY, + Sparkline.ENTRY, MinOverTime.ENTRY, MaxOverTime.ENTRY, AvgOverTime.ENTRY, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sparkline.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sparkline.java new file mode 100644 index 0000000000000..b1257d1a7f285 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Sparkline.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.SparklineBooleanAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.SparklineDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.SparklineIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.SparklineLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.TypeResolutions; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionAppliesTo; +import org.elasticsearch.xpack.esql.expression.function.FunctionAppliesToLifecycle; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.FunctionType; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.planner.ToAggregator; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class Sparkline extends AggregateFunction implements OptionalArgument, ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "Sparkline", + Sparkline::new + ); + + private static final Map<DataType, Supplier<AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.INTEGER, SparklineIntAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, SparklineLongAggregatorFunctionSupplier::new), + Map.entry(DataType.UNSIGNED_LONG, SparklineLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, SparklineLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, SparklineLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, SparklineDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.BOOLEAN, SparklineBooleanAggregatorFunctionSupplier::new) + ); + + public Sparkline(Source source, Expression field, Expression filter, Expression sortField) { + super(source, field, filter, sortField != null ? List.of(sortField) : List.of()); + } + + @FunctionInfo( + returnType = { + "boolean", + "cartesian_point", + "cartesian_shape", + "date", + "date_nanos", + "double", + "geo_point", + "geo_shape", + "integer", + "ip", + "keyword", + "long", + "unsigned_long", + "version" }, + preview = true, + appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.PREVIEW) }, + description = """ + Returns all values in a group as a multivalued field, ordered by the given sortField (typically a timestamp), so that the result can be rendered as a sparkline.""", + appendix = """ + ::::{tip} + Use [`TOP`](/reference/query-languages/esql/functions-operators/aggregation-functions.md#esql-top) + if you only need the largest or smallest values rather than the full time-ordered series.
+    @Override
+    public Sparkline replaceChildren(List<Expression> newChildren) {
+        return new Sparkline(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2));
+    }
+
+    @Override
+    public Sparkline withFilter(Expression filter) {
+        return new Sparkline(source(), field(), filter, sortField());
+    }
+
+    @Override
+    public DataType dataType() {
+        return field().dataType().noText();
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        return TypeResolutions.isRepresentableExceptCounters(field(), sourceText(), DEFAULT);
+    }
+
+    @Override
+    public AggregatorFunctionSupplier supplier() {
+        DataType type = field().dataType();
+        if (SUPPLIERS.containsKey(type) == false) {
+            // If the type checking did its job, this should never happen
+            throw EsqlIllegalArgumentException.illegalDataType(type);
+        }
+        return SUPPLIERS.get(type).get();
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SparklineSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SparklineSerializationTests.java
new file mode 100644
index 0000000000000..f08a0e646fa43
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/SparklineSerializationTests.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.aggregate;
+
+import org.elasticsearch.xpack.esql.expression.AbstractExpressionSerializationTests;
+
+import java.io.IOException;
+
+public class SparklineSerializationTests extends AbstractExpressionSerializationTests<Sparkline> {
+    @Override
+    protected Sparkline createTestInstance() {
+        return new Sparkline(randomSource(), randomChild(), randomChild());
+    }
+
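+    // Re-randomize both children; randomValueOtherThan guarantees the mutated
+    // copy differs from the original in each child.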
+    @Override
+    protected Sparkline mutateInstance(Sparkline instance) throws IOException {
+        return new Sparkline(
+            instance.source(),
+            randomValueOtherThan(instance.field(), AbstractExpressionSerializationTests::randomChild),
+            randomValueOtherThan(instance.sortField(), AbstractExpressionSerializationTests::randomChild)
+        );
+    }
+}
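Usage sketch (illustrative only; the index and field names below are hypothetical
and not taken from this patch). The registry entry
def(Sparkline.class, bi(Sparkline::new), "sparkline") makes the call binary: the
value field comes first and the date sort field second, so a query would look like

    FROM metrics
    | STATS cpu_trend = SPARKLINE(cpu_pct, @timestamp) BY host

The tested queries live in the sparkline.csv-spec fixture added by this patch.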