diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 8acb7697b9f15..aa249a8883382 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -628,28 +628,6 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/aggregation/ValuesBytesRefAggregator.java" } - File rateAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st") - template { - it.properties = intProperties - it.inputFile = rateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/RateIntAggregator.java" - } - template { - it.properties = longProperties - it.inputFile = rateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/RateLongAggregator.java" - } - template { - it.properties = floatProperties - it.inputFile = rateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/RateFloatAggregator.java" - } - template { - it.properties = doubleProperties - it.inputFile = rateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/RateDoubleAggregator.java" - } - File stdDevAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-StdDevAggregator.java.st") template { it.properties = intProperties diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateFloatAggregator.java deleted file mode 100644 index 11a529b97835e..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateFloatAggregator.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.aggregation; - -// begin generated imports -import org.apache.lucene.util.Accountable; -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.ObjectArray; -import org.elasticsearch.compute.ann.GroupingAggregator; -import org.elasticsearch.compute.ann.IntermediateState; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.FloatBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; -// end generated imports - -/** - * A rate grouping aggregation definition for float. - * This class is generated. Edit `X-RateAggregator.java.st` instead. 
- */ -@GroupingAggregator( - value = { - @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), - @IntermediateState(name = "values", type = "FLOAT_BLOCK"), - @IntermediateState(name = "sampleCounts", type = "INT"), - @IntermediateState(name = "resets", type = "DOUBLE") } -) -public class RateFloatAggregator { - - public static FloatRateGroupingState initGrouping(DriverContext driverContext) { - return new FloatRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); - } - - public static void combine(FloatRateGroupingState current, int groupId, float value, long timestamp) { - current.append(groupId, timestamp, value); - } - - public static void combineIntermediate( - FloatRateGroupingState current, - int groupId, - LongBlock timestamps, - FloatBlock values, - int sampleCount, - double reset, - int otherPosition - ) { - current.combine(groupId, timestamps, values, sampleCount, reset, otherPosition); - } - - public static Block evaluateFinal(FloatRateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) { - return state.evaluateFinal(selected, evalContext); - } - - private static class FloatRateState { - static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(FloatRateState.class); - final long[] timestamps; // descending order - final float[] values; - // the timestamps and values arrays might have collapsed to fewer values than the actual sample count - int sampleCount = 0; - double reset = 0; - - FloatRateState(int initialSize) { - this.timestamps = new long[initialSize]; - this.values = new float[initialSize]; - } - - FloatRateState(long[] ts, float[] vs) { - this.timestamps = ts; - this.values = vs; - this.sampleCount = values.length; - } - - private float dv(float v0, float v1) { - // counter reset detection - return v0 > v1 ? 
v1 : v1 - v0; - } - - void append(long t, float v) { - assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; - assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; - reset += dv(v, values[1]) + dv(values[1], values[0]) - dv(v, values[0]); - timestamps[1] = t; - values[1] = v; - sampleCount++; - } - - int entries() { - return timestamps.length; - } - - static long bytesUsed(int entries) { - var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Float.BYTES * entries); - return BASE_RAM_USAGE + ts + vs; - } - } - - public static final class FloatRateGroupingState implements Releasable, Accountable, GroupingAggregatorState { - private ObjectArray states; - private final BigArrays bigArrays; - private final CircuitBreaker breaker; - private long stateBytes; // for individual states - - FloatRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { - this.bigArrays = bigArrays; - this.breaker = breaker; - this.states = bigArrays.newObjectArray(1); - } - - void ensureCapacity(int groupId) { - states = bigArrays.grow(states, groupId + 1); - } - - void adjustBreaker(long bytes) { - breaker.addEstimateBytesAndMaybeBreak(bytes, "<>"); - stateBytes += bytes; - assert stateBytes >= 0 : stateBytes; - } - - void append(int groupId, long timestamp, float value) { - ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker(FloatRateState.bytesUsed(1)); - state = new FloatRateState(new long[] { timestamp }, new float[] { value }); - states.set(groupId, state); - } else { - if (state.entries() == 1) { - adjustBreaker(FloatRateState.bytesUsed(2)); - state = new FloatRateState(new long[] { state.timestamps[0], timestamp }, new float[] { state.values[0], value }); - states.set(groupId, state); - adjustBreaker(-FloatRateState.bytesUsed(1)); // old state - } else { - state.append(timestamp, value); - } - } - } - - void combine(int groupId, LongBlock timestamps, FloatBlock values, int sampleCount, double reset, int otherPosition) { - final int valueCount = timestamps.getValueCount(otherPosition); - if (valueCount == 0) { - return; - } - final int firstIndex = timestamps.getFirstValueIndex(otherPosition); - ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker(FloatRateState.bytesUsed(valueCount)); - state = new FloatRateState(valueCount); - state.reset = reset; - state.sampleCount = sampleCount; - states.set(groupId, state); - // TODO: add bulk_copy to Block - for (int i = 0; i < valueCount; i++) { - state.timestamps[i] = timestamps.getLong(firstIndex + i); - state.values[i] = values.getFloat(firstIndex + i); - } - } else { - adjustBreaker(FloatRateState.bytesUsed(state.entries() + valueCount)); - var newState = new FloatRateState(state.entries() + valueCount); - newState.reset = state.reset + reset; - newState.sampleCount = state.sampleCount + sampleCount; - states.set(groupId, newState); - merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-FloatRateState.bytesUsed(state.entries())); // old state - } - } - - void merge(FloatRateState curr, FloatRateState dst, int firstIndex, int rightCount, LongBlock timestamps, FloatBlock values) { - int i = 0, j = 0, k = 0; - final int leftCount = curr.entries(); - while (i < leftCount && j < rightCount) { - final var t1 = 
curr.timestamps[i]; - final var t2 = timestamps.getLong(firstIndex + j); - if (t1 > t2) { - dst.timestamps[k] = t1; - dst.values[k] = curr.values[i]; - ++i; - } else { - dst.timestamps[k] = t2; - dst.values[k] = values.getFloat(firstIndex + j); - ++j; - } - ++k; - } - if (i < leftCount) { - System.arraycopy(curr.timestamps, i, dst.timestamps, k, leftCount - i); - System.arraycopy(curr.values, i, dst.values, k, leftCount - i); - } - while (j < rightCount) { - dst.timestamps[k] = timestamps.getLong(firstIndex + j); - dst.values[k] = values.getFloat(firstIndex + j); - ++k; - ++j; - } - } - - FloatRateState mergeState(FloatRateState s1, FloatRateState s2) { - var newLen = s1.entries() + s2.entries(); - adjustBreaker(FloatRateState.bytesUsed(newLen)); - var dst = new FloatRateState(newLen); - dst.reset = s1.reset + s2.reset; - dst.sampleCount = s1.sampleCount + s2.sampleCount; - int i = 0, j = 0, k = 0; - while (i < s1.entries() && j < s2.entries()) { - if (s1.timestamps[i] > s2.timestamps[j]) { - dst.timestamps[k] = s1.timestamps[i]; - dst.values[k] = s1.values[i]; - ++i; - } else { - dst.timestamps[k] = s2.timestamps[j]; - dst.values[k] = s2.values[j]; - ++j; - } - ++k; - } - System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); - System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); - System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); - System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); - return dst; - } - - @Override - public long ramBytesUsed() { - return states.ramBytesUsed() + stateBytes; - } - - @Override - public void close() { - Releasables.close(states, () -> adjustBreaker(-stateBytes)); - } - - @Override - public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; - final BlockFactory blockFactory = driverContext.blockFactory(); - final int positionCount = selected.getPositionCount(); - try ( - LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); - FloatBlock.Builder values = blockFactory.newFloatBlockBuilder(positionCount * 2); - IntVector.FixedBuilder sampleCounts = blockFactory.newIntVectorFixedBuilder(positionCount); - DoubleVector.FixedBuilder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) - ) { - for (int i = 0; i < positionCount; i++) { - final var groupId = selected.getInt(i); - final var state = groupId < states.size() ? 
states.get(groupId) : null; - if (state != null) { - timestamps.beginPositionEntry(); - for (long t : state.timestamps) { - timestamps.appendLong(t); - } - timestamps.endPositionEntry(); - - values.beginPositionEntry(); - for (float v : state.values) { - values.appendFloat(v); - } - values.endPositionEntry(); - sampleCounts.appendInt(i, state.sampleCount); - resets.appendDouble(i, state.reset); - } else { - timestamps.appendNull(); - values.appendNull(); - sampleCounts.appendInt(i, 0); - resets.appendDouble(i, 0); - } - } - blocks[offset] = timestamps.build(); - blocks[offset + 1] = values.build(); - blocks[offset + 2] = sampleCounts.build().asBlock(); - blocks[offset + 3] = resets.build().asBlock(); - } - } - - private static double computeRateWithoutExtrapolate(FloatRateState state) { - final int len = state.entries(); - assert len >= 2 : "rate requires at least two samples; got " + len; - final long firstTS = state.timestamps[state.timestamps.length - 1]; - final long lastTS = state.timestamps[0]; - double reset = state.reset; - for (int i = 1; i < len; i++) { - if (state.values[i - 1] < state.values[i]) { - reset += state.values[i]; - } - } - final double firstValue = state.values[len - 1]; - final double lastValue = state.values[0] + reset; - return (lastValue - firstValue) * 1000.0 / (lastTS - firstTS); - } - - /** - * Credit to PromQL for this extrapolation algorithm: - * If samples are close enough to the rangeStart and rangeEnd, we extrapolate the rate all the way to the boundary in question. - * "Close enough" is defined as "up to 10% more than the average duration between samples within the range". - * Essentially, we assume a more or less regular spacing between samples. If we don't see a sample where we would expect one, - * we assume the series does not cover the whole range but starts and/or ends within the range. - * We still extrapolate the rate in this case, but not all the way to the boundary, only by half of the average duration between - * samples (which is our guess for where the series actually starts or ends). 
- */ - private static double extrapolateRate(FloatRateState state, long rangeStart, long rangeEnd) { - final int len = state.entries(); - assert len >= 2 : "rate requires at least two samples; got " + len; - final long firstTS = state.timestamps[state.timestamps.length - 1]; - final long lastTS = state.timestamps[0]; - double reset = state.reset; - for (int i = 1; i < len; i++) { - if (state.values[i - 1] < state.values[i]) { - reset += state.values[i]; - } - } - double firstValue = state.values[len - 1]; - double lastValue = state.values[0] + reset; - final double sampleTS = lastTS - firstTS; - final double averageSampleInterval = sampleTS / state.sampleCount; - final double slope = (lastValue - firstValue) / sampleTS; - double startGap = firstTS - rangeStart; - if (startGap > 0) { - if (startGap > averageSampleInterval * 1.1) { - startGap = averageSampleInterval / 2.0; - } - firstValue = Math.max(0.0, firstValue - startGap * slope); - } - double endGap = rangeEnd - lastTS; - if (endGap > 0) { - if (endGap > averageSampleInterval * 1.1) { - endGap = averageSampleInterval / 2.0; - } - lastValue = lastValue + endGap * slope; - } - return (lastValue - firstValue) * 1000.0 / (rangeEnd - rangeStart); - } - - Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext evalContext) { - int positionCount = selected.getPositionCount(); - try (DoubleBlock.Builder rates = evalContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - final var groupId = selected.getInt(p); - final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.sampleCount < 2) { - rates.appendNull(); - continue; - } - int len = state.entries(); - final double rate; - if (evalContext instanceof TimeSeriesGroupingAggregatorEvaluationContext tsContext) { - rate = extrapolateRate(state, tsContext.rangeStartInMillis(groupId), tsContext.rangeEndInMillis(groupId)); - } else { - rate = computeRateWithoutExtrapolate(state); - } - rates.appendDouble(rate); - } - return rates.build(); - } - } - - @Override - public void enableGroupIdTracking(SeenGroupIds seenGroupIds) { - // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` - } - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleAggregatorFunctionSupplier.java deleted file mode 100644 index 2bdc1e6a5e84a..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleAggregatorFunctionSupplier.java +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.util.List; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link AggregatorFunctionSupplier} implementation for {@link RateDoubleAggregator}. - * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. 
- */ -public final class RateDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - public RateDoubleAggregatorFunctionSupplier() { - } - - @Override - public List nonGroupingIntermediateStateDesc() { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public List groupingIntermediateStateDesc() { - return RateDoubleGroupingAggregatorFunction.intermediateStateDesc(); - } - - @Override - public AggregatorFunction aggregator(DriverContext driverContext, List channels) { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public RateDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, - List channels) { - return RateDoubleGroupingAggregatorFunction.create(channels, driverContext); - } - - @Override - public String describe() { - return "rate of doubles"; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java deleted file mode 100644 index 58c540c6135fe..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.lang.StringBuilder; -import java.util.List; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntArrayBlock; -import org.elasticsearch.compute.data.IntBigArrayBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.LongVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link GroupingAggregatorFunction} implementation for {@link RateDoubleAggregator}. - * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
- */ -public final class RateDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { - private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("timestamps", ElementType.LONG), - new IntermediateStateDesc("values", ElementType.DOUBLE), - new IntermediateStateDesc("sampleCounts", ElementType.INT), - new IntermediateStateDesc("resets", ElementType.DOUBLE) ); - - private final RateDoubleAggregator.DoubleRateGroupingState state; - - private final List channels; - - private final DriverContext driverContext; - - public RateDoubleGroupingAggregatorFunction(List channels, - RateDoubleAggregator.DoubleRateGroupingState state, DriverContext driverContext) { - this.channels = channels; - this.state = state; - this.driverContext = driverContext; - } - - public static RateDoubleGroupingAggregatorFunction create(List channels, - DriverContext driverContext) { - return new RateDoubleGroupingAggregatorFunction(channels, RateDoubleAggregator.initGrouping(driverContext), driverContext); - } - - public static List intermediateStateDesc() { - return INTERMEDIATE_STATE_DESC; - } - - @Override - public int intermediateBlockCount() { - return INTERMEDIATE_STATE_DESC.size(); - } - - @Override - public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, - Page page) { - DoubleBlock valueBlock = page.getBlock(channels.get(0)); - LongBlock timestampBlock = page.getBlock(channels.get(1)); - DoubleVector valueVector = valueBlock.asVector(); - if (valueVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - LongVector timestampVector = timestampBlock.asVector(); - if (timestampVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void close() { - } - }; - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleBlock valueBlock, - 
LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - double valueValue = valueBlock.getDouble(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - double valueValue = valueVector.getDouble(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - DoubleBlock values = (DoubleBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - 
RateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - double valueValue = valueBlock.getDouble(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - double valueValue = valueVector.getDouble(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - DoubleBlock values = (DoubleBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - 
int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - RateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, DoubleBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupId = groups.getInt(groupPosition); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - double valueValue = valueBlock.getDouble(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, DoubleVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - int groupId = groups.getInt(groupPosition); - double valueValue = valueVector.getDouble(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - DoubleBlock values = (DoubleBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = groups.getInt(groupPosition); - int valuesPosition = groupPosition + positionOffset; - RateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), 
valuesPosition); - } - } - - private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, DoubleBlock valueBlock, - LongBlock timestampBlock) { - if (valueBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - if (timestampBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - } - - @Override - public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { - state.enableGroupIdTracking(seenGroupIds); - } - - @Override - public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { - state.toIntermediate(blocks, offset, selected, driverContext); - } - - @Override - public void evaluateFinal(Block[] blocks, int offset, IntVector selected, - GroupingAggregatorEvaluationContext ctx) { - blocks[offset] = RateDoubleAggregator.evaluateFinal(state, selected, ctx); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(getClass().getSimpleName()).append("["); - sb.append("channels=").append(channels); - sb.append("]"); - return sb.toString(); - } - - @Override - public void close() { - state.close(); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatAggregatorFunctionSupplier.java deleted file mode 100644 index 47e550d1ee6a7..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatAggregatorFunctionSupplier.java +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.util.List; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link AggregatorFunctionSupplier} implementation for {@link RateFloatAggregator}. - * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. 
- */ -public final class RateFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - public RateFloatAggregatorFunctionSupplier() { - } - - @Override - public List nonGroupingIntermediateStateDesc() { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public List groupingIntermediateStateDesc() { - return RateFloatGroupingAggregatorFunction.intermediateStateDesc(); - } - - @Override - public AggregatorFunction aggregator(DriverContext driverContext, List channels) { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public RateFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, - List channels) { - return RateFloatGroupingAggregatorFunction.create(channels, driverContext); - } - - @Override - public String describe() { - return "rate of floats"; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java deleted file mode 100644 index 9f73e964b1d36..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateFloatGroupingAggregatorFunction.java +++ /dev/null @@ -1,425 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.lang.StringBuilder; -import java.util.List; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.FloatBlock; -import org.elasticsearch.compute.data.FloatVector; -import org.elasticsearch.compute.data.IntArrayBlock; -import org.elasticsearch.compute.data.IntBigArrayBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.LongVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link GroupingAggregatorFunction} implementation for {@link RateFloatAggregator}. - * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
- */ -public final class RateFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { - private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("timestamps", ElementType.LONG), - new IntermediateStateDesc("values", ElementType.FLOAT), - new IntermediateStateDesc("sampleCounts", ElementType.INT), - new IntermediateStateDesc("resets", ElementType.DOUBLE) ); - - private final RateFloatAggregator.FloatRateGroupingState state; - - private final List channels; - - private final DriverContext driverContext; - - public RateFloatGroupingAggregatorFunction(List channels, - RateFloatAggregator.FloatRateGroupingState state, DriverContext driverContext) { - this.channels = channels; - this.state = state; - this.driverContext = driverContext; - } - - public static RateFloatGroupingAggregatorFunction create(List channels, - DriverContext driverContext) { - return new RateFloatGroupingAggregatorFunction(channels, RateFloatAggregator.initGrouping(driverContext), driverContext); - } - - public static List intermediateStateDesc() { - return INTERMEDIATE_STATE_DESC; - } - - @Override - public int intermediateBlockCount() { - return INTERMEDIATE_STATE_DESC.size(); - } - - @Override - public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, - Page page) { - FloatBlock valueBlock = page.getBlock(channels.get(0)); - LongBlock timestampBlock = page.getBlock(channels.get(1)); - FloatVector valueVector = valueBlock.asVector(); - if (valueVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - LongVector timestampVector = timestampBlock.asVector(); - if (timestampVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void close() { - } - }; - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, FloatBlock valueBlock, - LongBlock 
timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - float valueValue = valueBlock.getFloat(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateFloatAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, FloatVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - float valueValue = valueVector.getFloat(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateFloatAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - FloatBlock values = (FloatBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - 
RateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, FloatBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - float valueValue = valueBlock.getFloat(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateFloatAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, FloatVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - float valueValue = valueVector.getFloat(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateFloatAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - FloatBlock values = (FloatBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int 
groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - RateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, FloatBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupId = groups.getInt(groupPosition); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - float valueValue = valueBlock.getFloat(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateFloatAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, FloatVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - int groupId = groups.getInt(groupPosition); - float valueValue = valueVector.getFloat(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateFloatAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - FloatBlock values = (FloatBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = groups.getInt(groupPosition); - int valuesPosition = groupPosition + positionOffset; - RateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - 
} - - private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, FloatBlock valueBlock, - LongBlock timestampBlock) { - if (valueBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - if (timestampBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - } - - @Override - public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { - state.enableGroupIdTracking(seenGroupIds); - } - - @Override - public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { - state.toIntermediate(blocks, offset, selected, driverContext); - } - - @Override - public void evaluateFinal(Block[] blocks, int offset, IntVector selected, - GroupingAggregatorEvaluationContext ctx) { - blocks[offset] = RateFloatAggregator.evaluateFinal(state, selected, ctx); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(getClass().getSimpleName()).append("["); - sb.append("channels=").append(channels); - sb.append("]"); - return sb.toString(); - } - - @Override - public void close() { - state.close(); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntAggregatorFunctionSupplier.java deleted file mode 100644 index fb8e2546e6ec8..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntAggregatorFunctionSupplier.java +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.util.List; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link AggregatorFunctionSupplier} implementation for {@link RateIntAggregator}. - * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. 
- */ -public final class RateIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - public RateIntAggregatorFunctionSupplier() { - } - - @Override - public List nonGroupingIntermediateStateDesc() { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public List groupingIntermediateStateDesc() { - return RateIntGroupingAggregatorFunction.intermediateStateDesc(); - } - - @Override - public AggregatorFunction aggregator(DriverContext driverContext, List channels) { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public RateIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, - List channels) { - return RateIntGroupingAggregatorFunction.create(channels, driverContext); - } - - @Override - public String describe() { - return "rate of ints"; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java deleted file mode 100644 index 1da0d4df35bae..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.lang.StringBuilder; -import java.util.List; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntArrayBlock; -import org.elasticsearch.compute.data.IntBigArrayBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.LongVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link GroupingAggregatorFunction} implementation for {@link RateIntAggregator}. - * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
- */ -public final class RateIntGroupingAggregatorFunction implements GroupingAggregatorFunction { - private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("timestamps", ElementType.LONG), - new IntermediateStateDesc("values", ElementType.INT), - new IntermediateStateDesc("sampleCounts", ElementType.INT), - new IntermediateStateDesc("resets", ElementType.DOUBLE) ); - - private final RateIntAggregator.IntRateGroupingState state; - - private final List channels; - - private final DriverContext driverContext; - - public RateIntGroupingAggregatorFunction(List channels, - RateIntAggregator.IntRateGroupingState state, DriverContext driverContext) { - this.channels = channels; - this.state = state; - this.driverContext = driverContext; - } - - public static RateIntGroupingAggregatorFunction create(List channels, - DriverContext driverContext) { - return new RateIntGroupingAggregatorFunction(channels, RateIntAggregator.initGrouping(driverContext), driverContext); - } - - public static List intermediateStateDesc() { - return INTERMEDIATE_STATE_DESC; - } - - @Override - public int intermediateBlockCount() { - return INTERMEDIATE_STATE_DESC.size(); - } - - @Override - public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, - Page page) { - IntBlock valueBlock = page.getBlock(channels.get(0)); - LongBlock timestampBlock = page.getBlock(channels.get(1)); - IntVector valueVector = valueBlock.asVector(); - if (valueVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - LongVector timestampVector = timestampBlock.asVector(); - if (timestampVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void close() { - } - }; - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, IntBlock valueBlock, - LongBlock timestampBlock) { - for (int 
groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - int valueValue = valueBlock.getInt(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateIntAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, IntVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueValue = valueVector.getInt(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateIntAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - IntBlock values = (IntBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - RateIntAggregator.combineIntermediate(state, groupId, timestamps, values, 
sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - int valueValue = valueBlock.getInt(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateIntAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueValue = valueVector.getInt(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateIntAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - IntBlock values = (IntBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + 
groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - RateIntAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, IntBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupId = groups.getInt(groupPosition); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - int valueValue = valueBlock.getInt(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateIntAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, IntVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - int groupId = groups.getInt(groupPosition); - int valueValue = valueVector.getInt(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateIntAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - IntBlock values = (IntBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = groups.getInt(groupPosition); - int valuesPosition = groupPosition + positionOffset; - RateIntAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - - private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, IntBlock valueBlock, - LongBlock 
timestampBlock) { - if (valueBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - if (timestampBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - } - - @Override - public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { - state.enableGroupIdTracking(seenGroupIds); - } - - @Override - public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { - state.toIntermediate(blocks, offset, selected, driverContext); - } - - @Override - public void evaluateFinal(Block[] blocks, int offset, IntVector selected, - GroupingAggregatorEvaluationContext ctx) { - blocks[offset] = RateIntAggregator.evaluateFinal(state, selected, ctx); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(getClass().getSimpleName()).append("["); - sb.append("channels=").append(channels); - sb.append("]"); - return sb.toString(); - } - - @Override - public void close() { - state.close(); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongAggregatorFunctionSupplier.java deleted file mode 100644 index 9b1200b9bd920..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongAggregatorFunctionSupplier.java +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.util.List; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link AggregatorFunctionSupplier} implementation for {@link RateLongAggregator}. - * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. 
- */ -public final class RateLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { - public RateLongAggregatorFunctionSupplier() { - } - - @Override - public List nonGroupingIntermediateStateDesc() { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public List groupingIntermediateStateDesc() { - return RateLongGroupingAggregatorFunction.intermediateStateDesc(); - } - - @Override - public AggregatorFunction aggregator(DriverContext driverContext, List channels) { - throw new UnsupportedOperationException("non-grouping aggregator is not supported"); - } - - @Override - public RateLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, - List channels) { - return RateLongGroupingAggregatorFunction.create(channels, driverContext); - } - - @Override - public String describe() { - return "rate of longs"; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java deleted file mode 100644 index f1bfccb9dd848..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License -// 2.0; you may not use this file except in compliance with the Elastic License -// 2.0. -package org.elasticsearch.compute.aggregation; - -import java.lang.Integer; -import java.lang.Override; -import java.lang.String; -import java.lang.StringBuilder; -import java.util.List; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntArrayBlock; -import org.elasticsearch.compute.data.IntBigArrayBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.data.LongVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.DriverContext; - -/** - * {@link GroupingAggregatorFunction} implementation for {@link RateLongAggregator}. - * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
- */ -public final class RateLongGroupingAggregatorFunction implements GroupingAggregatorFunction { - private static final List INTERMEDIATE_STATE_DESC = List.of( - new IntermediateStateDesc("timestamps", ElementType.LONG), - new IntermediateStateDesc("values", ElementType.LONG), - new IntermediateStateDesc("sampleCounts", ElementType.INT), - new IntermediateStateDesc("resets", ElementType.DOUBLE) ); - - private final RateLongAggregator.LongRateGroupingState state; - - private final List channels; - - private final DriverContext driverContext; - - public RateLongGroupingAggregatorFunction(List channels, - RateLongAggregator.LongRateGroupingState state, DriverContext driverContext) { - this.channels = channels; - this.state = state; - this.driverContext = driverContext; - } - - public static RateLongGroupingAggregatorFunction create(List channels, - DriverContext driverContext) { - return new RateLongGroupingAggregatorFunction(channels, RateLongAggregator.initGrouping(driverContext), driverContext); - } - - public static List intermediateStateDesc() { - return INTERMEDIATE_STATE_DESC; - } - - @Override - public int intermediateBlockCount() { - return INTERMEDIATE_STATE_DESC.size(); - } - - @Override - public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, - Page page) { - LongBlock valueBlock = page.getBlock(channels.get(0)); - LongBlock timestampBlock = page.getBlock(channels.get(1)); - LongVector valueVector = valueBlock.asVector(); - if (valueVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - LongVector timestampVector = timestampBlock.asVector(); - if (timestampVector == null) { - maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); - } - - @Override - public void close() { - } - }; - } - return new GroupingAggregatorFunction.AddInput() { - @Override - public void add(int positionOffset, IntArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntBigArrayBlock groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void add(int positionOffset, IntVector groupIds) { - addRawInput(positionOffset, groupIds, valueVector, timestampVector); - } - - @Override - public void close() { - } - }; - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, LongBlock valueBlock, - LongBlock timestampBlock) { 
- for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - long valueValue = valueBlock.getLong(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateLongAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntArrayBlock groups, LongVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - long valueValue = valueVector.getLong(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateLongAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - LongBlock values = (LongBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - RateLongAggregator.combineIntermediate(state, groupId, timestamps, 
values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - long valueValue = valueBlock.getLong(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateLongAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - } - - private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int valuesPosition = groupPosition + positionOffset; - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - long valueValue = valueVector.getLong(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateLongAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - LongBlock values = (LongBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - if (groups.isNull(groupPosition)) { - continue; - } - int groupStart = groups.getFirstValueIndex(groupPosition); - int groupEnd = groupStart + 
groups.getValueCount(groupPosition); - for (int g = groupStart; g < groupEnd; g++) { - int groupId = groups.getInt(g); - int valuesPosition = groupPosition + positionOffset; - RateLongAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, LongBlock valueBlock, - LongBlock timestampBlock) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - if (valueBlock.isNull(valuesPosition)) { - continue; - } - if (timestampBlock.isNull(valuesPosition)) { - continue; - } - int groupId = groups.getInt(groupPosition); - int valueStart = valueBlock.getFirstValueIndex(valuesPosition); - int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); - for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { - long valueValue = valueBlock.getLong(valueOffset); - int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); - int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); - for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { - long timestampValue = timestampBlock.getLong(timestampOffset); - RateLongAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - } - } - - private void addRawInput(int positionOffset, IntVector groups, LongVector valueVector, - LongVector timestampVector) { - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int valuesPosition = groupPosition + positionOffset; - int groupId = groups.getInt(groupPosition); - long valueValue = valueVector.getLong(valuesPosition); - long timestampValue = timestampVector.getLong(valuesPosition); - RateLongAggregator.combine(state, groupId, valueValue, timestampValue); - } - } - - @Override - public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { - state.enableGroupIdTracking(new SeenGroupIds.Empty()); - assert channels.size() == intermediateBlockCount(); - Block timestampsUncast = page.getBlock(channels.get(0)); - if (timestampsUncast.areAllValuesNull()) { - return; - } - LongBlock timestamps = (LongBlock) timestampsUncast; - Block valuesUncast = page.getBlock(channels.get(1)); - if (valuesUncast.areAllValuesNull()) { - return; - } - LongBlock values = (LongBlock) valuesUncast; - Block sampleCountsUncast = page.getBlock(channels.get(2)); - if (sampleCountsUncast.areAllValuesNull()) { - return; - } - IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); - Block resetsUncast = page.getBlock(channels.get(3)); - if (resetsUncast.areAllValuesNull()) { - return; - } - DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); - assert timestamps.getPositionCount() == values.getPositionCount() && timestamps.getPositionCount() == sampleCounts.getPositionCount() && timestamps.getPositionCount() == resets.getPositionCount(); - for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { - int groupId = groups.getInt(groupPosition); - int valuesPosition = groupPosition + positionOffset; - RateLongAggregator.combineIntermediate(state, groupId, timestamps, values, sampleCounts.getInt(valuesPosition), resets.getDouble(valuesPosition), valuesPosition); - } - } - - private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, LongBlock valueBlock, - 
LongBlock timestampBlock) { - if (valueBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - if (timestampBlock.mayHaveNulls()) { - state.enableGroupIdTracking(seenGroupIds); - } - } - - @Override - public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { - state.enableGroupIdTracking(seenGroupIds); - } - - @Override - public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { - state.toIntermediate(blocks, offset, selected, driverContext); - } - - @Override - public void evaluateFinal(Block[] blocks, int offset, IntVector selected, - GroupingAggregatorEvaluationContext ctx) { - blocks[offset] = RateLongAggregator.evaluateFinal(state, selected, ctx); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(getClass().getSimpleName()).append("["); - sb.append("channels=").append(channels); - sb.append("]"); - return sb.toString(); - } - - @Override - public void close() { - state.close(); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..adec9103d08fa --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java @@ -0,0 +1,446 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.aggregation.oldrate.OldRateDoubleAggregator; +import org.elasticsearch.compute.aggregation.oldrate.OldRateDoubleGroupingAggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +import java.util.List; + +public final class RateDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { + + public static final class FunctionSupplier implements AggregatorFunctionSupplier { + public FunctionSupplier() { + + } + + @Override + public List nonGroupingIntermediateStateDesc() { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public List groupingIntermediateStateDesc() { + return RateDoubleGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext, List channels) { + throw 
new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public RateDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, List channels) { + return new RateDoubleGroupingAggregatorFunction(channels, driverContext); + } + + @Override + public String describe() { + return "rate of doubles"; + } + } + + private final List channels; + + private final DriverContext driverContext; + private ObjectArray buffers; + private final OldRateDoubleAggregator.DoubleRateGroupingState oldState; + private final OldRateDoubleGroupingAggregatorFunction oldRate; + + public RateDoubleGroupingAggregatorFunction(List channels, DriverContext driverContext) { + this.channels = channels; + this.driverContext = driverContext; + this.buffers = driverContext.bigArrays().newObjectArray(256); + this.oldState = new OldRateDoubleAggregator.DoubleRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); + this.oldRate = new OldRateDoubleGroupingAggregatorFunction(channels, oldState, driverContext); + } + + public static List intermediateStateDesc() { + return OldRateDoubleGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public int intermediateBlockCount() { + return intermediateStateDesc().size(); + } + + @Override + public AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, Page page) { + DoubleBlock valuesBlock = page.getBlock(channels.get(0)); + LongBlock timestampsBlock = page.getBlock(channels.get(1)); + LongVector timestampsVector = timestampsBlock.asVector(); + if (timestampsVector == null) { + assert false : "expected timestamp vector in time-series aggregation"; + throw new IllegalStateException("expected timestamp vector in time-series aggregation"); + } + return new AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + var valuesVector = valuesBlock.asVector(); + if (valuesVector != null) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } else { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + } + + @Override + public void close() { + + } + }; + } + + // Note that this path can be executed randomly in tests, not in production + private void addRawInput(int positionOffset, IntBlock groups, DoubleBlock valueBlock, LongVector timestampVector) { + int lastGroup = -1; + Buffer buffer = null; + int positionCount = groups.getPositionCount(); + for (int p = 0; p < positionCount; p++) { + if (groups.isNull(p)) { + continue; + } + int valuePosition = p + positionOffset; + if (valueBlock.isNull(valuePosition)) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + int groupStart = groups.getFirstValueIndex(p); + int groupEnd = groupStart + groups.getValueCount(p); + long timestamp = timestampVector.getLong(valuePosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, positionCount - p, timestamp); + lastGroup = groupId; + } + int valueStart = valueBlock.getFirstValueIndex(valuePosition); + buffer.appendOneValue(timestamp, valueBlock.getDouble(valueStart)); 
+ } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleBlock valueBlock, LongVector timestampVector) { + if (groups.isConstant()) { + int groupId = groups.getInt(0); + Buffer buffer = getBuffer(groupId, groups.getPositionCount(), timestampVector.getLong(0)); + for (int p = 0; p < groups.getPositionCount(); p++) { + int valuePosition = positionOffset + p; + if (valueBlock.isNull(valuePosition)) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + buffer.appendOneValue(timestampVector.getLong(valuePosition), valueBlock.getDouble(valuePosition)); + } + } else { + int lastGroup = -1; + Buffer buffer = null; + for (int p = 0; p < groups.getPositionCount(); p++) { + int valuePosition = positionOffset + p; + if (valueBlock.isNull(valuePosition) == false) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + long timestamp = timestampVector.getLong(valuePosition); + int groupId = groups.getInt(p); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, groups.getPositionCount() - p, timestamp); + lastGroup = groupId; + } + buffer.appendOneValue(timestamp, valueBlock.getDouble(valuePosition)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleVector valueVector, LongVector timestampVector) { + int positionCount = groups.getPositionCount(); + if (groups.isConstant()) { + int groupId = groups.getInt(0); + Buffer state = getBuffer(groupId, positionCount, timestampVector.getLong(0)); + for (int p = 0; p < positionCount; p++) { + int valuePosition = positionOffset + p; + state.appendOneValue(timestampVector.getLong(valuePosition), valueVector.getDouble(valuePosition)); + } + } else { + int lastGroup = -1; + Buffer buffer = null; + for (int p = 0; p < positionCount; p++) { + int valuePosition = positionOffset + p; + long timestamp = timestampVector.getLong(valuePosition); + int groupId = groups.getInt(p); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, positionCount - p, timestamp); + lastGroup = groupId; + } + buffer.appendOneValue(timestamp, valueVector.getDouble(valuePosition)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + oldRate.selectedMayContainUnseenGroups(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + flushBuffers(selected); + oldRate.evaluateIntermediate(blocks, offset, selected); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + flushBuffers(selected); + oldRate.evaluateFinal(blocks, offset, selected, ctx); + } + + void flushBuffers(IntVector selected) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int groupId = selected.getInt(i); + if (groupId < buffers.size()) { + var buffer = buffers.getAndSet(groupId, null); + if (buffer != null) { + try (buffer) 
{ + flushBufferToOldRate(buffer, groupId); + } + } + } + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + for (long i = 0; i < buffers.size(); i++) { + Buffer buffer = buffers.get(i); + if (buffer != null) { + buffer.close(); + } + } + buffers.close(); + Releasables.close(oldRate); + } + + private static class Slice { + int start; + long timestamp; + final int end; + final Buffer buffer; + + Slice(Buffer buffer, int start, int end) { + this.buffer = buffer; + this.start = start; + this.end = end; + this.timestamp = buffer.timestamps.get(start); + } + + boolean exhausted() { + return start >= end; + } + + int next() { + int index = start++; + if (start < end) { + timestamp = buffer.timestamps.get(start); + } + return index; + } + } + + /** + * Flushes the buffering data points to the old rate state. + */ + void flushBufferToOldRate(Buffer buffer, int groupId) { + if (buffer.totalCount == 1) { + try ( + var ts = driverContext.blockFactory().newConstantLongVector(buffer.timestamps.get(0), 1); + var vs = driverContext.blockFactory().newConstantDoubleVector(buffer.values.get(0), 1) + ) { + oldState.combine(groupId, ts.asBlock(), vs.asBlock(), 1, 0.0, 0); + } + return; + } + var pq = buffer.mergeQueue(); + // first + final long lastTimestamp; + final double lastValue; + { + Slice top = pq.top(); + int position = top.next(); + if (top.exhausted()) { + pq.pop(); + } else { + pq.updateTop(); + } + lastTimestamp = buffer.timestamps.get(position); + lastValue = buffer.values.get(position); + } + var prevValue = lastValue; + double reset = 0; + int position = -1; + while (pq.size() > 0) { + Slice top = pq.top(); + position = top.next(); + if (top.exhausted()) { + pq.pop(); + } else { + pq.updateTop(); + } + var val = buffer.values.get(position); + reset += dv(val, prevValue) + dv(prevValue, lastValue) - dv(val, lastValue); + prevValue = val; + } + try ( + var tBuilder = driverContext.blockFactory().newLongBlockBuilder(2); + var vBuilder = driverContext.blockFactory().newDoubleBlockBuilder(2) + ) { + tBuilder.beginPositionEntry(); + tBuilder.appendLong(lastTimestamp); + tBuilder.appendLong(buffer.timestamps.get(position)); + tBuilder.endPositionEntry(); + + vBuilder.beginPositionEntry(); + vBuilder.appendDouble(lastValue); + vBuilder.appendDouble(buffer.values.get(position)); + vBuilder.endPositionEntry(); + + try (var ts = tBuilder.build(); var vs = vBuilder.build()) { + oldState.combine(groupId, ts, vs, buffer.totalCount, reset, 0); + } + } + } + + // TODO: copied from old rate - simplify this or explain why we need it? + private double dv(double v0, double v1) { + return v0 > v1 ? v1 : v1 - v0; + } + + private Buffer getBuffer(int groupId, int extraSize, long firstTimestamp) { + buffers = driverContext.bigArrays().grow(buffers, groupId + 1); + Buffer state = buffers.get(groupId); + if (state == null) { + state = new Buffer(driverContext.bigArrays(), Math.max(16, extraSize)); + buffers.set(groupId, state); + } else { + state.ensureCapacity(driverContext.bigArrays(), extraSize, firstTimestamp); + } + return state; + } + + /** + * Buffers data points in two arrays: one for timestamps and one for values, partitioned into multiple slices. + * Each slice is sorted in descending order of timestamp. 
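+ * For example, pages with timestamps {300, 200, 100} and {250, 150} are buffered as two slices,
+ * which the merge queue later replays as 300, 250, 200, 150, 100.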
A new slice is created when a data point has a + * timestamp greater than the last point of the current slice. Since each page is sorted by descending timestamp, + * we only need to compare the first point of the new page with the last point of the current slice to decide + * if a new slice is needed. During merging, a priority queue is used to iterate through the slices, selecting + * the slice with the greatest timestamp. + */ + static final class Buffer implements Releasable { + private LongArray timestamps; + private DoubleArray values; + private int totalCount; + int[] sliceOffsets; + private static final int[] EMPTY_SLICES = new int[0]; + + Buffer(BigArrays bigArrays, int initialSize) { + this.timestamps = bigArrays.newLongArray(initialSize, false); + this.values = bigArrays.newDoubleArray(initialSize, false); + this.sliceOffsets = EMPTY_SLICES; + } + + void appendOneValue(long timestamp, double value) { + timestamps.set(totalCount, timestamp); + values.set(totalCount, value); + totalCount++; + } + + void ensureCapacity(BigArrays bigArrays, int count, long firstTimestamp) { + int newSize = totalCount + count; + timestamps = bigArrays.grow(timestamps, newSize); + values = bigArrays.grow(values, newSize); + if (totalCount > 0 && firstTimestamp > timestamps.get(totalCount - 1)) { + sliceOffsets = ArrayUtil.growExact(sliceOffsets, sliceOffsets.length + 1); + sliceOffsets[sliceOffsets.length - 1] = totalCount; + } + } + + PriorityQueue mergeQueue() { + PriorityQueue pq = new PriorityQueue<>(this.sliceOffsets.length + 1) { + @Override + protected boolean lessThan(Slice a, Slice b) { + return a.timestamp > b.timestamp; // want the latest timestamp first + } + }; + int startOffset = 0; + for (int sliceOffset : sliceOffsets) { + pq.add(new Slice(this, startOffset, sliceOffset)); + startOffset = sliceOffset; + } + pq.add(new Slice(this, startOffset, totalCount)); + return pq; + } + + @Override + public void close() { + timestamps.close(); + values.close(); + } + } + +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..2e78cfad7e276 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java @@ -0,0 +1,444 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.aggregation.oldrate.OldRateIntAggregator; +import org.elasticsearch.compute.aggregation.oldrate.OldRateIntGroupingAggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +import java.util.List; + +public final class RateIntGroupingAggregatorFunction implements GroupingAggregatorFunction { + + public static final class FunctionSupplier implements AggregatorFunctionSupplier { + public FunctionSupplier() { + + } + + @Override + public List nonGroupingIntermediateStateDesc() { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public List groupingIntermediateStateDesc() { + return RateIntGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext, List channels) { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public RateIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, List channels) { + return new RateIntGroupingAggregatorFunction(channels, driverContext); + } + + @Override + public String describe() { + return "rate of ints"; + } + } + + private final List channels; + + private final DriverContext driverContext; + private ObjectArray buffers; + private final OldRateIntAggregator.IntRateGroupingState oldState; + private final OldRateIntGroupingAggregatorFunction oldRate; + + public RateIntGroupingAggregatorFunction(List channels, DriverContext driverContext) { + this.channels = channels; + this.driverContext = driverContext; + this.buffers = driverContext.bigArrays().newObjectArray(256); + this.oldState = new OldRateIntAggregator.IntRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); + this.oldRate = new OldRateIntGroupingAggregatorFunction(channels, oldState, driverContext); + } + + public static List intermediateStateDesc() { + return OldRateIntGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public int intermediateBlockCount() { + return intermediateStateDesc().size(); + } + + @Override + public AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, Page page) { + IntBlock valuesBlock = page.getBlock(channels.get(0)); + LongBlock timestampsBlock = page.getBlock(channels.get(1)); + LongVector timestampsVector = timestampsBlock.asVector(); + if (timestampsVector == null) { + assert false : "expected timestamp vector in time-series aggregation"; + throw new IllegalStateException("expected timestamp vector in time-series aggregation"); + } + return new AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + 
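+ // group ids arrive as a block that may contain nulls or multi-valued positions here, so use the
+ // block-based addRawInput below (a path mostly exercised by tests, as its note explains)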
addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + var valuesVector = valuesBlock.asVector(); + if (valuesVector != null) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } else { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + } + + @Override + public void close() { + + } + }; + } + + // Note that this path can be executed randomly in tests, not in production + private void addRawInput(int positionOffset, IntBlock groups, IntBlock valueBlock, LongVector timestampVector) { + int lastGroup = -1; + Buffer buffer = null; + int positionCount = groups.getPositionCount(); + for (int p = 0; p < positionCount; p++) { + if (groups.isNull(p)) { + continue; + } + int valuePosition = p + positionOffset; + if (valueBlock.isNull(valuePosition)) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + int groupStart = groups.getFirstValueIndex(p); + int groupEnd = groupStart + groups.getValueCount(p); + long timestamp = timestampVector.getLong(valuePosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, positionCount - p, timestamp); + lastGroup = groupId; + } + int valueStart = valueBlock.getFirstValueIndex(valuePosition); + buffer.appendOneValue(timestamp, valueBlock.getInt(valueStart)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock valueBlock, LongVector timestampVector) { + if (groups.isConstant()) { + int groupId = groups.getInt(0); + Buffer buffer = getBuffer(groupId, groups.getPositionCount(), timestampVector.getLong(0)); + for (int p = 0; p < groups.getPositionCount(); p++) { + int valuePosition = positionOffset + p; + if (valueBlock.isNull(valuePosition)) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + buffer.appendOneValue(timestampVector.getLong(valuePosition), valueBlock.getInt(valuePosition)); + } + } else { + int lastGroup = -1; + Buffer buffer = null; + for (int p = 0; p < groups.getPositionCount(); p++) { + int valuePosition = positionOffset + p; + if (valueBlock.isNull(valuePosition) == false) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + long timestamp = timestampVector.getLong(valuePosition); + int groupId = groups.getInt(p); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, groups.getPositionCount() - p, timestamp); + lastGroup = groupId; + } + buffer.appendOneValue(timestamp, valueBlock.getInt(valuePosition)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector valueVector, LongVector timestampVector) { + int positionCount = groups.getPositionCount(); + if (groups.isConstant()) { + int groupId = groups.getInt(0); + Buffer state = getBuffer(groupId, positionCount, timestampVector.getLong(0)); + for (int p = 0; p < positionCount; p++) { + int valuePosition = positionOffset + p; + state.appendOneValue(timestampVector.getLong(valuePosition), valueVector.getInt(valuePosition)); + } + } else { + int lastGroup = -1; + Buffer buffer = null; + for (int p = 0; p < positionCount; p++) { + int 
valuePosition = positionOffset + p; + long timestamp = timestampVector.getLong(valuePosition); + int groupId = groups.getInt(p); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, positionCount - p, timestamp); + lastGroup = groupId; + } + buffer.appendOneValue(timestamp, valueVector.getInt(valuePosition)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + oldRate.selectedMayContainUnseenGroups(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + flushBuffers(selected); + oldRate.evaluateIntermediate(blocks, offset, selected); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + flushBuffers(selected); + oldRate.evaluateFinal(blocks, offset, selected, ctx); + } + + void flushBuffers(IntVector selected) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int groupId = selected.getInt(i); + if (groupId < buffers.size()) { + var buffer = buffers.getAndSet(groupId, null); + if (buffer != null) { + try (buffer) { + flushBufferToOldRate(buffer, groupId); + } + } + } + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + for (long i = 0; i < buffers.size(); i++) { + Buffer buffer = buffers.get(i); + if (buffer != null) { + buffer.close(); + } + } + buffers.close(); + Releasables.close(oldRate); + } + + private static class Slice { + int start; + long timestamp; + final int end; + final Buffer buffer; + + Slice(Buffer buffer, int start, int end) { + this.buffer = buffer; + this.start = start; + this.end = end; + this.timestamp = buffer.timestamps.get(start); + } + + boolean exhausted() { + return start >= end; + } + + int next() { + int index = start++; + if (start < end) { + timestamp = buffer.timestamps.get(start); + } + return index; + } + } + + /** + * Flushes the buffering data points to the old rate state. 
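+ * Samples are drained from the slice merge queue in descending timestamp order; counter resets between + * consecutive samples are accumulated via {@code dv}, and only the newest and oldest samples, together with + * the total sample count and the accumulated reset, are handed over to the old rate state.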
+ */ + void flushBufferToOldRate(Buffer buffer, int groupId) { + if (buffer.totalCount == 1) { + try ( + var ts = driverContext.blockFactory().newConstantLongVector(buffer.timestamps.get(0), 1); + var vs = driverContext.blockFactory().newConstantIntVector(buffer.values.get(0), 1) + ) { + oldState.combine(groupId, ts.asBlock(), vs.asBlock(), 1, 0.0, 0); + } + return; + } + var pq = buffer.mergeQueue(); + // first + final long lastTimestamp; + final int lastValue; + { + Slice top = pq.top(); + int position = top.next(); + if (top.exhausted()) { + pq.pop(); + } else { + pq.updateTop(); + } + lastTimestamp = buffer.timestamps.get(position); + lastValue = buffer.values.get(position); + } + var prevValue = lastValue; + double reset = 0; + int position = -1; + while (pq.size() > 0) { + Slice top = pq.top(); + position = top.next(); + if (top.exhausted()) { + pq.pop(); + } else { + pq.updateTop(); + } + var val = buffer.values.get(position); + reset += dv(val, prevValue) + dv(prevValue, lastValue) - dv(val, lastValue); + prevValue = val; + } + try ( + var tBuilder = driverContext.blockFactory().newLongBlockBuilder(2); + var vBuilder = driverContext.blockFactory().newIntBlockBuilder(2) + ) { + tBuilder.beginPositionEntry(); + tBuilder.appendLong(lastTimestamp); + tBuilder.appendLong(buffer.timestamps.get(position)); + tBuilder.endPositionEntry(); + + vBuilder.beginPositionEntry(); + vBuilder.appendInt(lastValue); + vBuilder.appendInt(buffer.values.get(position)); + vBuilder.endPositionEntry(); + + try (var ts = tBuilder.build(); var vs = vBuilder.build()) { + oldState.combine(groupId, ts, vs, buffer.totalCount, reset, 0); + } + } + } + + // TODO: copied from old rate - simplify this or explain why we need it? + private double dv(double v0, double v1) { + return v0 > v1 ? v1 : v1 - v0; + } + + private Buffer getBuffer(int groupId, int extraSize, long firstTimestamp) { + buffers = driverContext.bigArrays().grow(buffers, groupId + 1); + Buffer state = buffers.get(groupId); + if (state == null) { + state = new Buffer(driverContext.bigArrays(), Math.max(16, extraSize)); + buffers.set(groupId, state); + } else { + state.ensureCapacity(driverContext.bigArrays(), extraSize, firstTimestamp); + } + return state; + } + + /** + * Buffers data points in two arrays: one for timestamps and one for values, partitioned into multiple slices. + * Each slice is sorted in descending order of timestamp. A new slice is created when a data point has a + * timestamp greater than the last point of the current slice. Since each page is sorted by descending timestamp, + * we only need to compare the first point of the new page with the last point of the current slice to decide + * if a new slice is needed. During merging, a priority queue is used to iterate through the slices, selecting + * the slice with the greatest timestamp. 
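+ * For example (hypothetical values): appending a page with timestamps {30, 20, 10} and then a page with + * timestamps {25, 15} records a slice offset at index 3, so the buffer holds the slices {30, 20, 10} and + * {25, 15}; the merge queue then drains the data points in the order 30, 25, 20, 15, 10.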
+ */ + static final class Buffer implements Releasable { + private LongArray timestamps; + private IntArray values; + private int totalCount; + int[] sliceOffsets; + private static final int[] EMPTY_SLICES = new int[0]; + + Buffer(BigArrays bigArrays, int initialSize) { + this.timestamps = bigArrays.newLongArray(initialSize, false); + this.values = bigArrays.newIntArray(initialSize, false); + this.sliceOffsets = EMPTY_SLICES; + } + + void appendOneValue(long timestamp, int value) { + timestamps.set(totalCount, timestamp); + values.set(totalCount, value); + totalCount++; + } + + void ensureCapacity(BigArrays bigArrays, int count, long firstTimestamp) { + int newSize = totalCount + count; + timestamps = bigArrays.grow(timestamps, newSize); + values = bigArrays.grow(values, newSize); + if (totalCount > 0 && firstTimestamp > timestamps.get(totalCount - 1)) { + sliceOffsets = ArrayUtil.growExact(sliceOffsets, sliceOffsets.length + 1); + sliceOffsets[sliceOffsets.length - 1] = totalCount; + } + } + + PriorityQueue mergeQueue() { + PriorityQueue pq = new PriorityQueue<>(this.sliceOffsets.length + 1) { + @Override + protected boolean lessThan(Slice a, Slice b) { + return a.timestamp > b.timestamp; // want the latest timestamp first + } + }; + int startOffset = 0; + for (int sliceOffset : sliceOffsets) { + pq.add(new Slice(this, startOffset, sliceOffset)); + startOffset = sliceOffset; + } + pq.add(new Slice(this, startOffset, totalCount)); + return pq; + } + + @Override + public void close() { + timestamps.close(); + values.close(); + } + } + +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..00d410e5b382e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java @@ -0,0 +1,443 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.compute.aggregation; + +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.compute.aggregation.oldrate.OldRateLongAggregator; +import org.elasticsearch.compute.aggregation.oldrate.OldRateLongGroupingAggregatorFunction; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; + +import java.util.List; + +public final class RateLongGroupingAggregatorFunction implements GroupingAggregatorFunction { + + public static final class FunctionSupplier implements AggregatorFunctionSupplier { + public FunctionSupplier() { + + } + + @Override + public List nonGroupingIntermediateStateDesc() { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public List groupingIntermediateStateDesc() { + return RateLongGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext, List channels) { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public RateLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, List channels) { + return new RateLongGroupingAggregatorFunction(channels, driverContext); + } + + @Override + public String describe() { + return "rate of longs"; + } + } + + private final List channels; + + private final DriverContext driverContext; + private ObjectArray buffers; + private final OldRateLongAggregator.LongRateGroupingState oldState; + private final OldRateLongGroupingAggregatorFunction oldRate; + + public RateLongGroupingAggregatorFunction(List channels, DriverContext driverContext) { + this.channels = channels; + this.driverContext = driverContext; + this.buffers = driverContext.bigArrays().newObjectArray(256); + this.oldState = new OldRateLongAggregator.LongRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); + this.oldRate = new OldRateLongGroupingAggregatorFunction(channels, oldState, driverContext); + } + + public static List intermediateStateDesc() { + return OldRateLongGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public int intermediateBlockCount() { + return intermediateStateDesc().size(); + } + + @Override + public AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, Page page) { + LongBlock valuesBlock = page.getBlock(channels.get(0)); + LongBlock timestampsBlock = page.getBlock(channels.get(1)); + LongVector timestampsVector = timestampsBlock.asVector(); + if (timestampsVector == null) { + assert false : "expected timestamp vector in time-series aggregation"; + throw new IllegalStateException("expected timestamp vector in time-series aggregation"); + } + return new AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, 
valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + var valuesVector = valuesBlock.asVector(); + if (valuesVector != null) { + addRawInput(positionOffset, groupIds, valuesVector, timestampsVector); + } else { + addRawInput(positionOffset, groupIds, valuesBlock, timestampsVector); + } + } + + @Override + public void close() { + + } + }; + } + + // Note: this path is only exercised randomly in tests, never in production + private void addRawInput(int positionOffset, IntBlock groups, LongBlock valueBlock, LongVector timestampVector) { + int lastGroup = -1; + Buffer buffer = null; + int positionCount = groups.getPositionCount(); + for (int p = 0; p < positionCount; p++) { + if (groups.isNull(p)) { + continue; + } + int valuePosition = p + positionOffset; + if (valueBlock.isNull(valuePosition)) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + int groupStart = groups.getFirstValueIndex(p); + int groupEnd = groupStart + groups.getValueCount(p); + long timestamp = timestampVector.getLong(valuePosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, positionCount - p, timestamp); + lastGroup = groupId; + } + int valueStart = valueBlock.getFirstValueIndex(valuePosition); + buffer.appendOneValue(timestamp, valueBlock.getLong(valueStart)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongBlock valueBlock, LongVector timestampVector) { + if (groups.isConstant()) { + int groupId = groups.getInt(0); + Buffer buffer = getBuffer(groupId, groups.getPositionCount(), timestampVector.getLong(0)); + for (int p = 0; p < groups.getPositionCount(); p++) { + int valuePosition = positionOffset + p; + if (valueBlock.isNull(valuePosition)) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + buffer.appendOneValue(timestampVector.getLong(valuePosition), valueBlock.getLong(valuePosition)); + } + } else { + int lastGroup = -1; + Buffer buffer = null; + for (int p = 0; p < groups.getPositionCount(); p++) { + int valuePosition = positionOffset + p; + if (valueBlock.isNull(valuePosition)) { + continue; + } + assert valueBlock.getValueCount(valuePosition) == 1 : "expected single-valued block " + valueBlock; + long timestamp = timestampVector.getLong(valuePosition); + int groupId = groups.getInt(p); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, groups.getPositionCount() - p, timestamp); + lastGroup = groupId; + } + buffer.appendOneValue(timestamp, valueBlock.getLong(valuePosition)); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongVector valueVector, LongVector timestampVector) { + int positionCount = groups.getPositionCount(); + if (groups.isConstant()) { + int groupId = groups.getInt(0); + Buffer state = getBuffer(groupId, positionCount, timestampVector.getLong(0)); + for (int p = 0; p < positionCount; p++) { + int valuePosition = positionOffset + p; + state.appendOneValue(timestampVector.getLong(valuePosition), valueVector.getLong(valuePosition)); + } + } else { + int lastGroup = -1; + Buffer buffer = null; + for (int p = 0; p < positionCount; p++) { + int valuePosition = positionOffset 
+ p; + long timestamp = timestampVector.getLong(valuePosition); + int groupId = groups.getInt(p); + if (lastGroup != groupId) { + buffer = getBuffer(groupId, positionCount - p, timestamp); + lastGroup = groupId; + } + buffer.appendOneValue(timestamp, valueVector.getLong(valuePosition)); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + oldRate.addIntermediateInput(positionOffset, groups, page); + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + oldRate.selectedMayContainUnseenGroups(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + flushBuffers(selected); + oldRate.evaluateIntermediate(blocks, offset, selected); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + flushBuffers(selected); + oldRate.evaluateFinal(blocks, offset, selected, ctx); + } + + void flushBuffers(IntVector selected) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int groupId = selected.getInt(i); + if (groupId < buffers.size()) { + var buffer = buffers.getAndSet(groupId, null); + if (buffer != null) { + try (buffer) { + flushBufferToOldRate(buffer, groupId); + } + } + } + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + for (long i = 0; i < buffers.size(); i++) { + Buffer buffer = buffers.get(i); + if (buffer != null) { + buffer.close(); + } + } + buffers.close(); + Releasables.close(oldRate); + } + + private static class Slice { + int start; + long timestamp; + final int end; + final Buffer buffer; + + Slice(Buffer buffer, int start, int end) { + this.buffer = buffer; + this.start = start; + this.end = end; + this.timestamp = buffer.timestamps.get(start); + } + + boolean exhausted() { + return start >= end; + } + + int next() { + int index = start++; + if (start < end) { + timestamp = buffer.timestamps.get(start); + } + return index; + } + } + + /** + * Flushes the buffering data points to the old rate state. 
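+ * Samples are drained from the slice merge queue in descending timestamp order; counter resets between + * consecutive samples are accumulated via {@code dv}, and only the newest and oldest samples, together with + * the total sample count and the accumulated reset, are handed over to the old rate state.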
+ */ + void flushBufferToOldRate(Buffer buffer, int groupId) { + if (buffer.totalCount == 1) { + try ( + var ts = driverContext.blockFactory().newConstantLongVector(buffer.timestamps.get(0), 1); + var vs = driverContext.blockFactory().newConstantLongVector(buffer.values.get(0), 1) + ) { + oldState.combine(groupId, ts.asBlock(), vs.asBlock(), 1, 0.0, 0); + } + return; + } + var pq = buffer.mergeQueue(); + // first + final long lastTimestamp; + final long lastValue; + { + Slice top = pq.top(); + int position = top.next(); + if (top.exhausted()) { + pq.pop(); + } else { + pq.updateTop(); + } + lastTimestamp = buffer.timestamps.get(position); + lastValue = buffer.values.get(position); + } + var prevValue = lastValue; + double reset = 0; + int position = -1; + while (pq.size() > 0) { + Slice top = pq.top(); + position = top.next(); + if (top.exhausted()) { + pq.pop(); + } else { + pq.updateTop(); + } + var val = buffer.values.get(position); + reset += dv(val, prevValue) + dv(prevValue, lastValue) - dv(val, lastValue); + prevValue = val; + } + try ( + var tBuilder = driverContext.blockFactory().newLongBlockBuilder(2); + var vBuilder = driverContext.blockFactory().newLongBlockBuilder(2) + ) { + tBuilder.beginPositionEntry(); + tBuilder.appendLong(lastTimestamp); + tBuilder.appendLong(buffer.timestamps.get(position)); + tBuilder.endPositionEntry(); + + vBuilder.beginPositionEntry(); + vBuilder.appendLong(lastValue); + vBuilder.appendLong(buffer.values.get(position)); + vBuilder.endPositionEntry(); + + try (var ts = tBuilder.build(); var vs = vBuilder.build()) { + oldState.combine(groupId, ts, vs, buffer.totalCount, reset, 0); + } + } + } + + // TODO: copied from old rate - simplify this or explain why we need it? + private double dv(double v0, double v1) { + return v0 > v1 ? v1 : v1 - v0; + } + + private Buffer getBuffer(int groupId, int extraSize, long firstTimestamp) { + buffers = driverContext.bigArrays().grow(buffers, groupId + 1); + Buffer state = buffers.get(groupId); + if (state == null) { + state = new Buffer(driverContext.bigArrays(), Math.max(16, extraSize)); + buffers.set(groupId, state); + } else { + state.ensureCapacity(driverContext.bigArrays(), extraSize, firstTimestamp); + } + return state; + } + + /** + * Buffers data points in two arrays: one for timestamps and one for values, partitioned into multiple slices. + * Each slice is sorted in descending order of timestamp. A new slice is created when a data point has a + * timestamp greater than the last point of the current slice. Since each page is sorted by descending timestamp, + * we only need to compare the first point of the new page with the last point of the current slice to decide + * if a new slice is needed. During merging, a priority queue is used to iterate through the slices, selecting + * the slice with the greatest timestamp. 
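+ * For example (hypothetical values): appending a page with timestamps {30, 20, 10} and then a page with + * timestamps {25, 15} records a slice offset at index 3, so the buffer holds the slices {30, 20, 10} and + * {25, 15}; the merge queue then drains the data points in the order 30, 25, 20, 15, 10.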
+ */ + static final class Buffer implements Releasable { + private LongArray timestamps; + private LongArray values; + private int totalCount; + int[] sliceOffsets; + private static final int[] EMPTY_SLICES = new int[0]; + + Buffer(BigArrays bigArrays, int initialSize) { + this.timestamps = bigArrays.newLongArray(initialSize, false); + this.values = bigArrays.newLongArray(initialSize, false); + this.sliceOffsets = EMPTY_SLICES; + } + + void appendOneValue(long timestamp, long value) { + timestamps.set(totalCount, timestamp); + values.set(totalCount, value); + totalCount++; + } + + void ensureCapacity(BigArrays bigArrays, int count, long firstTimestamp) { + int newSize = totalCount + count; + timestamps = bigArrays.grow(timestamps, newSize); + values = bigArrays.grow(values, newSize); + if (totalCount > 0 && firstTimestamp > timestamps.get(totalCount - 1)) { + sliceOffsets = ArrayUtil.growExact(sliceOffsets, sliceOffsets.length + 1); + sliceOffsets[sliceOffsets.length - 1] = totalCount; + } + } + + PriorityQueue mergeQueue() { + PriorityQueue pq = new PriorityQueue<>(this.sliceOffsets.length + 1) { + @Override + protected boolean lessThan(Slice a, Slice b) { + return a.timestamp > b.timestamp; // want the latest timestamp first + } + }; + int startOffset = 0; + for (int sliceOffset : sliceOffsets) { + pq.add(new Slice(this, startOffset, sliceOffset)); + startOffset = sliceOffset; + } + pq.add(new Slice(this, startOffset, totalCount)); + return pq; + } + + @Override + public void close() { + timestamps.close(); + values.close(); + } + } + +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st deleted file mode 100644 index 3059ec96f2f7b..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.aggregation; - -// begin generated imports -import org.apache.lucene.util.Accountable; -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.ObjectArray; -import org.elasticsearch.compute.ann.GroupingAggregator; -import org.elasticsearch.compute.ann.IntermediateState; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.FloatBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LongBlock; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; -// end generated imports - -/** - * A rate grouping aggregation definition for $type$. - * This class is generated. Edit `X-RateAggregator.java.st` instead. 
- */ -@GroupingAggregator( - value = { - @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), - @IntermediateState(name = "values", type = "$TYPE$_BLOCK"), - @IntermediateState(name = "sampleCounts", type = "INT"), - @IntermediateState(name = "resets", type = "DOUBLE") } -) -public class Rate$Type$Aggregator { - - public static $Type$RateGroupingState initGrouping(DriverContext driverContext) { - return new $Type$RateGroupingState(driverContext.bigArrays(), driverContext.breaker()); - } - - public static void combine($Type$RateGroupingState current, int groupId, $type$ value, long timestamp) { - current.append(groupId, timestamp, value); - } - - public static void combineIntermediate( - $Type$RateGroupingState current, - int groupId, - LongBlock timestamps, - $Type$Block values, - int sampleCount, - double reset, - int otherPosition - ) { - current.combine(groupId, timestamps, values, sampleCount, reset, otherPosition); - } - - public static Block evaluateFinal($Type$RateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) { - return state.evaluateFinal(selected, evalContext); - } - - private static class $Type$RateState { - static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$RateState.class); - final long[] timestamps; // descending order - final $type$[] values; - // the timestamps and values arrays might have collapsed to fewer values than the actual sample count - int sampleCount = 0; - double reset = 0; - - $Type$RateState(int initialSize) { - this.timestamps = new long[initialSize]; - this.values = new $type$[initialSize]; - } - - $Type$RateState(long[] ts, $type$[] vs) { - this.timestamps = ts; - this.values = vs; - this.sampleCount = values.length; - } - - private $type$ dv($type$ v0, $type$ v1) { - // counter reset detection - return v0 > v1 ? 
v1 : v1 - v0; - } - - void append(long t, $type$ v) { - assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; - assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; - reset += dv(v, values[1]) + dv(values[1], values[0]) - dv(v, values[0]); - timestamps[1] = t; - values[1] = v; - sampleCount++; - } - - int entries() { - return timestamps.length; - } - - static long bytesUsed(int entries) { - var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) $BYTES$ * entries); - return BASE_RAM_USAGE + ts + vs; - } - } - - public static final class $Type$RateGroupingState implements Releasable, Accountable, GroupingAggregatorState { - private ObjectArray<$Type$RateState> states; - private final BigArrays bigArrays; - private final CircuitBreaker breaker; - private long stateBytes; // for individual states - - $Type$RateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { - this.bigArrays = bigArrays; - this.breaker = breaker; - this.states = bigArrays.newObjectArray(1); - } - - void ensureCapacity(int groupId) { - states = bigArrays.grow(states, groupId + 1); - } - - void adjustBreaker(long bytes) { - breaker.addEstimateBytesAndMaybeBreak(bytes, "<>"); - stateBytes += bytes; - assert stateBytes >= 0 : stateBytes; - } - - void append(int groupId, long timestamp, $type$ value) { - ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker($Type$RateState.bytesUsed(1)); - state = new $Type$RateState(new long[] { timestamp }, new $type$[] { value }); - states.set(groupId, state); - } else { - if (state.entries() == 1) { - adjustBreaker($Type$RateState.bytesUsed(2)); - state = new $Type$RateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value }); - states.set(groupId, state); - adjustBreaker(-$Type$RateState.bytesUsed(1)); // old state - } else { - state.append(timestamp, value); - } - } - } - - void combine(int groupId, LongBlock timestamps, $Type$Block values, int sampleCount, double reset, int otherPosition) { - final int valueCount = timestamps.getValueCount(otherPosition); - if (valueCount == 0) { - return; - } - final int firstIndex = timestamps.getFirstValueIndex(otherPosition); - ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker($Type$RateState.bytesUsed(valueCount)); - state = new $Type$RateState(valueCount); - state.reset = reset; - state.sampleCount = sampleCount; - states.set(groupId, state); - // TODO: add bulk_copy to Block - for (int i = 0; i < valueCount; i++) { - state.timestamps[i] = timestamps.getLong(firstIndex + i); - state.values[i] = values.get$Type$(firstIndex + i); - } - } else { - adjustBreaker($Type$RateState.bytesUsed(state.entries() + valueCount)); - var newState = new $Type$RateState(state.entries() + valueCount); - newState.reset = state.reset + reset; - newState.sampleCount = state.sampleCount + sampleCount; - states.set(groupId, newState); - merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-$Type$RateState.bytesUsed(state.entries())); // old state - } - } - - void merge($Type$RateState curr, $Type$RateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) { - int i = 0, j = 0, k = 0; - final int leftCount = curr.entries(); - while (i < leftCount && j < 
rightCount) { - final var t1 = curr.timestamps[i]; - final var t2 = timestamps.getLong(firstIndex + j); - if (t1 > t2) { - dst.timestamps[k] = t1; - dst.values[k] = curr.values[i]; - ++i; - } else { - dst.timestamps[k] = t2; - dst.values[k] = values.get$Type$(firstIndex + j); - ++j; - } - ++k; - } - if (i < leftCount) { - System.arraycopy(curr.timestamps, i, dst.timestamps, k, leftCount - i); - System.arraycopy(curr.values, i, dst.values, k, leftCount - i); - } - while (j < rightCount) { - dst.timestamps[k] = timestamps.getLong(firstIndex + j); - dst.values[k] = values.get$Type$(firstIndex + j); - ++k; - ++j; - } - } - - $Type$RateState mergeState($Type$RateState s1, $Type$RateState s2) { - var newLen = s1.entries() + s2.entries(); - adjustBreaker($Type$RateState.bytesUsed(newLen)); - var dst = new $Type$RateState(newLen); - dst.reset = s1.reset + s2.reset; - dst.sampleCount = s1.sampleCount + s2.sampleCount; - int i = 0, j = 0, k = 0; - while (i < s1.entries() && j < s2.entries()) { - if (s1.timestamps[i] > s2.timestamps[j]) { - dst.timestamps[k] = s1.timestamps[i]; - dst.values[k] = s1.values[i]; - ++i; - } else { - dst.timestamps[k] = s2.timestamps[j]; - dst.values[k] = s2.values[j]; - ++j; - } - ++k; - } - System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); - System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); - System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); - System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); - return dst; - } - - @Override - public long ramBytesUsed() { - return states.ramBytesUsed() + stateBytes; - } - - @Override - public void close() { - Releasables.close(states, () -> adjustBreaker(-stateBytes)); - } - - @Override - public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; - final BlockFactory blockFactory = driverContext.blockFactory(); - final int positionCount = selected.getPositionCount(); - try ( - LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); - $Type$Block.Builder values = blockFactory.new$Type$BlockBuilder(positionCount * 2); - IntVector.FixedBuilder sampleCounts = blockFactory.newIntVectorFixedBuilder(positionCount); - DoubleVector.FixedBuilder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) - ) { - for (int i = 0; i < positionCount; i++) { - final var groupId = selected.getInt(i); - final var state = groupId < states.size() ? 
states.get(groupId) : null; - if (state != null) { - timestamps.beginPositionEntry(); - for (long t : state.timestamps) { - timestamps.appendLong(t); - } - timestamps.endPositionEntry(); - - values.beginPositionEntry(); - for ($type$ v : state.values) { - values.append$Type$(v); - } - values.endPositionEntry(); - sampleCounts.appendInt(i, state.sampleCount); - resets.appendDouble(i, state.reset); - } else { - timestamps.appendNull(); - values.appendNull(); - sampleCounts.appendInt(i, 0); - resets.appendDouble(i, 0); - } - } - blocks[offset] = timestamps.build(); - blocks[offset + 1] = values.build(); - blocks[offset + 2] = sampleCounts.build().asBlock(); - blocks[offset + 3] = resets.build().asBlock(); - } - } - - private static double computeRateWithoutExtrapolate($Type$RateState state) { - final int len = state.entries(); - assert len >= 2 : "rate requires at least two samples; got " + len; - final long firstTS = state.timestamps[state.timestamps.length - 1]; - final long lastTS = state.timestamps[0]; - double reset = state.reset; - for (int i = 1; i < len; i++) { - if (state.values[i - 1] < state.values[i]) { - reset += state.values[i]; - } - } - final double firstValue = state.values[len - 1]; - final double lastValue = state.values[0] + reset; - return (lastValue - firstValue) * 1000.0 / (lastTS - firstTS); - } - - /** - * Credit to PromQL for this extrapolation algorithm: - * If samples are close enough to the rangeStart and rangeEnd, we extrapolate the rate all the way to the boundary in question. - * "Close enough" is defined as "up to 10% more than the average duration between samples within the range". - * Essentially, we assume a more or less regular spacing between samples. If we don't see a sample where we would expect one, - * we assume the series does not cover the whole range but starts and/or ends within the range. - * We still extrapolate the rate in this case, but not all the way to the boundary, only by half of the average duration between - * samples (which is our guess for where the series actually starts or ends). 
- */ - private static double extrapolateRate($Type$RateState state, long rangeStart, long rangeEnd) { - final int len = state.entries(); - assert len >= 2 : "rate requires at least two samples; got " + len; - final long firstTS = state.timestamps[state.timestamps.length - 1]; - final long lastTS = state.timestamps[0]; - double reset = state.reset; - for (int i = 1; i < len; i++) { - if (state.values[i - 1] < state.values[i]) { - reset += state.values[i]; - } - } - double firstValue = state.values[len - 1]; - double lastValue = state.values[0] + reset; - final double sampleTS = lastTS - firstTS; - final double averageSampleInterval = sampleTS / state.sampleCount; - final double slope = (lastValue - firstValue) / sampleTS; - double startGap = firstTS - rangeStart; - if (startGap > 0) { - if (startGap > averageSampleInterval * 1.1) { - startGap = averageSampleInterval / 2.0; - } - firstValue = Math.max(0.0, firstValue - startGap * slope); - } - double endGap = rangeEnd - lastTS; - if (endGap > 0) { - if (endGap > averageSampleInterval * 1.1) { - endGap = averageSampleInterval / 2.0; - } - lastValue = lastValue + endGap * slope; - } - return (lastValue - firstValue) * 1000.0 / (rangeEnd - rangeStart); - } - - Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext evalContext) { - int positionCount = selected.getPositionCount(); - try (DoubleBlock.Builder rates = evalContext.blockFactory().newDoubleBlockBuilder(positionCount)) { - for (int p = 0; p < positionCount; p++) { - final var groupId = selected.getInt(p); - final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.sampleCount < 2) { - rates.appendNull(); - continue; - } - int len = state.entries(); - final double rate; - if (evalContext instanceof TimeSeriesGroupingAggregatorEvaluationContext tsContext) { - rate = extrapolateRate(state, tsContext.rangeStartInMillis(groupId), tsContext.rangeEndInMillis(groupId)); - } else { - rate = computeRateWithoutExtrapolate(state); - } - rates.appendDouble(rate); - } - return rates.build(); - } - } - - @Override - public void enableGroupIdTracking(SeenGroupIds seenGroupIds) { - // noop - we handle the null states inside `toIntermediate` and `evaluateFinal` - } - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateDoubleAggregator.java similarity index 94% rename from x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java rename to x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateDoubleAggregator.java index 23e6942217e21..1df2c9bfd99f3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateDoubleAggregator.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.compute.aggregation; +package org.elasticsearch.compute.aggregation.oldrate; // begin generated imports import org.apache.lucene.util.Accountable; @@ -13,14 +13,14 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; -import org.elasticsearch.compute.ann.GroupingAggregator; -import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.aggregation.GroupingAggregatorEvaluationContext; +import org.elasticsearch.compute.aggregation.GroupingAggregatorState; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.aggregation.TimeSeriesGroupingAggregatorEvaluationContext; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.FloatBlock; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.operator.DriverContext; @@ -30,16 +30,9 @@ /** * A rate grouping aggregation definition for double. - * This class is generated. Edit `X-RateAggregator.java.st` instead. + * */ -@GroupingAggregator( - value = { - @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), - @IntermediateState(name = "values", type = "DOUBLE_BLOCK"), - @IntermediateState(name = "sampleCounts", type = "INT"), - @IntermediateState(name = "resets", type = "DOUBLE") } -) -public class RateDoubleAggregator { +public final class OldRateDoubleAggregator { public static DoubleRateGroupingState initGrouping(DriverContext driverContext) { return new DoubleRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); @@ -115,7 +108,7 @@ public static final class DoubleRateGroupingState implements Releasable, Account private final CircuitBreaker breaker; private long stateBytes; // for individual states - DoubleRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + public DoubleRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { this.bigArrays = bigArrays; this.breaker = breaker; this.states = bigArrays.newObjectArray(1); @@ -150,7 +143,7 @@ void append(int groupId, long timestamp, double value) { } } - void combine(int groupId, LongBlock timestamps, DoubleBlock values, int sampleCount, double reset, int otherPosition) { + public void combine(int groupId, LongBlock timestamps, DoubleBlock values, int sampleCount, double reset, int otherPosition) { final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateDoubleGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..dde1f567a9dd7 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateDoubleGroupingAggregatorFunction.java @@ -0,0 +1,446 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.compute.aggregation.oldrate; + +import org.elasticsearch.compute.aggregation.GroupingAggregatorEvaluationContext; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.IntermediateStateDesc; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link OldRateDoubleAggregator}. + */ +public final class OldRateDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.DOUBLE), + new IntermediateStateDesc("sampleCounts", ElementType.INT), + new IntermediateStateDesc("resets", ElementType.DOUBLE) + ); + + private final OldRateDoubleAggregator.DoubleRateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public OldRateDoubleGroupingAggregatorFunction( + List channels, + OldRateDoubleAggregator.DoubleRateGroupingState state, + DriverContext driverContext + ) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static OldRateDoubleGroupingAggregatorFunction create(List channels, DriverContext driverContext) { + return new OldRateDoubleGroupingAggregatorFunction(channels, OldRateDoubleAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, Page page) { + DoubleBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + DoubleVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() {} + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int 
positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() {} + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() {} + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = 
page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + OldRateDoubleAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + 
state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + OldRateDoubleAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if 
(timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + OldRateDoubleAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + + private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, DoubleBlock valueBlock, LongBlock timestampBlock) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = OldRateDoubleAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateIntAggregator.java similarity index 95% rename from x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java rename to x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateIntAggregator.java index 16f7269e8a4f2..c8795c56d5aa7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateIntAggregator.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.compute.aggregation; +package org.elasticsearch.compute.aggregation.oldrate; // begin generated imports import org.apache.lucene.util.Accountable; @@ -13,13 +13,14 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; -import org.elasticsearch.compute.ann.GroupingAggregator; -import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.aggregation.GroupingAggregatorEvaluationContext; +import org.elasticsearch.compute.aggregation.GroupingAggregatorState; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.aggregation.TimeSeriesGroupingAggregatorEvaluationContext; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.FloatBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; @@ -30,16 +31,8 @@ /** * A rate grouping aggregation definition for int. - * This class is generated. Edit `X-RateAggregator.java.st` instead. */ -@GroupingAggregator( - value = { - @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), - @IntermediateState(name = "values", type = "INT_BLOCK"), - @IntermediateState(name = "sampleCounts", type = "INT"), - @IntermediateState(name = "resets", type = "DOUBLE") } -) -public class RateIntAggregator { +public class OldRateIntAggregator { public static IntRateGroupingState initGrouping(DriverContext driverContext) { return new IntRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); @@ -115,7 +108,7 @@ public static final class IntRateGroupingState implements Releasable, Accountabl private final CircuitBreaker breaker; private long stateBytes; // for individual states - IntRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + public IntRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { this.bigArrays = bigArrays; this.breaker = breaker; this.states = bigArrays.newObjectArray(1); @@ -150,7 +143,7 @@ void append(int groupId, long timestamp, int value) { } } - void combine(int groupId, LongBlock timestamps, IntBlock values, int sampleCount, double reset, int otherPosition) { + public void combine(int groupId, LongBlock timestamps, IntBlock values, int sampleCount, double reset, int otherPosition) { final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateIntGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..5718af293ea17 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateIntGroupingAggregatorFunction.java @@ -0,0 +1,447 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.compute.aggregation.oldrate; + +import org.elasticsearch.compute.aggregation.GroupingAggregatorEvaluationContext; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.IntermediateStateDesc; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link OldRateIntAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. + */ +public final class OldRateIntGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.INT), + new IntermediateStateDesc("sampleCounts", ElementType.INT), + new IntermediateStateDesc("resets", ElementType.DOUBLE) + ); + + private final OldRateIntAggregator.IntRateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public OldRateIntGroupingAggregatorFunction( + List channels, + OldRateIntAggregator.IntRateGroupingState state, + DriverContext driverContext + ) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static OldRateIntGroupingAggregatorFunction create(List channels, DriverContext driverContext) { + return new OldRateIntGroupingAggregatorFunction(channels, OldRateIntAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, Page page) { + IntBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + IntVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() {} + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + 
@Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() {} + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() {} + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, IntBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, IntVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = 
page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + OldRateIntAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new 
SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + OldRateIntAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock 
timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + OldRateIntAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + + private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, IntBlock valueBlock, LongBlock timestampBlock) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = OldRateIntAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateLongAggregator.java similarity index 94% rename from x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java rename to x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateLongAggregator.java index f03ba968e9648..c4f4d92561019 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateLongAggregator.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.compute.aggregation; +package org.elasticsearch.compute.aggregation.oldrate; // begin generated imports import org.apache.lucene.util.Accountable; @@ -13,14 +13,14 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; -import org.elasticsearch.compute.ann.GroupingAggregator; -import org.elasticsearch.compute.ann.IntermediateState; +import org.elasticsearch.compute.aggregation.GroupingAggregatorEvaluationContext; +import org.elasticsearch.compute.aggregation.GroupingAggregatorState; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.aggregation.TimeSeriesGroupingAggregatorEvaluationContext; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; -import org.elasticsearch.compute.data.FloatBlock; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.operator.DriverContext; @@ -30,16 +30,8 @@ /** * A rate grouping aggregation definition for long. - * This class is generated. Edit `X-RateAggregator.java.st` instead. */ -@GroupingAggregator( - value = { - @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), - @IntermediateState(name = "values", type = "LONG_BLOCK"), - @IntermediateState(name = "sampleCounts", type = "INT"), - @IntermediateState(name = "resets", type = "DOUBLE") } -) -public class RateLongAggregator { +public class OldRateLongAggregator { public static LongRateGroupingState initGrouping(DriverContext driverContext) { return new LongRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); @@ -115,7 +107,7 @@ public static final class LongRateGroupingState implements Releasable, Accountab private final CircuitBreaker breaker; private long stateBytes; // for individual states - LongRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + public LongRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { this.bigArrays = bigArrays; this.breaker = breaker; this.states = bigArrays.newObjectArray(1); @@ -150,7 +142,7 @@ void append(int groupId, long timestamp, long value) { } } - void combine(int groupId, LongBlock timestamps, LongBlock values, int sampleCount, double reset, int otherPosition) { + public void combine(int groupId, LongBlock timestamps, LongBlock values, int sampleCount, double reset, int otherPosition) { final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateLongGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..5013ba1475524 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/oldrate/OldRateLongGroupingAggregatorFunction.java @@ -0,0 +1,447 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.compute.aggregation.oldrate; + +import org.elasticsearch.compute.aggregation.GroupingAggregatorEvaluationContext; +import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.IntermediateStateDesc; +import org.elasticsearch.compute.aggregation.SeenGroupIds; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +import java.util.List; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link OldRateLongAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. + */ +public final class OldRateLongGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.LONG), + new IntermediateStateDesc("sampleCounts", ElementType.INT), + new IntermediateStateDesc("resets", ElementType.DOUBLE) + ); + + private final OldRateLongAggregator.LongRateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public OldRateLongGroupingAggregatorFunction( + List channels, + OldRateLongAggregator.LongRateGroupingState state, + DriverContext driverContext + ) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static OldRateLongGroupingAggregatorFunction create(List channels, DriverContext driverContext) { + return new OldRateLongGroupingAggregatorFunction(channels, OldRateLongAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, Page page) { + LongBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + LongVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() {} + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new 
GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() {} + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() {} + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, LongBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, LongVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) 
timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + OldRateLongAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) 
{ + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + OldRateLongAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongBlock valueBlock, LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + OldRateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongVector valueVector, LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + OldRateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if 
(timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + Block sampleCountsUncast = page.getBlock(channels.get(2)); + if (sampleCountsUncast.areAllValuesNull()) { + return; + } + IntVector sampleCounts = ((IntBlock) sampleCountsUncast).asVector(); + Block resetsUncast = page.getBlock(channels.get(3)); + if (resetsUncast.areAllValuesNull()) { + return; + } + DoubleVector resets = ((DoubleBlock) resetsUncast).asVector(); + assert timestamps.getPositionCount() == values.getPositionCount() + && timestamps.getPositionCount() == sampleCounts.getPositionCount() + && timestamps.getPositionCount() == resets.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + OldRateLongAggregator.combineIntermediate( + state, + groupId, + timestamps, + values, + sampleCounts.getInt(valuesPosition), + resets.getDouble(valuesPosition), + valuesPosition + ); + } + } + + private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, LongBlock valueBlock, LongBlock timestampBlock) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = OldRateLongAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index afec708490779..57547afbdf70a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -78,7 +78,9 @@ public Factory( contexts, queryFunction, dataPartitioning, - autoStrategy.pickStrategy(limit), + dataPartitioning == DataPartitioning.AUTO ? 
autoStrategy.pickStrategy(limit) : q -> { + throw new UnsupportedOperationException("locked in " + dataPartitioning); + }, taskConcurrency, limit, needsScore, diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperator.java deleted file mode 100644 index 1ffe57fba4ef7..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperator.java +++ /dev/null @@ -1,555 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.lucene; - -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.DocVector; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.LongVector; -import org.elasticsearch.compute.data.OrdinalBytesRefVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.operator.Operator; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - -public final class TimeSeriesSourceOperator extends LuceneOperator { - private final int maxPageSize; - private final BlockFactory blockFactory; - private final LuceneSliceQueue sliceQueue; - private int currentPagePos = 0; - private int remainingDocs; - private boolean doneCollecting; - - private LongVector.Builder timestampsBuilder; - private TsidBuilder tsHashesBuilder; - private SegmentsIterator iterator; - private DocIdCollector docCollector; - private long tsidsLoaded; - - TimeSeriesSourceOperator( - List contexts, - BlockFactory blockFactory, - LuceneSliceQueue sliceQueue, - int maxPageSize, - int limit - ) { - super(contexts, blockFactory, maxPageSize, sliceQueue); - this.maxPageSize = maxPageSize; - this.blockFactory = blockFactory; - this.remainingDocs = limit; - this.timestampsBuilder = blockFactory.newLongVectorBuilder(Math.min(limit, maxPageSize)); - this.tsHashesBuilder = new TsidBuilder(blockFactory, Math.min(limit, maxPageSize)); - this.sliceQueue = sliceQueue; - } - - @Override - public void finish() { - this.doneCollecting = true; - } - - @Override - public boolean isFinished() { - return doneCollecting; - } - - @Override - public Page getCheckedOutput() throws IOException 
{ - if (isFinished()) { - return null; - } - - if (remainingDocs <= 0) { - doneCollecting = true; - return null; - } - - Page page = null; - Block[] blocks = new Block[3]; - long startInNanos = System.nanoTime(); - try { - if (iterator == null) { - var slice = sliceQueue.nextSlice(null); - if (slice == null) { - doneCollecting = true; - return null; - } - if (slice.tags().isEmpty() == false) { - throw new UnsupportedOperationException("tags not supported by " + getClass()); - } - iterator = new SegmentsIterator(slice); - docCollector = new DocIdCollector(blockFactory, slice.shardContext()); - } - iterator.readDocsForNextPage(); - if (currentPagePos > 0) { - blocks[0] = docCollector.build().asBlock(); - OrdinalBytesRefVector tsidVector = tsHashesBuilder.build(); - blocks[1] = tsidVector.asBlock(); - tsHashesBuilder = new TsidBuilder(blockFactory, Math.min(remainingDocs, maxPageSize)); - blocks[2] = timestampsBuilder.build().asBlock(); - timestampsBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); - page = new Page(currentPagePos, blocks); - currentPagePos = 0; - } - if (iterator.completed()) { - processedShards.add(iterator.luceneSlice.shardContext().shardIdentifier()); - processedSlices++; - Releasables.close(docCollector); - iterator = null; - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } finally { - if (page == null) { - Releasables.closeExpectNoException(blocks); - } - processingNanos += System.nanoTime() - startInNanos; - } - return page; - } - - @Override - public void additionalClose() { - Releasables.closeExpectNoException(timestampsBuilder, tsHashesBuilder, docCollector); - } - - class SegmentsIterator { - private final PriorityQueue mainQueue; - private final PriorityQueue oneTsidQueue; - final LuceneSlice luceneSlice; - - SegmentsIterator(LuceneSlice luceneSlice) throws IOException { - this.luceneSlice = luceneSlice; - this.mainQueue = new PriorityQueue<>(luceneSlice.numLeaves()) { - @Override - protected boolean lessThan(LeafIterator a, LeafIterator b) { - return a.timeSeriesHash.compareTo(b.timeSeriesHash) < 0; - } - }; - Weight weight = luceneSlice.createWeight(); - processedQueries.add(weight.getQuery()); - int maxSegmentOrd = 0; - for (var leafReaderContext : luceneSlice.leaves()) { - LeafIterator leafIterator = new LeafIterator(weight, leafReaderContext.leafReaderContext()); - leafIterator.nextDoc(); - if (leafIterator.docID != DocIdSetIterator.NO_MORE_DOCS) { - mainQueue.add(leafIterator); - maxSegmentOrd = Math.max(maxSegmentOrd, leafIterator.segmentOrd); - } - } - this.oneTsidQueue = new PriorityQueue<>(mainQueue.size()) { - @Override - protected boolean lessThan(LeafIterator a, LeafIterator b) { - return a.timestamp > b.timestamp; - } - }; - } - - // TODO: add optimize for one leaf? 
- void readDocsForNextPage() throws IOException { - docCollector.prepareForCollecting(Math.min(remainingDocs, maxPageSize)); - Thread executingThread = Thread.currentThread(); - for (LeafIterator leaf : mainQueue) { - leaf.reinitializeIfNeeded(executingThread); - } - for (LeafIterator leaf : oneTsidQueue) { - leaf.reinitializeIfNeeded(executingThread); - } - if (mainQueue.size() + oneTsidQueue.size() == 1) { - readValuesFromSingleRemainingLeaf(); - } else { - do { - PriorityQueue sub = subQueueForNextTsid(); - if (sub.size() == 0) { - break; - } - tsHashesBuilder.appendNewTsid(sub.top().timeSeriesHash); - if (readValuesForOneTsid(sub)) { - break; - } - } while (mainQueue.size() > 0); - } - } - - private boolean readValuesForOneTsid(PriorityQueue sub) throws IOException { - do { - LeafIterator top = sub.top(); - currentPagePos++; - remainingDocs--; - docCollector.collect(top.segmentOrd, top.docID); - tsHashesBuilder.appendOrdinal(); - timestampsBuilder.appendLong(top.timestamp); - if (top.nextDoc()) { - sub.updateTop(); - } else if (top.docID == DocIdSetIterator.NO_MORE_DOCS) { - sub.pop(); - } else { - mainQueue.add(sub.pop()); - } - if (remainingDocs <= 0 || currentPagePos >= maxPageSize) { - return true; - } - } while (sub.size() > 0); - return false; - } - - private PriorityQueue subQueueForNextTsid() { - if (oneTsidQueue.size() == 0 && mainQueue.size() > 0) { - LeafIterator last = mainQueue.pop(); - oneTsidQueue.add(last); - while (mainQueue.size() > 0) { - var top = mainQueue.top(); - if (top.timeSeriesHash.equals(last.timeSeriesHash)) { - oneTsidQueue.add(mainQueue.pop()); - } else { - break; - } - } - if (oneTsidQueue.size() > 0) { - ++tsidsLoaded; - } - } - return oneTsidQueue; - } - - private void readValuesFromSingleRemainingLeaf() throws IOException { - if (oneTsidQueue.size() == 0) { - oneTsidQueue.add(getMainQueue().pop()); - tsidsLoaded++; - } - final LeafIterator sub = oneTsidQueue.top(); - int lastTsid = -1; - do { - currentPagePos++; - remainingDocs--; - docCollector.collect(sub.segmentOrd, sub.docID); - if (lastTsid != sub.lastTsidOrd) { - tsHashesBuilder.appendNewTsid(sub.timeSeriesHash); - lastTsid = sub.lastTsidOrd; - } - tsHashesBuilder.appendOrdinal(); - timestampsBuilder.appendLong(sub.timestamp); - if (sub.nextDoc() == false) { - if (sub.docID == DocIdSetIterator.NO_MORE_DOCS) { - oneTsidQueue.clear(); - return; - } else { - ++tsidsLoaded; - } - } - } while (remainingDocs > 0 && currentPagePos < maxPageSize); - } - - private PriorityQueue getMainQueue() { - return mainQueue; - } - - boolean completed() { - return mainQueue.size() == 0 && oneTsidQueue.size() == 0; - } - } - - static class LeafIterator { - private final int segmentOrd; - private final Weight weight; - private final LeafReaderContext leafContext; - private SortedDocValues tsids; - private NumericDocValues timestamps; - private DocIdSetIterator disi; - private Thread createdThread; - - private long timestamp; - private int lastTsidOrd = -1; - private BytesRef timeSeriesHash; - private int docID = -1; - - LeafIterator(Weight weight, LeafReaderContext leafContext) throws IOException { - this.segmentOrd = leafContext.ord; - this.weight = weight; - this.leafContext = leafContext; - this.createdThread = Thread.currentThread(); - tsids = leafContext.reader().getSortedDocValues("_tsid"); - timestamps = DocValues.unwrapSingleton(leafContext.reader().getSortedNumericDocValues("@timestamp")); - final Scorer scorer = weight.scorer(leafContext); - disi = scorer != null ? 
scorer.iterator() : DocIdSetIterator.empty(); - } - - boolean nextDoc() throws IOException { - docID = disi.nextDoc(); - if (docID == DocIdSetIterator.NO_MORE_DOCS) { - return false; - } - boolean advanced = timestamps.advanceExact(docID); - assert advanced; - timestamp = timestamps.longValue(); - advanced = tsids.advanceExact(docID); - assert advanced; - - int ord = tsids.ordValue(); - if (ord != lastTsidOrd) { - timeSeriesHash = tsids.lookupOrd(ord); - lastTsidOrd = ord; - return false; - } else { - return true; - } - } - - void reinitializeIfNeeded(Thread executingThread) throws IOException { - if (executingThread != createdThread) { - tsids = leafContext.reader().getSortedDocValues("_tsid"); - timestamps = DocValues.unwrapSingleton(leafContext.reader().getSortedNumericDocValues("@timestamp")); - final Scorer scorer = weight.scorer(leafContext); - disi = scorer != null ? scorer.iterator() : DocIdSetIterator.empty(); - if (docID != -1) { - disi.advance(docID); - } - createdThread = executingThread; - } - } - } - - /** - * Collect tsids then build a {@link OrdinalBytesRefVector} - */ - static final class TsidBuilder implements Releasable { - private int currentOrd = -1; - private final BytesRefVector.Builder dictBuilder; - private final IntVector.Builder ordinalsBuilder; - - TsidBuilder(BlockFactory blockFactory, int estimatedSize) { - final var dictBuilder = blockFactory.newBytesRefVectorBuilder(estimatedSize); - boolean success = false; - try { - this.dictBuilder = dictBuilder; - this.ordinalsBuilder = blockFactory.newIntVectorBuilder(estimatedSize); - success = true; - } finally { - if (success == false) { - dictBuilder.close(); - } - } - } - - void appendNewTsid(BytesRef tsid) { - currentOrd++; - dictBuilder.appendBytesRef(tsid); - } - - void appendOrdinal() { - assert currentOrd >= 0; - ordinalsBuilder.appendInt(currentOrd); - } - - @Override - public void close() { - Releasables.close(dictBuilder, ordinalsBuilder); - } - - OrdinalBytesRefVector build() throws IOException { - BytesRefVector dict = null; - OrdinalBytesRefVector result = null; - IntVector ordinals = null; - try { - dict = dictBuilder.build(); - ordinals = ordinalsBuilder.build(); - result = new OrdinalBytesRefVector(ordinals, dict); - } finally { - if (result == null) { - Releasables.close(dict, ordinals); - } - } - return result; - } - } - - static final class DocIdCollector implements Releasable { - private final BlockFactory blockFactory; - private final ShardContext shardContext; - private IntVector.Builder docsBuilder; - private IntVector.Builder segmentsBuilder; - - DocIdCollector(BlockFactory blockFactory, ShardContext shardContext) { - this.blockFactory = blockFactory; - this.shardContext = shardContext; - } - - void prepareForCollecting(int estimatedSize) { - assert docsBuilder == null; - docsBuilder = blockFactory.newIntVectorBuilder(estimatedSize); - segmentsBuilder = blockFactory.newIntVectorBuilder(estimatedSize); - } - - void collect(int segment, int docId) { - docsBuilder.appendInt(docId); - segmentsBuilder.appendInt(segment); - } - - DocVector build() { - IntVector shards = null; - IntVector segments = null; - IntVector docs = null; - DocVector docVector = null; - try { - docs = docsBuilder.build(); - docsBuilder = null; - segments = segmentsBuilder.build(); - segmentsBuilder = null; - shards = blockFactory.newConstantIntVector(shardContext.index(), docs.getPositionCount()); - docVector = new DocVector(ShardRefCounted.fromShardContext(shardContext), shards, segments, docs, segments.isConstant()); - 
return docVector; - } finally { - if (docVector == null) { - Releasables.close(docs, segments, shards); - } - } - } - - @Override - public void close() { - Releasables.close(docsBuilder, segmentsBuilder); - } - } - - @Override - protected void describe(StringBuilder sb) { - sb.append("[" + "maxPageSize=").append(maxPageSize).append(", remainingDocs=").append(remainingDocs).append("]"); - } - - @Override - public Operator.Status status() { - final long valuesLoaded = rowsEmitted; // @timestamp field - return new Status(this, tsidsLoaded, valuesLoaded); - } - - public static class Status extends LuceneOperator.Status { - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Operator.Status.class, - "time_series_source", - Status::new - ); - - private final long tsidLoaded; - private final long valuesLoaded; - - Status(TimeSeriesSourceOperator operator, long tsidLoaded, long valuesLoaded) { - super(operator); - this.tsidLoaded = tsidLoaded; - this.valuesLoaded = valuesLoaded; - } - - Status( - int processedSlices, - Set processedQueries, - Set processedShards, - long processNanos, - int sliceIndex, - int totalSlices, - int pagesEmitted, - int sliceMin, - int sliceMax, - int current, - long rowsEmitted, - Map partitioningStrategies, - long tsidLoaded, - long valuesLoaded - ) { - super( - processedSlices, - processedQueries, - processedShards, - processNanos, - sliceIndex, - totalSlices, - pagesEmitted, - sliceMin, - sliceMax, - current, - rowsEmitted, - partitioningStrategies - ); - this.tsidLoaded = tsidLoaded; - this.valuesLoaded = valuesLoaded; - } - - Status(StreamInput in) throws IOException { - super(in); - this.tsidLoaded = in.readVLong(); - this.valuesLoaded = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVLong(tsidLoaded); - out.writeVLong(valuesLoaded); - } - - @Override - protected void toXContentFields(XContentBuilder builder, Params params) throws IOException { - super.toXContentFields(builder, params); - builder.field("tsid_loaded", tsidLoaded); - builder.field("values_loaded", valuesLoaded); - } - - public long tsidLoaded() { - return tsidLoaded; - } - - @Override - public String getWriteableName() { - return ENTRY.name; - } - - @Override - public boolean supportsVersion(TransportVersion version) { - return version.onOrAfter(TransportVersions.ESQL_TIME_SERIES_SOURCE_STATUS); - } - - @Override - public long valuesLoaded() { - return valuesLoaded; - } - - @Override - public boolean equals(Object o) { - if (o == null || getClass() != o.getClass()) return false; - if (super.equals(o) == false) return false; - Status status = (Status) o; - return tsidLoaded == status.tsidLoaded && valuesLoaded == status.valuesLoaded; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), tsidLoaded, valuesLoaded); - } - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorFactory.java index bb1d889db3f85..0fc4475b367ac 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorFactory.java @@ -7,29 +7,10 @@ package org.elasticsearch.compute.lucene; -import org.apache.lucene.search.ScoreMode; -import 
org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.SourceOperator; - import java.util.List; import java.util.function.Function; -/** - * Creates a source operator that takes advantage of the natural sorting of segments in a tsdb index. - *
<p>
- * This source operator loads the _tsid and @timestamp fields, which are used for emitting documents in the correct order. These field values - * are included in the page as separate blocks and downstream operators can make use of these loaded time series ids and timestamps. - *
<p>
- * The source operator includes all documents of a time series in the same page, so the same time series never exists in multiple pages. - * Downstream operators can make use of this implementation detail. - *
<p>
- * This operator currently only supports shard level concurrency. A new concurrency mechanism should be introduced at the time serie level - * in order to read tsdb indices in parallel. - */ -public class TimeSeriesSourceOperatorFactory extends LuceneOperator.Factory { - private final List contexts; - private final int maxPageSize; - +public class TimeSeriesSourceOperatorFactory extends LuceneSourceOperator.Factory { private TimeSeriesSourceOperatorFactory( List contexts, Function> queryFunction, @@ -43,22 +24,10 @@ private TimeSeriesSourceOperatorFactory( DataPartitioning.SHARD, query -> { throw new UnsupportedOperationException("locked to SHARD partitioning"); }, taskConcurrency, + maxPageSize, limit, - false, - shardContext -> ScoreMode.COMPLETE_NO_SCORES + false ); - this.contexts = contexts; - this.maxPageSize = maxPageSize; - } - - @Override - public SourceOperator get(DriverContext driverContext) { - return new TimeSeriesSourceOperator(contexts, driverContext.blockFactory(), sliceQueue, maxPageSize, limit); - } - - @Override - public String describe() { - return "TimeSeriesSourceOperator[maxPageSize = " + maxPageSize + ", limit = " + limit + "]"; } public static TimeSeriesSourceOperatorFactory create( @@ -68,6 +37,7 @@ public static TimeSeriesSourceOperatorFactory create( List contexts, Function> queryFunction ) { + // TODO: custom slice and return the next max_timestamp return new TimeSeriesSourceOperatorFactory(contexts, queryFunction, taskConcurrency, maxPageSize, limit); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/TimeSeriesExtractFieldOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/TimeSeriesExtractFieldOperator.java deleted file mode 100644 index f6712bf651da1..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/read/TimeSeriesExtractFieldOperator.java +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.lucene.read; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedDocValues; -import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.DocBlock; -import org.elasticsearch.compute.data.DocVector; -import org.elasticsearch.compute.data.IntVector; -import org.elasticsearch.compute.data.OrdinalBytesRefBlock; -import org.elasticsearch.compute.data.OrdinalBytesRefVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.lucene.ShardContext; -import org.elasticsearch.compute.operator.AbstractPageMappingOperator; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.Operator; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; -import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; -import org.elasticsearch.index.mapper.BlockLoader; -import org.elasticsearch.index.mapper.BlockLoaderStoredFieldsFromLeafLoader; -import org.elasticsearch.index.mapper.SourceLoader; -import org.elasticsearch.search.fetch.StoredFieldsSpec; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Arrays; -import java.util.List; - -/** - * A variant of {@link ValuesSourceReaderOperator} for extracting fields in time-series indices. The differences are: - * 1. Caches all segments of the last shard instead of only the last segment, since data in time-series can come from - * any segment at any time - * 2. Although docs do not arrive in the global order (by shard, then segment, then docId), they are still sorted - * within each segment; hence, this reader does not perform sorting and regrouping, which are expensive. - * 3. For dimension fields, values are read only once per tsid. - * These changes are made purely for performance reasons. We should look into consolidating this operator with - * {@link ValuesSourceReaderOperator} by adding some metadata to the {@link DocVector} and handling them accordingly. 
- */ -public class TimeSeriesExtractFieldOperator extends AbstractPageMappingOperator { - - public record Factory(List fields, List shardContexts) - implements - OperatorFactory { - @Override - public Operator get(DriverContext driverContext) { - return new TimeSeriesExtractFieldOperator(driverContext.blockFactory(), fields, shardContexts); - } - - @Override - public String describe() { - StringBuilder sb = new StringBuilder(); - sb.append("TimeSeriesExtractFieldOperator[fields = ["); - if (fields.size() < 10) { - boolean first = true; - for (var f : fields) { - if (first) { - first = false; - } else { - sb.append(", "); - } - sb.append(f.name()); - } - } else { - sb.append(fields.size()).append(" fields"); - } - return sb.append("]]").toString(); - } - } - - private final BlockFactory blockFactory; - private final List fields; - private final List shardContexts; - - private ShardLevelFieldsReader fieldsReader; - - public TimeSeriesExtractFieldOperator( - BlockFactory blockFactory, - List fields, - List shardContexts - ) { - this.blockFactory = blockFactory; - this.fields = fields; - this.shardContexts = shardContexts; - } - - private OrdinalBytesRefVector getTsid(Page page, int channel) { - BytesRefBlock block = page.getBlock(channel); - OrdinalBytesRefBlock ordinals = block.asOrdinals(); - if (ordinals == null) { - throw new IllegalArgumentException("tsid must be an ordinals block, got: " + block.getClass().getName()); - } - OrdinalBytesRefVector vector = ordinals.asVector(); - if (vector == null) { - throw new IllegalArgumentException("tsid must be an ordinals vector, got: " + block.getClass().getName()); - } - return vector; - } - - private DocVector getDocVector(Page page, int channel) { - DocBlock docBlock = page.getBlock(channel); - DocVector docVector = docBlock.asVector(); - if (docVector == null) { - throw new IllegalArgumentException("doc must be a doc vector, got: " + docBlock.getClass().getName()); - } - return docVector; - } - - @Override - protected Page process(Page page) { - try { - return processUnchecked(page); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - private Page processUnchecked(Page page) throws IOException { - DocVector docVector = getDocVector(page, 0); - IntVector shards = docVector.shards(); - if (shards.isConstant() == false) { - throw new IllegalArgumentException("shards must be a constant vector, got: " + shards.getClass().getName()); - } - OrdinalBytesRefVector tsidVector = getTsid(page, 1); - IntVector tsidOrdinals = tsidVector.getOrdinalsVector(); - int shardIndex = shards.getInt(0); - if (fieldsReader == null || fieldsReader.shardIndex != shardIndex) { - Releasables.close(fieldsReader); - fieldsReader = new ShardLevelFieldsReader(shardIndex, blockFactory, shardContexts.get(shardIndex), fields); - } - fieldsReader.prepareForReading(page.getPositionCount()); - IntVector docs = docVector.docs(); - IntVector segments = docVector.segments(); - int lastTsidOrdinal = -1; - for (int p = 0; p < docs.getPositionCount(); p++) { - int doc = docs.getInt(p); - int segment = segments.getInt(p); - int tsidOrd = tsidOrdinals.getInt(p); - if (tsidOrd == lastTsidOrdinal) { - fieldsReader.readValues(segment, doc, true); - } else { - fieldsReader.readValues(segment, doc, false); - lastTsidOrdinal = tsidOrd; - } - } - Block[] blocks = new Block[fields.size()]; - Page result = null; - try { - fieldsReader.buildBlocks(blocks, tsidOrdinals); - result = page.appendBlocks(blocks); - return result; - } finally { - if (result == null) { - 
Releasables.close(blocks); - } - } - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("TimeSeriesExtractFieldOperator[fields = ["); - if (fields.size() < 10) { - boolean first = true; - for (var f : fields) { - if (first) { - first = false; - } else { - sb.append(", "); - } - sb.append(f.name()); - } - } else { - sb.append(fields.size()).append(" fields"); - } - return sb.append("]]").toString(); - } - - @Override - public void close() { - Releasables.close(fieldsReader, super::close); - } - - static class BlockLoaderFactory extends DelegatingBlockLoaderFactory { - BlockLoaderFactory(BlockFactory factory) { - super(factory); - } - - @Override - public BlockLoader.Block constantNulls(int count) { - throw new UnsupportedOperationException("must not be used by column readers"); - } - - @Override - public BlockLoader.SingletonOrdinalsBuilder singletonOrdinalsBuilder(SortedDocValues ordinals, int count, boolean isDense) { - throw new UnsupportedOperationException("must not be used by column readers"); - } - } - - static final class ShardLevelFieldsReader implements Releasable { - final int shardIndex; - private final BlockLoaderFactory blockFactory; - private final SegmentLevelFieldsReader[] segments; - private final BlockLoader[] loaders; - private final boolean[] dimensions; - private final Block.Builder[] builders; - private final StoredFieldsSpec storedFieldsSpec; - private final SourceLoader sourceLoader; - - ShardLevelFieldsReader( - int shardIndex, - BlockFactory blockFactory, - ShardContext shardContext, - List fields - ) { - this.shardIndex = shardIndex; - this.blockFactory = new BlockLoaderFactory(blockFactory); - final IndexReader indexReader = shardContext.searcher().getIndexReader(); - this.segments = new SegmentLevelFieldsReader[indexReader.leaves().size()]; - this.loaders = new BlockLoader[fields.size()]; - this.builders = new Block.Builder[loaders.length]; - StoredFieldsSpec storedFieldsSpec = StoredFieldsSpec.NO_REQUIREMENTS; - for (int i = 0; i < fields.size(); i++) { - BlockLoader loader = fields.get(i).blockLoader().apply(shardIndex); - storedFieldsSpec = storedFieldsSpec.merge(loader.rowStrideStoredFieldSpec()); - loaders[i] = loader; - } - for (int i = 0; i < indexReader.leaves().size(); i++) { - LeafReaderContext leafReaderContext = indexReader.leaves().get(i); - segments[i] = new SegmentLevelFieldsReader(leafReaderContext, loaders); - } - if (storedFieldsSpec.requiresSource()) { - sourceLoader = shardContext.newSourceLoader(); - storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(false, false, sourceLoader.requiredStoredFields())); - } else { - sourceLoader = null; - } - this.storedFieldsSpec = storedFieldsSpec; - this.dimensions = new boolean[fields.size()]; - for (int i = 0; i < fields.size(); i++) { - final var mappedFieldType = shardContext.fieldType(fields.get(i).name()); - dimensions[i] = mappedFieldType != null && mappedFieldType.isDimension(); - } - } - - /** - * For dimension fields, skips reading them when {@code nonDimensionFieldsOnly} is true, - * since they only need to be read once per tsid. 
- */ - void readValues(int segment, int docID, boolean nonDimensionFieldsOnly) throws IOException { - segments[segment].read(docID, builders, nonDimensionFieldsOnly, dimensions); - } - - void prepareForReading(int estimatedSize) throws IOException { - if (this.builders.length > 0 && this.builders[0] == null) { - for (int f = 0; f < builders.length; f++) { - builders[f] = (Block.Builder) loaders[f].builder(blockFactory, estimatedSize); - } - } - for (SegmentLevelFieldsReader segment : segments) { - segment.reinitializeIfNeeded(sourceLoader, storedFieldsSpec); - } - } - - void buildBlocks(Block[] blocks, IntVector tsidOrdinals) { - for (int i = 0; i < builders.length; i++) { - if (dimensions[i]) { - blocks[i] = buildBlockForDimensionField(builders[i], tsidOrdinals); - } else { - blocks[i] = builders[i].build(); - } - } - Arrays.fill(builders, null); - } - - private Block buildBlockForDimensionField(Block.Builder builder, IntVector tsidOrdinals) { - try (var values = builder.build()) { - if (values.asVector() instanceof BytesRefVector bytes) { - tsidOrdinals.incRef(); - values.incRef(); - return new OrdinalBytesRefVector(tsidOrdinals, bytes).asBlock(); - } else if (values.areAllValuesNull()) { - return blockFactory.factory.newConstantNullBlock(tsidOrdinals.getPositionCount()); - } else { - final int positionCount = tsidOrdinals.getPositionCount(); - try (var newBuilder = values.elementType().newBlockBuilder(positionCount, blockFactory.factory)) { - for (int p = 0; p < positionCount; p++) { - int pos = tsidOrdinals.getInt(p); - newBuilder.copyFrom(values, pos, pos + 1); - } - return newBuilder.build(); - } - } - } - } - - @Override - public void close() { - Releasables.close(builders); - } - } - - static final class SegmentLevelFieldsReader { - private final BlockLoader.RowStrideReader[] rowStride; - private final BlockLoader[] loaders; - private final LeafReaderContext leafContext; - private BlockLoaderStoredFieldsFromLeafLoader storedFields; - private Thread loadedThread = null; - - SegmentLevelFieldsReader(LeafReaderContext leafContext, BlockLoader[] loaders) { - this.leafContext = leafContext; - this.loaders = loaders; - this.rowStride = new BlockLoader.RowStrideReader[loaders.length]; - } - - private void reinitializeIfNeeded(SourceLoader sourceLoader, StoredFieldsSpec storedFieldsSpec) throws IOException { - final Thread currentThread = Thread.currentThread(); - if (loadedThread != currentThread) { - loadedThread = currentThread; - for (int f = 0; f < loaders.length; f++) { - rowStride[f] = loaders[f].rowStrideReader(leafContext); - } - storedFields = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(leafContext, null), - sourceLoader != null ? 
sourceLoader.leaf(leafContext.reader(), null) : null - ); - } - } - - void read(int docId, Block.Builder[] builder, boolean nonDimensionFieldsOnly, boolean[] dimensions) throws IOException { - storedFields.advanceTo(docId); - if (nonDimensionFieldsOnly) { - for (int i = 0; i < rowStride.length; i++) { - if (dimensions[i] == false) { - rowStride[i].read(docId, storedFields, builder[i]); - } - } - } else { - for (int i = 0; i < rowStride.length; i++) { - rowStride[i].read(docId, storedFields, builder[i]); - } - } - } - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperator.java index 6ab0291c718a7..be79cb48ef867 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperator.java @@ -14,7 +14,6 @@ import org.elasticsearch.compute.aggregation.GroupingAggregatorEvaluationContext; import org.elasticsearch.compute.aggregation.TimeSeriesGroupingAggregatorEvaluationContext; import org.elasticsearch.compute.aggregation.blockhash.BlockHash; -import org.elasticsearch.compute.aggregation.blockhash.TimeSeriesBlockHash; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.LongBlock; @@ -31,7 +30,6 @@ public class TimeSeriesAggregationOperator extends HashAggregationOperator { public record Factory( Rounding.Prepared timeBucket, - boolean sortedInput, List groups, AggregatorMode aggregatorMode, List aggregators, @@ -40,18 +38,17 @@ public record Factory( @Override public Operator get(DriverContext driverContext) { // TODO: use TimeSeriesBlockHash when possible - return new TimeSeriesAggregationOperator(timeBucket, aggregators, () -> { - if (sortedInput && groups.size() == 2) { - return new TimeSeriesBlockHash(groups.get(0).channel(), groups.get(1).channel(), driverContext.blockFactory()); - } else { - return BlockHash.build( - groups, - driverContext.blockFactory(), - maxPageSize, - true // we can enable optimizations as the inputs are vectors - ); - } - }, driverContext); + return new TimeSeriesAggregationOperator( + timeBucket, + aggregators, + () -> BlockHash.build( + groups, + driverContext.blockFactory(), + maxPageSize, + true // we can enable optimizations as the inputs are vectors + ), + driverContext + ); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorStatusTests.java deleted file mode 100644 index 21c024e9eda4f..0000000000000 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorStatusTests.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.lucene; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.test.ESTestCase; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; - -import static org.hamcrest.Matchers.equalTo; - -public class TimeSeriesSourceOperatorStatusTests extends AbstractWireSerializingTestCase { - public static TimeSeriesSourceOperator.Status simple() { - return new TimeSeriesSourceOperator.Status( - 2, - Set.of("*:*"), - new TreeSet<>(List.of("a:0", "a:1")), - 1002, - 0, - 1, - 5, - 123, - 99990, - 8000, - 222, - Map.of("b:0", LuceneSliceQueue.PartitioningStrategy.SHARD, "a:1", LuceneSliceQueue.PartitioningStrategy.DOC), - 250, - 28000 - ); - } - - public static String simpleToJson() { - return """ - { - "processed_slices" : 2, - "processed_queries" : [ - "*:*" - ], - "processed_shards" : [ - "a:0", - "a:1" - ], - "process_nanos" : 1002, - "process_time" : "1micros", - "slice_index" : 0, - "total_slices" : 1, - "pages_emitted" : 5, - "slice_min" : 123, - "slice_max" : 99990, - "current" : 8000, - "rows_emitted" : 222, - "partitioning_strategies" : { - "a:1" : "DOC", - "b:0" : "SHARD" - }, - "tsid_loaded" : 250, - "values_loaded" : 28000 - }"""; - } - - public void testToXContent() { - assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); - } - - @Override - protected Writeable.Reader instanceReader() { - return TimeSeriesSourceOperator.Status::new; - } - - @Override - public TimeSeriesSourceOperator.Status createTestInstance() { - return new TimeSeriesSourceOperator.Status( - randomNonNegativeInt(), - randomProcessedQueries(), - randomProcessedShards(), - randomNonNegativeLong(), - randomNonNegativeInt(), - randomNonNegativeInt(), - randomNonNegativeInt(), - randomNonNegativeInt(), - randomNonNegativeInt(), - randomNonNegativeInt(), - randomNonNegativeLong(), - randomPartitioningStrategies(), - randomNonNegativeLong(), - randomNonNegativeLong() - ); - } - - private static Set randomProcessedQueries() { - int size = between(0, 10); - Set set = new TreeSet<>(); - while (set.size() < size) { - set.add(randomAlphaOfLength(5)); - } - return set; - } - - private static Set randomProcessedShards() { - int size = between(0, 10); - Set set = new TreeSet<>(); - while (set.size() < size) { - set.add(randomAlphaOfLength(3) + ":" + between(0, 10)); - } - return set; - } - - private static Map randomPartitioningStrategies() { - int size = between(0, 10); - Map partitioningStrategies = new HashMap<>(); - while (partitioningStrategies.size() < size) { - partitioningStrategies.put( - randomAlphaOfLength(3) + ":" + between(0, 10), - randomFrom(LuceneSliceQueue.PartitioningStrategy.values()) - ); - } - return partitioningStrategies; - } - - @Override - protected TimeSeriesSourceOperator.Status mutateInstance(TimeSeriesSourceOperator.Status instance) { - int processedSlices = instance.processedSlices(); - Set processedQueries = instance.processedQueries(); - Set processedShards = instance.processedShards(); - long processNanos = instance.processNanos(); - int sliceIndex = instance.sliceIndex(); - int totalSlices = instance.totalSlices(); - int pagesEmitted = instance.pagesEmitted(); - int sliceMin = instance.sliceMin(); - int sliceMax = instance.sliceMax(); - int current = instance.current(); - long rowsEmitted = instance.rowsEmitted(); - long tsidLoaded = 
instance.tsidLoaded(); - long valuesLoaded = instance.valuesLoaded(); - Map partitioningStrategies = instance.partitioningStrategies(); - switch (between(0, 13)) { - case 0 -> processedSlices = randomValueOtherThan(processedSlices, ESTestCase::randomNonNegativeInt); - case 1 -> processedQueries = randomValueOtherThan( - processedQueries, - TimeSeriesSourceOperatorStatusTests::randomProcessedQueries - ); - case 2 -> processedShards = randomValueOtherThan(processedShards, TimeSeriesSourceOperatorStatusTests::randomProcessedShards); - case 3 -> processNanos = randomValueOtherThan(processNanos, ESTestCase::randomNonNegativeLong); - case 4 -> sliceIndex = randomValueOtherThan(sliceIndex, ESTestCase::randomNonNegativeInt); - case 5 -> totalSlices = randomValueOtherThan(totalSlices, ESTestCase::randomNonNegativeInt); - case 6 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); - case 7 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); - case 8 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); - case 9 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); - case 10 -> rowsEmitted = randomValueOtherThan(rowsEmitted, ESTestCase::randomNonNegativeLong); - case 11 -> partitioningStrategies = randomValueOtherThan( - partitioningStrategies, - TimeSeriesSourceOperatorStatusTests::randomPartitioningStrategies - ); - case 12 -> tsidLoaded = randomValueOtherThan(tsidLoaded, ESTestCase::randomNonNegativeLong); - case 13 -> valuesLoaded = randomValueOtherThan(valuesLoaded, ESTestCase::randomNonNegativeLong); - default -> throw new UnsupportedOperationException(); - } - return new TimeSeriesSourceOperator.Status( - processedSlices, - processedQueries, - processedShards, - processNanos, - sliceIndex, - totalSlices, - pagesEmitted, - sliceMin, - sliceMax, - current, - rowsEmitted, - partitioningStrategies, - tsidLoaded, - valuesLoaded - ); - } -} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorTests.java deleted file mode 100644 index b58f68ab53beb..0000000000000 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSourceOperatorTests.java +++ /dev/null @@ -1,500 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.lucene; - -import org.apache.lucene.document.DoubleDocValuesField; -import org.apache.lucene.document.FloatDocValuesField; -import org.apache.lucene.document.LongField; -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.document.SortedDocValuesField; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.document.SortedSetDocValuesField; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSortField; -import org.apache.lucene.store.Directory; -import org.apache.lucene.tests.index.RandomIndexWriter; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.Strings; -import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.DocVector; -import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.LongVector; -import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.lucene.read.TimeSeriesExtractFieldOperator; -import org.elasticsearch.compute.lucene.read.ValuesSourceReaderOperator; -import org.elasticsearch.compute.lucene.read.ValuesSourceReaderOperatorTests; -import org.elasticsearch.compute.operator.Driver; -import org.elasticsearch.compute.operator.DriverContext; -import org.elasticsearch.compute.operator.DriverStatus; -import org.elasticsearch.compute.operator.Operator; -import org.elasticsearch.compute.test.OperatorTestCase; -import org.elasticsearch.compute.test.SourceOperatorTestCase; -import org.elasticsearch.compute.test.TestDriverFactory; -import org.elasticsearch.compute.test.TestResultPageSinkOperator; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.KeywordFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.RoutingPathFields; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; -import org.hamcrest.Matcher; -import org.junit.After; - -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.function.Consumer; -import java.util.function.Function; - -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.lessThanOrEqualTo; - -public class TimeSeriesSourceOperatorTests extends SourceOperatorTestCase { - - private IndexReader reader; - private final Directory directory = newDirectory(); - - @After - public void cleanup() throws IOException { - IOUtils.close(reader, directory); - } - - public void testSimple() { - int numTimeSeries = 3; - int numSamplesPerTS = 10; - long 
timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - int maxPageSize = between(1, 1024); - List results = runDriver(1024, maxPageSize, randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); - // for now we emit at most one time series each page - int offset = 0; - for (Page page : results) { - assertThat(page.getBlockCount(), equalTo(5)); - DocVector docVector = (DocVector) page.getBlock(0).asVector(); - BytesRefVector tsidVector = (BytesRefVector) page.getBlock(1).asVector(); - LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); - for (int i = 0; i < page.getPositionCount(); i++) { - int expectedTsidOrd = offset / numSamplesPerTS; - String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); - long expectedVoltage = 5L + expectedTsidOrd; - int sampleIndex = offset - expectedTsidOrd * numSamplesPerTS; - long expectedTimestamp = timestampStart + ((numSamplesPerTS - sampleIndex - 1) * 10_000L); - assertThat(docVector.shards().getInt(i), equalTo(0)); - assertThat(voltageVector.getLong(i), equalTo(expectedVoltage)); - assertThat(hostnameVector.getBytesRef(i, new BytesRef()).utf8ToString(), equalTo(expectedHostname)); - assertThat(tsidVector.getBytesRef(i, new BytesRef()).utf8ToString(), equalTo("\u0001\bhostnames\u0007" + expectedHostname)); - assertThat(timestampVector.getLong(i), equalTo(expectedTimestamp)); - offset++; - } - } - } - - public void testStatus() { - int numTimeSeries = between(1, 5); - int numSamplesPerTS = between(1, 10); - long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - int maxPageSize = 128; - DriverContext driverContext = driverContext(); - Driver driver = createDriver( - driverContext, - 1024, - maxPageSize, - true, - numTimeSeries, - numSamplesPerTS, - timestampStart, - Page::releaseBlocks - ); - OperatorTestCase.runDriver(driver); - DriverStatus driverStatus = driver.status(); - var status = (TimeSeriesSourceOperator.Status) driverStatus.completedOperators().get(0).status(); - assertThat(status.tsidLoaded(), equalTo((long) numTimeSeries)); - assertThat(status.rowsEmitted(), equalTo((long) numTimeSeries * numSamplesPerTS)); - assertThat(status.documentsFound(), equalTo((long) numTimeSeries * numSamplesPerTS)); - assertThat(status.valuesLoaded(), equalTo((long) numTimeSeries * numSamplesPerTS)); - - String expected = String.format( - Locale.ROOT, - """ - { - "processed_slices" : 1, - "processed_queries" : [ - "*:*" - ], - "processed_shards" : [ - "test" - ], - "process_nanos" : %d, - "process_time" : "%s", - "slice_index" : 0, - "total_slices" : 1, - "pages_emitted" : 1, - "slice_min" : 0, - "slice_max" : 0, - "current" : 0, - "rows_emitted" : %s, - "partitioning_strategies" : { - "test" : "SHARD" - }, - "tsid_loaded" : %d, - "values_loaded" : %d - } - """, - status.processNanos(), - TimeValue.timeValueNanos(status.processNanos()), - status.rowsEmitted(), - status.tsidLoaded(), - status.valuesLoaded() - ); - assertThat(Strings.toString(status, true, true).trim(), equalTo(expected.trim())); - } - - public void testLimit() { - int numTimeSeries = 3; - int numSamplesPerTS = 10; - int limit = 1; - long timestampStart = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - List results = runDriver(limit, randomIntBetween(1, 1024), 
randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); - assertThat(results, hasSize(1)); - Page page = results.get(0); - assertThat(page.getBlockCount(), equalTo(5)); - - DocVector docVector = (DocVector) page.getBlock(0).asVector(); - assertThat(docVector.getPositionCount(), equalTo(limit)); - - BytesRefVector tsidVector = (BytesRefVector) page.getBlock(1).asVector(); - assertThat(tsidVector.getPositionCount(), equalTo(limit)); - - LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - assertThat(timestampVector.getPositionCount(), equalTo(limit)); - - LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); - assertThat(voltageVector.getPositionCount(), equalTo(limit)); - - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); - assertThat(hostnameVector.getPositionCount(), equalTo(limit)); - - assertThat(docVector.shards().getInt(0), equalTo(0)); - assertThat(voltageVector.getLong(0), equalTo(5L)); - assertThat(hostnameVector.getBytesRef(0, new BytesRef()).utf8ToString(), equalTo("host-00")); - assertThat(tsidVector.getBytesRef(0, new BytesRef()).utf8ToString(), equalTo("\u0001\bhostnames\u0007host-00")); // legacy tsid - assertThat(timestampVector.getLong(0), equalTo(timestampStart + ((numSamplesPerTS - 1) * 10_000L))); - } - - public void testRandom() { - record Doc(int host, long timestamp, long metric) {} - int numDocs = between(1, 5000); - List docs = new ArrayList<>(); - Map timestamps = new HashMap<>(); - long t0 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - for (int i = 0; i < numDocs; i++) { - int tsid = randomIntBetween(0, 9); - long timestamp = timestamps.compute(tsid, (k, curr) -> { - long t = curr != null ? curr : t0; - return t + randomIntBetween(1, 5000); - }); - docs.add(new Doc(tsid, timestamp, randomIntBetween(1, 10000))); - } - int maxPageSize = between(1, 1024); - int limit = randomBoolean() ? 
between(1, 100000) : Integer.MAX_VALUE; - var metricField = new NumberFieldMapper.NumberFieldType("metric", NumberFieldMapper.NumberType.LONG); - var timeSeriesFactory = createTimeSeriesSourceOperator( - directory, - r -> this.reader = r, - limit, - maxPageSize, - randomBoolean(), - writer -> { - Randomness.shuffle(docs); - for (Doc doc : docs) { - writeTS(writer, doc.timestamp, new Object[] { "hostname", "h" + doc.host }, new Object[] { "metric", doc.metric }); - } - return docs.size(); - } - ); - DriverContext driverContext = driverContext(); - List results = new ArrayList<>(); - OperatorTestCase.runDriver( - TestDriverFactory.create( - driverContext, - timeSeriesFactory.get(driverContext), - List.of(extractFieldsFactory(reader, List.of(new ExtractField(metricField, ElementType.LONG))).get(driverContext)), - new TestResultPageSinkOperator(results::add) - ) - ); - docs.sort(Comparator.comparing(Doc::host).thenComparing(Comparator.comparingLong(Doc::timestamp).reversed())); - Map hostToTsidOrd = new HashMap<>(); - timestamps.keySet().stream().sorted().forEach(n -> hostToTsidOrd.put(n, hostToTsidOrd.size())); - int offset = 0; - for (int p = 0; p < results.size(); p++) { - Page page = results.get(p); - if (p < results.size() - 1) { - assertThat(page.getPositionCount(), equalTo(maxPageSize)); - } else { - assertThat(page.getPositionCount(), lessThanOrEqualTo(limit)); - assertThat(page.getPositionCount(), lessThanOrEqualTo(maxPageSize)); - } - assertThat(page.getBlockCount(), equalTo(4)); - DocVector docVector = (DocVector) page.getBlock(0).asVector(); - BytesRefVector tsidVector = (BytesRefVector) page.getBlock(1).asVector(); - LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - LongVector metricVector = (LongVector) page.getBlock(3).asVector(); - for (int i = 0; i < page.getPositionCount(); i++) { - Doc doc = docs.get(offset); - offset++; - assertThat(docVector.shards().getInt(0), equalTo(0)); - assertThat(tsidVector.getBytesRef(i, new BytesRef()).utf8ToString(), equalTo("\u0001\bhostnames\u0002h" + doc.host)); - assertThat(timestampVector.getLong(i), equalTo(doc.timestamp)); - assertThat(metricVector.getLong(i), equalTo(doc.metric)); - } - } - assertThat(offset, equalTo(Math.min(limit, numDocs))); - } - - public void testMatchNone() throws Exception { - long t0 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - Sort sort = new Sort( - new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), - new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) - ); - try ( - var directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter( - random(), - directory, - newIndexWriterConfig().setIndexSort(sort).setMergePolicy(NoMergePolicy.INSTANCE) - ) - ) { - int numDocs = between(1, 100); - long timestamp = t0; - int metrics = randomIntBetween(1, 3); - for (int i = 0; i < numDocs; i++) { - timestamp += between(1, 1000); - for (int j = 0; j < metrics; j++) { - String hostname = String.format(Locale.ROOT, "sensor-%02d", j); - writeTS(writer, timestamp, new Object[] { "sensor", hostname }, new Object[] { "voltage", j + 5 }); - } - } - try (var reader = writer.getReader()) { - var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); - Query query = randomFrom(LongField.newRangeQuery("@timestamp", 0, t0), new MatchNoDocsQuery()); - var timeSeriesFactory = TimeSeriesSourceOperatorFactory.create( - Integer.MAX_VALUE, - randomIntBetween(1, 1024), - 1, - List.of(ctx), 
- unused -> List.of(new LuceneSliceQueue.QueryAndTags(query, List.of())) - ); - var driverContext = driverContext(); - List results = new ArrayList<>(); - OperatorTestCase.runDriver( - TestDriverFactory.create( - driverContext, - timeSeriesFactory.get(driverContext), - List.of(), - new TestResultPageSinkOperator(results::add) - ) - ); - assertThat(results, empty()); - } - } - } - - @Override - protected Operator.OperatorFactory simple(SimpleOptions options) { - return createTimeSeriesSourceOperator(directory, r -> this.reader = r, 1, 1, true, writer -> { - long timestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); - writeTS(writer, timestamp, new Object[] { "hostname", "host-01" }, new Object[] { "voltage", 2 }); - return 1; - }); - } - - @Override - protected Matcher expectedDescriptionOfSimple() { - return equalTo("TimeSeriesSourceOperator[maxPageSize = 1, limit = 1]"); - } - - @Override - protected Matcher expectedToStringOfSimple() { - return equalTo("TimeSeriesSourceOperator[shards = [test], maxPageSize = 1[maxPageSize=1, remainingDocs=1]]"); - } - - List runDriver(int limit, int maxPageSize, boolean forceMerge, int numTimeSeries, int numSamplesPerTS, long timestampStart) { - var ctx = driverContext(); - List results = new ArrayList<>(); - OperatorTestCase.runDriver( - createDriver(ctx, limit, maxPageSize, forceMerge, numTimeSeries, numSamplesPerTS, timestampStart, results::add) - ); - OperatorTestCase.assertDriverContext(ctx); - for (Page result : results) { - assertThat(result.getPositionCount(), lessThanOrEqualTo(maxPageSize)); - assertThat(result.getPositionCount(), lessThanOrEqualTo(limit)); - } - return results; - } - - Driver createDriver( - DriverContext driverContext, - int limit, - int maxPageSize, - boolean forceMerge, - int numTimeSeries, - int numSamplesPerTS, - long timestampStart, - Consumer consumer - ) { - var voltageField = new NumberFieldMapper.NumberFieldType("voltage", NumberFieldMapper.NumberType.LONG); - var hostnameField = new KeywordFieldMapper.KeywordFieldType("hostname"); - var extractFields = List.of( - new ExtractField(voltageField, ElementType.LONG), - new ExtractField(hostnameField, ElementType.BYTES_REF) - ); - var timeSeriesFactory = createTimeSeriesSourceOperator( - directory, - indexReader -> this.reader = indexReader, - limit, - maxPageSize, - forceMerge, - writer -> { - long timestamp = timestampStart; - for (int i = 0; i < numSamplesPerTS; i++) { - for (int j = 0; j < numTimeSeries; j++) { - String hostname = String.format(Locale.ROOT, "host-%02d", j); - writeTS(writer, timestamp, new Object[] { "hostname", hostname }, new Object[] { "voltage", j + 5 }); - } - timestamp += 10_000; - writer.commit(); - } - return numTimeSeries * numSamplesPerTS; - } - ); - return TestDriverFactory.create( - driverContext, - timeSeriesFactory.get(driverContext), - List.of(extractFieldsFactory(reader, extractFields).get(driverContext)), - new TestResultPageSinkOperator(consumer) - ); - } - - public record ExtractField(MappedFieldType ft, ElementType elementType) { - - } - - public static TimeSeriesSourceOperatorFactory createTimeSeriesSourceOperator( - Directory directory, - Consumer readerConsumer, - int limit, - int maxPageSize, - boolean forceMerge, - CheckedFunction indexingLogic - ) { - Sort sort = new Sort( - new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false), - new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) - ); - IndexReader reader; - try ( - 
RandomIndexWriter writer = new RandomIndexWriter( - random(), - directory, - newIndexWriterConfig().setIndexSort(sort).setMergePolicy(NoMergePolicy.INSTANCE) - ) - ) { - - int numDocs = indexingLogic.apply(writer); - if (forceMerge) { - writer.forceMerge(1); - } - reader = writer.getReader(); - readerConsumer.accept(reader); - assertThat(reader.numDocs(), equalTo(numDocs)); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); - Function> queryFunction = c -> List.of( - new LuceneSliceQueue.QueryAndTags(new MatchAllDocsQuery(), List.of()) - ); - return TimeSeriesSourceOperatorFactory.create(limit, maxPageSize, 1, List.of(ctx), queryFunction); - } - - public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { - final List fields = new ArrayList<>(); - fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - var routingPathFields = new RoutingPathFields(null); - for (int i = 0; i < dimensions.length; i += 2) { - if (dimensions[i + 1] instanceof Number n) { - routingPathFields.addLong(dimensions[i].toString(), n.longValue()); - } else { - routingPathFields.addString(dimensions[i].toString(), dimensions[i + 1].toString()); - fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); - } - } - for (int i = 0; i < metrics.length; i += 2) { - if (metrics[i + 1] instanceof Integer || metrics[i + 1] instanceof Long) { - fields.add(new NumericDocValuesField(metrics[i].toString(), ((Number) metrics[i + 1]).longValue())); - } else if (metrics[i + 1] instanceof Float) { - fields.add(new FloatDocValuesField(metrics[i].toString(), (float) metrics[i + 1])); - } else if (metrics[i + 1] instanceof Double) { - fields.add(new DoubleDocValuesField(metrics[i].toString(), (double) metrics[i + 1])); - } - } - // Use legacy tsid to make tests easier to understand: - fields.add( - new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef()) - ); - iw.addDocument(fields); - } - - TimeSeriesExtractFieldOperator.Factory extractFieldsFactory(IndexReader reader, List extractFields) { - var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0) { - @Override - public MappedFieldType fieldType(String name) { - for (ExtractField e : extractFields) { - if (e.ft.name().equals(name)) { - return e.ft; - } - } - throw new IllegalArgumentException("Unknown field [" + name + "]"); - } - }; - var fieldInfos = extractFields.stream() - .map( - f -> new ValuesSourceReaderOperator.FieldInfo( - f.ft.name(), - f.elementType, - false, - n -> f.ft.blockLoader(ValuesSourceReaderOperatorTests.blContext()) - ) - ) - .toList(); - return new TimeSeriesExtractFieldOperator.Factory(fieldInfos, List.of(ctx)); - } -} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java index 121c884a57448..dffebfe117078 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/TimeSeriesIT.java @@ -12,8 +12,8 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.compute.lucene.LuceneSliceQueue; import org.elasticsearch.compute.lucene.LuceneSourceOperator; -import org.elasticsearch.compute.lucene.TimeSeriesSourceOperator; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.OperatorStatus; import org.elasticsearch.compute.operator.TimeSeriesAggregationOperator; @@ -33,7 +33,6 @@ import static org.elasticsearch.index.mapper.DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -554,32 +553,17 @@ public void testProfile() { request.profile(true); request.pragmas(pragmas); request.acceptedPragmaRisks(true); - request.query("TS my-hosts | STATS sum(rate(request_count)) BY cluster, bucket(@timestamp, 1minute) | SORT cluster"); + request.query("TS my-hosts | STATS sum(rate(request_count)) BY cluster | SORT cluster"); try (var resp = run(request)) { EsqlQueryResponse.Profile profile = resp.profile(); List dataProfiles = profile.drivers().stream().filter(d -> d.description().equals("data")).toList(); for (DriverProfile p : dataProfiles) { - if (p.operators().stream().anyMatch(s -> s.status() instanceof TimeSeriesSourceOperator.Status)) { - assertThat(p.operators(), hasSize(2)); - TimeSeriesSourceOperator.Status status = (TimeSeriesSourceOperator.Status) p.operators().get(0).status(); - // If the target shard is empty or does not match the query, processedShards will be empty. - // TODO: Update ComputeService to avoid creating pipelines for non-matching or empty shards. 
- assertThat(status.processedShards(), either(hasSize(1)).or(empty())); - assertThat(p.operators().get(1).operator(), equalTo("ExchangeSinkOperator")); - } else if (p.operators().stream().anyMatch(s -> s.status() instanceof TimeSeriesAggregationOperator.Status)) { - assertThat(p.operators(), hasSize(3)); - assertThat(p.operators().get(0).operator(), equalTo("ExchangeSourceOperator")); - assertThat(p.operators().get(1).operator(), containsString("TimeSeriesAggregationOperator")); - assertThat(p.operators().get(2).operator(), equalTo("ExchangeSinkOperator")); - } else { - assertThat(p.operators(), hasSize(4)); - assertThat(p.operators().get(0).operator(), equalTo("ExchangeSourceOperator")); - assertThat(p.operators().get(1).operator(), containsString("TimeSeriesExtractFieldOperator")); - assertThat(p.operators().get(2).operator(), containsString("EvalOperator")); - assertThat(p.operators().get(3).operator(), equalTo("ExchangeSinkOperator")); + assertThat(p.operators().get(0).operator(), containsString("LuceneSourceOperator")); + LuceneSourceOperator.Status luceneStatus = (LuceneSourceOperator.Status) p.operators().get(0).status(); + for (LuceneSliceQueue.PartitioningStrategy v : luceneStatus.partitioningStrategies().values()) { + assertThat(v, equalTo(LuceneSliceQueue.PartitioningStrategy.SHARD)); } } - assertThat(dataProfiles, hasSize(9)); } } // non-rate aggregation is executed with multiple shards at a time diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java index 9914d34ce80fd..b87cb8d40f34d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java @@ -10,9 +10,9 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.RateDoubleAggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.RateIntAggregatorFunctionSupplier; -import org.elasticsearch.compute.aggregation.RateLongAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.RateDoubleGroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.RateIntGroupingAggregatorFunction; +import org.elasticsearch.compute.aggregation.RateLongGroupingAggregatorFunction; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Literal; @@ -118,9 +118,9 @@ protected TypeResolution resolveType() { public AggregatorFunctionSupplier supplier() { final DataType type = field().dataType(); return switch (type) { - case COUNTER_LONG -> new RateLongAggregatorFunctionSupplier(); - case COUNTER_INTEGER -> new RateIntAggregatorFunctionSupplier(); - case COUNTER_DOUBLE -> new RateDoubleAggregatorFunctionSupplier(); + case COUNTER_LONG -> new RateLongGroupingAggregatorFunction.FunctionSupplier(); + case COUNTER_INTEGER -> new RateIntGroupingAggregatorFunction.FunctionSupplier(); + case COUNTER_DOUBLE -> new RateDoubleGroupingAggregatorFunction.FunctionSupplier(); default -> throw EsqlIllegalArgumentException.illegalDataType(type); }; } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 15cc1d54eb3a6..deb79221b3ea0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -12,7 +12,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.EnableSpatialDistancePushdown; import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.InsertFieldExtraction; -import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.ParallelizeTimeSeriesSource; import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushFiltersToSource; import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushLimitToSource; import org.elasticsearch.xpack.esql.optimizer.rules.physical.local.PushSampleToSource; @@ -88,8 +87,7 @@ protected static List> rules(boolean optimizeForEsSource) { Limiter.ONCE, new InsertFieldExtraction(), new SpatialDocValuesExtraction(), - new SpatialShapeBoundsExtraction(), - new ParallelizeTimeSeriesSource() + new SpatialShapeBoundsExtraction() ); return optimizeForEsSource ? List.of(pushdown, substitutionRules, fieldExtraction) : List.of(pushdown, fieldExtraction); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ParallelizeTimeSeriesSource.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ParallelizeTimeSeriesSource.java deleted file mode 100644 index d26599d7a96c9..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ParallelizeTimeSeriesSource.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; - -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.xpack.esql.core.expression.Attribute; -import org.elasticsearch.xpack.esql.core.util.Holder; -import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; -import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules; -import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; -import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; -import org.elasticsearch.xpack.esql.plan.physical.FilterExec; -import org.elasticsearch.xpack.esql.plan.physical.LimitExec; -import org.elasticsearch.xpack.esql.plan.physical.ParallelExec; -import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; -import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesAggregateExec; -import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesFieldExtractExec; -import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesSourceExec; -import org.elasticsearch.xpack.esql.plan.physical.TopNExec; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * An optimization rule vertically partitions the time-series into three parts: time-series source, field extraction, - * and time-series aggregation so that they can run parallel to speed up time-series query. - * For the field-extraction part, it will use a specialized version for time-series indices. - */ -public class ParallelizeTimeSeriesSource extends PhysicalOptimizerRules.ParameterizedOptimizerRule< - TimeSeriesAggregateExec, - LocalPhysicalOptimizerContext> { - - @Override - public PhysicalPlan rule(TimeSeriesAggregateExec plan, LocalPhysicalOptimizerContext context) { - if (plan.getMode().isInputPartial()) { - return plan; - } - if (plan.anyMatch(p -> p instanceof EsQueryExec q && q.indexMode() == IndexMode.TIME_SERIES) == false) { - return plan; - } - final List pushDownExtracts = new ArrayList<>(); - plan.forEachDown(p -> { - if (p instanceof FieldExtractExec) { - pushDownExtracts.add((FieldExtractExec) p); - } else if (stopPushDownExtract(p)) { - if (pushDownExtracts.isEmpty() == false) { - pushDownExtracts.clear(); - } - } - }); - final Holder aborted = new Holder<>(Boolean.FALSE); - PhysicalPlan newChild = plan.child().transformUp(PhysicalPlan.class, p -> { - if (aborted.get()) { - return p; - } - if (p instanceof EsQueryExec q && q.indexMode() == IndexMode.TIME_SERIES) { - return addFieldExtract(context, q, pushDownExtracts); - } - if (stopPushDownExtract(p)) { - aborted.set(Boolean.TRUE); - return p; - } - if (p instanceof FieldExtractExec e) { - return e.child(); - } - return p; - }); - return plan.replaceChild(new ParallelExec(plan.source(), newChild)); - } - - private static boolean stopPushDownExtract(PhysicalPlan p) { - return p instanceof FilterExec || p instanceof TopNExec || p instanceof LimitExec; - } - - private PhysicalPlan addFieldExtract(LocalPhysicalOptimizerContext context, EsQueryExec query, List extracts) { - Set docValuesAttributes = new HashSet<>(); - Set boundsAttributes = new HashSet<>(); - List attributesToExtract = new ArrayList<>(); - for (FieldExtractExec extract : extracts) { - docValuesAttributes.addAll(extract.docValuesAttributes()); - boundsAttributes.addAll(extract.boundsAttributes()); - attributesToExtract.addAll(extract.attributesToExtract()); - } - List attrs = query.attrs(); - var tsSource = new TimeSeriesSourceExec(query.source(), attrs, query.query(), query.limit(), 
query.estimatedRowSize()); - return new TimeSeriesFieldExtractExec( - query.source(), - new ParallelExec(query.source(), tsSource), - attributesToExtract, - context.configuration().pragmas().fieldExtractPreference(), - docValuesAttributes, - boundsAttributes - ); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java index 5d7da31ebbea5..65e578e323763 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/ReplaceSourceAttributes.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.optimizer.rules.physical.local; -import org.elasticsearch.index.IndexMode; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute; @@ -34,34 +33,19 @@ protected PhysicalPlan rule(EsSourceExec plan) { attributes.add(docId); var outputIterator = plan.output().iterator(); - var isTimeSeries = plan.indexMode() == IndexMode.TIME_SERIES; - var keepIterating = true; - Attribute tsid = null, timestamp = null, score = null; - - while (keepIterating && outputIterator.hasNext()) { + Attribute score = null; + while (score == null && outputIterator.hasNext()) { Attribute attr = outputIterator.next(); if (attr instanceof MetadataAttribute ma) { if (ma.name().equals(MetadataAttribute.SCORE)) { score = attr; - } else if (isTimeSeries && ma.name().equals(MetadataAttribute.TSID_FIELD)) { - tsid = attr; } - } else if (attr.name().equals(MetadataAttribute.TIMESTAMP_FIELD)) { - timestamp = attr; - } - keepIterating = score == null || (isTimeSeries && (tsid == null || timestamp == null)); - } - if (isTimeSeries) { - if (tsid == null || timestamp == null) { - throw new IllegalStateException("_tsid or @timestamp are missing from the time-series source"); } - attributes.add(tsid); - attributes.add(timestamp); } + // TODO: Add timestamp_watermark for time-series if (score != null) { attributes.add(score); } - return new EsQueryExec( plan.source(), plan.indexPattern(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java index cec91aadcc3f2..2db4a49227502 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java @@ -324,8 +324,8 @@ public List queryBuilderAndTags() { } public boolean canSubstituteRoundToWithQueryBuilderAndTags() { - // TimeSeriesSourceOperator and LuceneTopNSourceOperator do not support QueryAndTags - return indexMode != IndexMode.TIME_SERIES && (sorts == null || sorts.isEmpty()); + // LuceneTopNSourceOperator doesn't support QueryAndTags + return sorts == null || sorts.isEmpty(); } /** diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ParallelExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/ParallelExec.java deleted file mode 100644 index 875bc825271fd..0000000000000 --- 
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.esql.plan.physical;
-
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
-import org.elasticsearch.xpack.esql.core.tree.Source;
-
-import java.io.IOException;
-
-/**
- * A physical plan node that hints the plan should be partitioned vertically and executed in parallel.
- */
-public final class ParallelExec extends UnaryExec {
-
-    public ParallelExec(Source source, PhysicalPlan child) {
-        super(source, child);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        throw new UnsupportedOperationException("local plan");
-    }
-
-    @Override
-    public String getWriteableName() {
-        throw new UnsupportedOperationException("local plan");
-    }
-
-    @Override
-    protected NodeInfo info() {
-        return NodeInfo.create(this, ParallelExec::new, child());
-    }
-
-    @Override
-    public ParallelExec replaceChild(PhysicalPlan newChild) {
-        return new ParallelExec(source(), newChild);
-    }
-}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TimeSeriesFieldExtractExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TimeSeriesFieldExtractExec.java
deleted file mode 100644
index ebd8fdc6c48f7..0000000000000
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TimeSeriesFieldExtractExec.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.esql.plan.physical;
-
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.xpack.esql.core.expression.Attribute;
-import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
-import org.elasticsearch.xpack.esql.core.tree.Source;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-
-public class TimeSeriesFieldExtractExec extends FieldExtractExec {
-    public TimeSeriesFieldExtractExec(
-        Source source,
-        PhysicalPlan child,
-        List<Attribute> attributesToExtract,
-        MappedFieldType.FieldExtractPreference defaultPreference,
-        Set<Attribute> docValuesAttributes,
-        Set<Attribute> boundsAttributes
-    ) {
-        super(source, child, attributesToExtract, defaultPreference, docValuesAttributes, boundsAttributes);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        throw new UnsupportedOperationException("local plan");
-    }
-
-    @Override
-    public String getWriteableName() {
-        throw new UnsupportedOperationException("local plan");
-    }
-
-    @Override
-    protected NodeInfo info() {
-        return NodeInfo.create(
-            this,
-            TimeSeriesFieldExtractExec::new,
-            child(),
-            attributesToExtract,
-            defaultPreference,
-            docValuesAttributes,
-            boundsAttributes
-        );
-    }
-
-    @Override
-    public UnaryExec replaceChild(PhysicalPlan newChild) {
-        return new TimeSeriesFieldExtractExec(
-            source(),
-            newChild,
-            attributesToExtract,
-            defaultPreference,
-            docValuesAttributes,
-            boundsAttributes
-        );
-    }
-
-    @Override
-    public FieldExtractExec withDocValuesAttributes(Set<Attribute> docValuesAttributes) {
-        return new TimeSeriesFieldExtractExec(
-            source(),
-            child(),
-            attributesToExtract,
-            defaultPreference,
-            docValuesAttributes,
-            boundsAttributes
-        );
-    }
-
-    @Override
-    public FieldExtractExec withBoundsAttributes(Set<Attribute> boundsAttributes) {
-        return new TimeSeriesFieldExtractExec(
-            source(),
-            child(),
-            attributesToExtract,
-            defaultPreference,
-            docValuesAttributes,
-            boundsAttributes
-        );
-    }
-}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TimeSeriesSourceExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TimeSeriesSourceExec.java
deleted file mode 100644
index 8776cf583f47d..0000000000000
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/TimeSeriesSourceExec.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-package org.elasticsearch.xpack.esql.plan.physical;
-
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.xpack.esql.core.expression.Attribute;
-import org.elasticsearch.xpack.esql.core.expression.Expression;
-import org.elasticsearch.xpack.esql.core.tree.NodeInfo;
-import org.elasticsearch.xpack.esql.core.tree.NodeUtils;
-import org.elasticsearch.xpack.esql.core.tree.Source;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-
-/**
- * Similar to {@link EsQueryExec}, but this is a physical plan specifically for time series indices.
- */
-public class TimeSeriesSourceExec extends LeafExec implements EstimatesRowSize {
-
-    private final List<Attribute> attrs;
-    private final QueryBuilder query;
-    private final Expression limit;
-    private final Integer estimatedRowSize;
-
-    public TimeSeriesSourceExec(Source source, List<Attribute> attrs, QueryBuilder query, Expression limit, Integer estimatedRowSize) {
-        super(source);
-        this.attrs = attrs;
-        this.query = query;
-        this.limit = limit;
-        this.estimatedRowSize = estimatedRowSize;
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        throw new UnsupportedOperationException("local plan");
-    }
-
-    @Override
-    public String getWriteableName() {
-        throw new UnsupportedOperationException("local plan");
-    }
-
-    @Override
-    protected NodeInfo info() {
-        return NodeInfo.create(this, TimeSeriesSourceExec::new, attrs, query, limit, estimatedRowSize);
-    }
-
-    public QueryBuilder query() {
-        return query;
-    }
-
-    public List<Attribute> attrs() {
-        return attrs;
-    }
-
-    @Override
-    public List<Attribute> output() {
-        return attrs;
-    }
-
-    public Expression limit() {
-        return limit;
-    }
-
-    public Integer estimatedRowSize() {
-        return estimatedRowSize;
-    }
-
-    @Override
-    public PhysicalPlan estimateRowSize(State state) {
-        state.add(false, Integer.BYTES * 2);
-        state.add(false, 22); // tsid
-        state.add(false, 8); // timestamp
-        int size = state.consumeAllFields(false);
-        if (Objects.equals(this.estimatedRowSize, size)) {
-            return this;
-        } else {
-            return new TimeSeriesSourceExec(source(), attrs, query, limit, size);
-        }
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hash(
-            attrs,
-            query,
-
-            limit,
-            estimatedRowSize
-        );
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (obj == null || getClass() != obj.getClass()) {
-            return false;
-        }
-
-        TimeSeriesSourceExec other = (TimeSeriesSourceExec) obj;
-        return Objects.equals(attrs, other.attrs)
-            && Objects.equals(query, other.query)
-            && Objects.equals(limit, other.limit)
-            && Objects.equals(estimatedRowSize, other.estimatedRowSize);
-    }
-
-    @Override
-    public String nodeString() {
-        return nodeName()
-            + "["
-            + "query["
-            + (query != null ? Strings.toString(query, false, true) : "")
-            + "] attributes: ["
-            + NodeUtils.limitedToString(attrs)
-            + "], estimatedRowSize["
-            + estimatedRowSize
-            + "]";
-    }
-}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
index 9b9712289b9d3..dc0f4ea7dcdd3 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
@@ -28,7 +28,6 @@
 import org.elasticsearch.compute.lucene.LuceneSourceOperator;
 import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator;
 import org.elasticsearch.compute.lucene.TimeSeriesSourceOperatorFactory;
-import org.elasticsearch.compute.lucene.read.TimeSeriesExtractFieldOperator;
 import org.elasticsearch.compute.lucene.read.ValuesSourceReaderOperator;
 import org.elasticsearch.compute.operator.Operator;
 import org.elasticsearch.compute.operator.SourceOperator;
@@ -76,8 +75,6 @@
 import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize;
 import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec;
 import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesAggregateExec;
-import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesFieldExtractExec;
-import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesSourceExec;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.DriverParallelism;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation;
@@ -178,15 +175,10 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi
             layout.append(attr);
         }
         var fields = extractFields(fieldExtractExec);
-        if (fieldExtractExec instanceof TimeSeriesFieldExtractExec) {
-            // TODO: consolidate with ValuesSourceReaderOperator
-            return source.with(new TimeSeriesExtractFieldOperator.Factory(fields, shardContexts), layout.build());
-        } else {
-            return source.with(
-                new ValuesSourceReaderOperator.Factory(physicalSettings.valuesLoadingJumboSize(), fields, readers, docChannel),
-                layout.build()
-            );
-        }
+        return source.with(
+            new ValuesSourceReaderOperator.Factory(physicalSettings.valuesLoadingJumboSize(), fields, readers, docChannel),
+            layout.build()
+        );
     }
 
     private static String getFieldName(Attribute attr) {
@@ -281,10 +273,6 @@ public Function extractFields(FieldExtractExec fieldExtractExec) {
         List attributes = fieldExtractExec.attributesToExtract();
         List fieldInfos = new ArrayList<>(attributes.size());
@@ -425,7 +405,6 @@ public Operator.OperatorFactory timeSeriesAggregatorOperatorFactory(
     ) {
         return new TimeSeriesAggregationOperator.Factory(
             ts.timeBucketRounding(context.foldCtx()),
-            shardContexts.size() == 1 && ts.anyMatch(p -> p instanceof TimeSeriesSourceExec),
             groupSpecs,
             aggregatorMode,
             aggregatorFactories,
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
index 259184714b40f..03f13eee24260 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
@@ -47,7 +47,6 @@
 import org.elasticsearch.compute.operator.SourceOperator;
 import org.elasticsearch.compute.operator.SourceOperator.SourceOperatorFactory;
 import org.elasticsearch.compute.operator.StringExtractOperator;
-import org.elasticsearch.compute.operator.exchange.DirectExchange;
 import org.elasticsearch.compute.operator.exchange.ExchangeSink;
 import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator.ExchangeSinkOperatorFactory;
 import org.elasticsearch.compute.operator.exchange.ExchangeSource;
@@ -115,13 +114,11 @@
 import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec;
 import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec;
 import org.elasticsearch.xpack.esql.plan.physical.OutputExec;
-import org.elasticsearch.xpack.esql.plan.physical.ParallelExec;
 import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
 import org.elasticsearch.xpack.esql.plan.physical.ProjectExec;
 import org.elasticsearch.xpack.esql.plan.physical.SampleExec;
 import org.elasticsearch.xpack.esql.plan.physical.ShowExec;
 import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesAggregateExec;
-import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesSourceExec;
 import org.elasticsearch.xpack.esql.plan.physical.TopNExec;
 import org.elasticsearch.xpack.esql.plan.physical.inference.CompletionExec;
 import org.elasticsearch.xpack.esql.plan.physical.inference.RerankExec;
@@ -294,10 +291,6 @@ else if (node instanceof EsQueryExec esQuery) {
             return planShow(show);
         } else if (node instanceof ExchangeSourceExec exchangeSource) {
             return planExchangeSource(exchangeSource, exchangeSourceSupplier);
-        } else if (node instanceof ParallelExec parallelExec) {
-            return planParallelNode(parallelExec, context);
-        } else if (node instanceof TimeSeriesSourceExec ts) {
-            return planTimeSeriesSource(ts, context);
         }
         // lookups and joins
         else if (node instanceof EnrichExec enrich) {
@@ -369,10 +362,6 @@ private PhysicalOperation planEsQueryNode(EsQueryExec esQueryExec, LocalExecutio
         return physicalOperationProviders.sourcePhysicalOperation(esQueryExec, context);
     }
-    private PhysicalOperation planTimeSeriesSource(TimeSeriesSourceExec ts, LocalExecutionPlannerContext context) {
-        return physicalOperationProviders.timeSeriesSourceOperation(ts, context);
-    }
-
     private PhysicalOperation planEsStats(EsStatsQueryExec statsQuery, LocalExecutionPlannerContext context) {
         if (physicalOperationProviders instanceof EsPhysicalOperationProviders == false) {
             throw new EsqlIllegalArgumentException("EsStatsQuery should only occur against a Lucene backend");
         }
@@ -459,33 +448,6 @@ private PhysicalOperation planExchangeSource(ExchangeSourceExec exchangeSource,
         return PhysicalOperation.fromSource(new ExchangeSourceOperatorFactory(exchangeSourceSupplier), layout);
     }
 
-    private PhysicalOperation planParallelNode(ParallelExec parallelExec, LocalExecutionPlannerContext context) {
-        var exchange = new DirectExchange(context.queryPragmas.exchangeBufferSize());
-        {
-            PhysicalOperation source = plan(parallelExec.child(), context);
-            var sinkOperator = source.withSink(new ExchangeSinkOperatorFactory(exchange::exchangeSink), source.layout);
-            final TimeValue statusInterval = configuration.pragmas().statusInterval();
-            context.addDriverFactory(
-                new DriverFactory(
-                    new DriverSupplier(
-                        context.description,
-                        ClusterName.CLUSTER_NAME_SETTING.get(settings).value(),
-                        Node.NODE_NAME_SETTING.get(settings),
-                        context.bigArrays,
-                        context.blockFactory,
-                        sinkOperator,
-                        statusInterval,
-                        settings
-                    ),
-                    DriverParallelism.SINGLE
-                )
-            );
-            context.driverParallelism.set(DriverParallelism.SINGLE);
-        }
-        var exchangeSource = new ExchangeSourceExec(parallelExec.source(), parallelExec.output(), false);
-        return planExchangeSource(exchangeSource, exchange::exchangeSource);
-    }
-
     private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerContext context) {
         final Integer rowSize = topNExec.estimatedRowSize();
         assert rowSize != null && rowSize > 0 : "estimated row size [" + rowSize + "] wasn't set";
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PhysicalOperationProviders.java
index 792bdd1c2b6d1..6353005f44ace 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PhysicalOperationProviders.java
@@ -10,7 +10,6 @@
 import org.elasticsearch.xpack.esql.plan.physical.AggregateExec;
 import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec;
 import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec;
-import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesSourceExec;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation;
 
@@ -19,8 +18,6 @@ interface PhysicalOperationProviders {
 
     PhysicalOperation sourcePhysicalOperation(EsQueryExec esQuery, LocalExecutionPlannerContext context);
 
-    PhysicalOperation timeSeriesSourceOperation(TimeSeriesSourceExec ts, LocalExecutionPlannerContext context);
-
     PhysicalOperation groupingPhysicalOperation(
         AggregateExec aggregateExec,
         PhysicalOperation source,
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java
index abd19b92366f8..be019af8e03a9 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BlockFactoryProvider;
 import org.elasticsearch.compute.lucene.LuceneOperator;
-import org.elasticsearch.compute.lucene.TimeSeriesSourceOperator;
 import org.elasticsearch.compute.lucene.read.ValuesSourceReaderOperatorStatus;
 import org.elasticsearch.compute.operator.AbstractPageMappingOperator;
 import org.elasticsearch.compute.operator.AbstractPageMappingToIteratorOperator;
@@ -316,7 +315,6 @@ public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
         entries.add(HashAggregationOperator.Status.ENTRY);
         entries.add(LimitOperator.Status.ENTRY);
         entries.add(LuceneOperator.Status.ENTRY);
-        entries.add(TimeSeriesSourceOperator.Status.ENTRY);
         entries.add(TopNOperatorStatus.ENTRY);
         entries.add(MvExpandOperator.Status.ENTRY);
         entries.add(ValuesSourceReaderOperatorStatus.ENTRY);
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
index 87f356f8d30bf..eedd9aea3cbc1 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
@@ -92,11 +92,8 @@
 import org.elasticsearch.xpack.esql.plan.physical.LocalSourceExec;
 import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec;
 import org.elasticsearch.xpack.esql.plan.physical.MvExpandExec;
-import org.elasticsearch.xpack.esql.plan.physical.ParallelExec;
 import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
 import org.elasticsearch.xpack.esql.plan.physical.ProjectExec;
-import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesAggregateExec;
-import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesSourceExec;
 import org.elasticsearch.xpack.esql.plan.physical.TopNExec;
 import org.elasticsearch.xpack.esql.planner.FilterTests;
 import org.elasticsearch.xpack.esql.plugin.EsqlFlags;
@@ -2238,24 +2235,6 @@ public void testMultipleKnnQueriesInPrefilters() {
         assertEquals(expectedQuery.toString(), queryExec.query().toString());
     }
 
-    public void testParallelizeTimeSeriesPlan() {
-        assumeTrue("requires metrics command", EsqlCapabilities.Cap.METRICS_COMMAND.isEnabled());
-        var query = "TS k8s | STATS max(rate(network.total_bytes_in)) BY bucket(@timestamp, 1h)";
-        var optimizer = new TestPlannerOptimizer(config, timeSeriesAnalyzer);
-        PhysicalPlan plan = optimizer.plan(query);
-        var limit = as(plan, LimitExec.class);
-        var finalAgg = as(limit.child(), AggregateExec.class);
-        var partialAgg = as(finalAgg.child(), AggregateExec.class);
-        var timeSeriesFinalAgg = as(partialAgg.child(), TimeSeriesAggregateExec.class);
-        var exchange = as(timeSeriesFinalAgg.child(), ExchangeExec.class);
-        var timeSeriesPartialAgg = as(exchange.child(), TimeSeriesAggregateExec.class);
-        var parallel1 = as(timeSeriesPartialAgg.child(), ParallelExec.class);
-        var eval = as(parallel1.child(), EvalExec.class);
-        var fieldExtract = as(eval.child(), FieldExtractExec.class);
-        var parallel2 = as(fieldExtract.child(), ParallelExec.class);
-        as(parallel2.child(), TimeSeriesSourceExec.class);
-    }
-
     /**
      * LimitExec[1000[INTEGER]]
     * \_ExchangeExec[[!alias_integer, boolean{f}#415, byte{f}#416, constant_keyword-foo{f}#417, date{f}#418, date_nanos{f}#419,
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java
index e1ea1bf2fef94..83446d0f5fa80 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java
@@ -56,8 +56,6 @@
 import org.elasticsearch.xpack.esql.index.EsIndex;
 import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec;
 import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec;
-import org.elasticsearch.xpack.esql.plan.physical.LimitExec;
-import org.elasticsearch.xpack.esql.plan.physical.ParallelExec;
 import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
 import org.elasticsearch.xpack.esql.plugin.QueryPragmas;
 import org.elasticsearch.xpack.esql.session.Configuration;
@@ -73,7 +71,6 @@
 import java.util.Map;
 
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
@@ -233,28 +230,6 @@ public void testDriverClusterAndNodeName() throws IOException {
         assertThat(supplier.nodeName(), equalTo("node-1"));
     }
 
-    public void testParallel() throws Exception {
-        EsQueryExec queryExec = new EsQueryExec(
-            Source.EMPTY,
-            index().name(),
-            IndexMode.STANDARD,
-            index().indexNameWithModes(),
-            List.of(),
-            null,
-            null,
-            between(1, 1000),
-            List.of(new EsQueryExec.QueryBuilderAndTags(null, List.of()))
-        );
-        var limitExec = new LimitExec(
-            Source.EMPTY,
-            new ParallelExec(queryExec.source(), queryExec),
-            new Literal(Source.EMPTY, between(1, 100), DataType.INTEGER),
-            randomEstimatedRowSize(estimatedRowSizeIsHuge)
-        );
-        LocalExecutionPlanner.LocalExecutionPlan plan = planner().plan("test", FoldContext.small(), limitExec);
-        assertThat(plan.driverFactories, hasSize(2));
-    }
-
     public void testPlanUnmappedFieldExtractStoredSource() throws Exception {
         var blockLoader = constructBlockLoader();
         // In case of stored source we expect bytes based block source loader (this loads source from _source)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java
index ff15f3cc1e4ba..c3167573fbec3 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java
@@ -57,7 +57,6 @@
 import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec;
 import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec;
 import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesAggregateExec;
-import org.elasticsearch.xpack.esql.plan.physical.TimeSeriesSourceExec;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation;
 import org.elasticsearch.xpack.ml.MachineLearning;
@@ -125,11 +124,6 @@ public PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, LocalE
         return PhysicalOperation.fromSource(new TestSourceOperatorFactory(), layout.build());
     }
 
-    @Override
-    public PhysicalOperation timeSeriesSourceOperation(TimeSeriesSourceExec ts, LocalExecutionPlannerContext context) {
-        throw new UnsupportedOperationException("time-series source is not supported in CSV tests");
-    }
-
     @Override
     public Operator.OperatorFactory timeSeriesAggregatorOperatorFactory(
         TimeSeriesAggregateExec ts,
@@ -140,7 +134,6 @@ public Operator.OperatorFactory timeSeriesAggregatorOperatorFactory(
     ) {
         return new TimeSeriesAggregationOperator.Factory(
             ts.timeBucketRounding(context.foldCtx()),
-            false,
             groupSpecs,
             aggregatorMode,
             aggregatorFactories,