From 96255a5dbc652b7ac54490bac18c09dcbb462b94 Mon Sep 17 00:00:00 2001 From: Pablo Date: Wed, 3 Sep 2025 15:44:35 -0700 Subject: [PATCH 01/13] Implementing irate function for timeseries --- x-pack/plugin/esql/compute/build.gradle | 23 ++ .../aggregation/X-IRateAggregator.java.st | 278 ++++++++++++++++++ 2 files changed, 301 insertions(+) create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 8acb7697b9f15..fab5dfaa3f05a 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -601,6 +601,7 @@ tasks.named('stringTemplates').configure { it.inputFile = fallibleArrayStateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/FloatFallibleArrayState.java" } + File valuesAggregatorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-ValuesAggregator.java.st") template { it.properties = intProperties @@ -650,6 +651,28 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/aggregation/RateDoubleAggregator.java" } + File irateAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st") + template { + it.properties = intProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IRateIntAggregator.java" + } + template { + it.properties = longProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IRateLongAggregator.java" + } + template { + it.properties = floatProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IRateFloatAggregator.java" + } + template { + it.properties = doubleProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IRateDoubleAggregator.java" + } + File stdDevAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-StdDevAggregator.java.st") template { it.properties = intProperties diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st new file mode 100644 index 0000000000000..054f020bfca06 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st @@ -0,0 +1,278 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+// begin generated imports
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.compute.ann.GroupingAggregator;
+import org.elasticsearch.compute.ann.IntermediateState;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
+// end generated imports
+
+/**
+ * An irate (instantaneous rate) grouping aggregation definition for $type$.
+ * This class is generated. Edit `X-IRateAggregator.java.st` instead.
+ */
+@GroupingAggregator(
+    value = {
+        @IntermediateState(name = "timestamps", type = "LONG_BLOCK"),
+        @IntermediateState(name = "values", type = "$TYPE$_BLOCK"),
+        @IntermediateState(name = "resets", type = "DOUBLE") }
+)
+public class IRate$Type$Aggregator {
+
+    public static $Type$IRateGroupingState initGrouping(DriverContext driverContext) {
+        return new $Type$IRateGroupingState(driverContext.bigArrays(), driverContext.breaker());
+    }
+
+    public static void combine($Type$IRateGroupingState current, int groupId, $type$ value, long timestamp) {
+        current.append(groupId, timestamp, value);
+    }
+
+    public static void combineIntermediate(
+        $Type$IRateGroupingState current,
+        int groupId,
+        LongBlock timestamps,
+        $Type$Block values,
+        double reset,
+        int otherPosition
+    ) {
+        current.combine(groupId, timestamps, values, reset, otherPosition);
+    }
+
+    public static Block evaluateFinal($Type$IRateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+        return state.evaluateFinal(selected, evalContext);
+    }
+
+    private static class $Type$IRateState {
+        static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$IRateState.class);
+        final long[] timestamps; // descending order
+        final $type$[] values;
+
+        $Type$IRateState(int initialSize) {
+            this.timestamps = new long[initialSize];
+            this.values = new $type$[initialSize];
+        }
+
+        $Type$IRateState(long[] ts, $type$[] vs) {
+            this.timestamps = ts;
+            this.values = vs;
+        }
+
+        void append(long t, $type$ v) {
+            assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length;
+            assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1];
+            // This method does not need to do anything because we only need the last two values
+            // and timestamps, which are already in place.
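+            // Values arrive ordered by descending @timestamp, so those two entries are already
+            // the newest pair for this group, which is all irate ever reads.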
+        }
+
+        int entries() {
+            return timestamps.length;
+        }
+
+        static long bytesUsed(int entries) {
+            var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries);
+            var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) $BYTES$ * entries);
+            return BASE_RAM_USAGE + ts + vs;
+        }
+    }
+
+    public static final class $Type$IRateGroupingState implements Releasable, Accountable, GroupingAggregatorState {
+        private ObjectArray<$Type$IRateState> states;
+        private final BigArrays bigArrays;
+        private final CircuitBreaker breaker;
+        private long stateBytes; // for individual states
+
+        $Type$IRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) {
+            this.bigArrays = bigArrays;
+            this.breaker = breaker;
+            this.states = bigArrays.newObjectArray(1);
+        }
+
+        void ensureCapacity(int groupId) {
+            states = bigArrays.grow(states, groupId + 1);
+        }
+
+        void adjustBreaker(long bytes) {
+            breaker.addEstimateBytesAndMaybeBreak(bytes, "<>");
+            stateBytes += bytes;
+            assert stateBytes >= 0 : stateBytes;
+        }
+
+        void append(int groupId, long timestamp, $type$ value) {
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker($Type$IRateState.bytesUsed(1));
+                state = new $Type$IRateState(new long[] { timestamp }, new $type$[] { value });
+                states.set(groupId, state);
+            } else {
+                if (state.entries() == 1) {
+                    adjustBreaker($Type$IRateState.bytesUsed(2));
+                    state = new $Type$IRateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value });
+                    states.set(groupId, state);
+                    adjustBreaker(-$Type$IRateState.bytesUsed(1)); // old state
+                }
+            }
+        }
+
+        void combine(int groupId, LongBlock timestamps, $Type$Block values, double reset, int otherPosition) {
+            // TODO: Check this method pabloem
+            // irate does not track counter resets; the reset argument only mirrors the declared
+            // intermediate state and is otherwise ignored.
+            final int valueCount = timestamps.getValueCount(otherPosition);
+            if (valueCount == 0) {
+                return;
+            }
+            final int firstIndex = timestamps.getFirstValueIndex(otherPosition);
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker($Type$IRateState.bytesUsed(valueCount));
+                state = new $Type$IRateState(valueCount);
+                states.set(groupId, state);
+                // TODO: add bulk_copy to Block
+                for (int i = 0; i < valueCount; i++) {
+                    state.timestamps[i] = timestamps.getLong(firstIndex + i);
+                    state.values[i] = values.get$Type$(firstIndex + i);
+                }
+            } else {
+                adjustBreaker($Type$IRateState.bytesUsed(state.entries() + valueCount));
+                var newState = new $Type$IRateState(state.entries() + valueCount);
+                states.set(groupId, newState);
+                merge(state, newState, firstIndex, valueCount, timestamps, values);
+                adjustBreaker(-$Type$IRateState.bytesUsed(state.entries())); // old state
+            }
+        }
+
+        void merge($Type$IRateState curr, $Type$IRateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) {
+            int i = 0, j = 0, k = 0;
+            final int leftCount = curr.entries();
+            // We do not merge more than two entries because we only need the last two.
+            // This merge thus ends when we have two entries in dst.
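+            // Both inputs are sorted by descending timestamp, so a single merge pass suffices:
+            // take the newer head on each step until dst holds the two newest samples.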
+            while (i < leftCount && j < rightCount && k < 2) {
+                final var t1 = curr.timestamps[i];
+                final var t2 = timestamps.getLong(firstIndex + j);
+                if (t1 > t2) {
+                    dst.timestamps[k] = t1;
+                    dst.values[k] = curr.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = t2;
+                    dst.values[k] = values.get$Type$(firstIndex + j);
+                    ++j;
+                }
+                ++k;
+            }
+            // If one side runs out before dst is full, drain the other side so dst still ends
+            // up with the two newest samples (e.g. when each input holds a single entry).
+            while (i < leftCount && k < 2) {
+                dst.timestamps[k] = curr.timestamps[i];
+                dst.values[k] = curr.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < rightCount && k < 2) {
+                dst.timestamps[k] = timestamps.getLong(firstIndex + j);
+                dst.values[k] = values.get$Type$(firstIndex + j);
+                ++j;
+                ++k;
+            }
+        }
+
+        $Type$IRateState mergeState($Type$IRateState s1, $Type$IRateState s2) {
+            final int newLen = Math.min(2, s1.entries() + s2.entries());
+            adjustBreaker($Type$IRateState.bytesUsed(newLen));
+            var dst = new $Type$IRateState(newLen);
+            int i = 0, j = 0, k = 0;
+            while (i < s1.entries() && j < s2.entries() && k < 2) {
+                if (s1.timestamps[i] > s2.timestamps[j]) {
+                    dst.timestamps[k] = s1.timestamps[i];
+                    dst.values[k] = s1.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = s2.timestamps[j];
+                    dst.values[k] = s2.values[j];
+                    ++j;
+                }
+                ++k;
+            }
+            while (i < s1.entries() && k < 2) {
+                dst.timestamps[k] = s1.timestamps[i];
+                dst.values[k] = s1.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < s2.entries() && k < 2) {
+                dst.timestamps[k] = s2.timestamps[j];
+                dst.values[k] = s2.values[j];
+                ++j;
+                ++k;
+            }
+            return dst;
+        }
+
+        @Override
+        public long ramBytesUsed() {
+            return states.ramBytesUsed() + stateBytes;
+        }
+
+        @Override
+        public void close() {
+            Releasables.close(states, () -> adjustBreaker(-stateBytes));
+        }
+
+        @Override
+        public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) {
+            assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset;
+            final BlockFactory blockFactory = driverContext.blockFactory();
+            final int positionCount = selected.getPositionCount();
+            try (
+                LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2);
+                $Type$Block.Builder values = blockFactory.new$Type$BlockBuilder(positionCount * 2);
+                DoubleVector.FixedBuilder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount)
+            ) {
+                for (int i = 0; i < positionCount; i++) {
+                    final var groupId = selected.getInt(i);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state != null) {
+                        timestamps.beginPositionEntry();
+                        for (long t : state.timestamps) {
+                            timestamps.appendLong(t);
+                        }
+                        timestamps.endPositionEntry();
+
+                        values.beginPositionEntry();
+                        for ($type$ v : state.values) {
+                            values.append$Type$(v);
+                        }
+                        values.endPositionEntry();
+                    } else {
+                        timestamps.appendNull();
+                        values.appendNull();
+                    }
+                    // irate does not track counter resets, so the reset column is always zero.
+                    resets.appendDouble(i, 0);
+                }
+                blocks[offset] = timestamps.build();
+                blocks[offset + 1] = values.build();
+                blocks[offset + 2] = resets.build().asBlock();
+            }
+        }
+
+        Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+            int positionCount = selected.getPositionCount();
+            try (DoubleBlock.Builder rates = evalContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
+                for (int p = 0; p < positionCount; p++) {
+                    final var groupId = selected.getInt(p);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state == null || state.values.length < 2) {
+                        rates.appendNull();
+                        continue;
+                    }
+                    // Instantaneous rate: absolute delta of the two newest samples divided by
+                    // their timestamp delta in milliseconds, scaled by 1000 to a per-second rate.
+                    final double ydiff = state.values[0] > state.values[1]
+                        ? state.values[0] - state.values[1]
+                        : state.values[1] - state.values[0];
+                    final long xdiff = state.timestamps[0] - state.timestamps[1];
+                    rates.appendDouble(ydiff / xdiff * 1000);
+                }
+                return rates.build();
+            }
+        }
+
+        @Override
+        public void enableGroupIdTracking(SeenGroupIds seenGroupIds) {
+            // noop - we handle the null states inside `toIntermediate` and `evaluateFinal`
+        }
+    }
+}

From 578a9ffde9e03370539b07d8e74b6ea4135d604b Mon Sep 17 00:00:00 2001
From: Pablo
Date: Wed, 3 Sep 2025 20:18:15 -0700
Subject: [PATCH 02/13] Fixes to irate function.
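
Renames the generated classes from IRate* to Irate*, drops the unused "resets"
intermediate state, and registers the new Irate aggregate in the ESQL function
registry. Intended usage mirrors rate(); for example (exact syntax may change):

  TS metrics | STATS max(irate(requests)) BY host, bucket(@timestamp, 1 minute)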
Pending: test suite --- x-pack/plugin/esql/compute/build.gradle | 44 +- .../aggregation/IrateDoubleAggregator.java | 271 ++++++++++++ .../aggregation/IrateFloatAggregator.java | 271 ++++++++++++ .../aggregation/IrateIntAggregator.java | 271 ++++++++++++ .../aggregation/IrateLongAggregator.java | 271 ++++++++++++ ...IrateDoubleAggregatorFunctionSupplier.java | 46 +++ ...IrateDoubleGroupingAggregatorFunction.java | 390 ++++++++++++++++++ .../IrateFloatAggregatorFunctionSupplier.java | 46 +++ .../IrateFloatGroupingAggregatorFunction.java | 390 ++++++++++++++++++ .../IrateIntAggregatorFunctionSupplier.java | 46 +++ .../IrateIntGroupingAggregatorFunction.java | 389 +++++++++++++++++ .../IrateLongAggregatorFunctionSupplier.java | 46 +++ .../IrateLongGroupingAggregatorFunction.java | 388 +++++++++++++++++ ...ator.java.st => X-IrateAggregator.java.st} | 73 ++-- .../function/EsqlFunctionRegistry.java | 2 + .../expression/function/aggregate/Irate.java | 142 +++++++ 16 files changed, 3024 insertions(+), 62 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntGroupingAggregatorFunction.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongAggregatorFunctionSupplier.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongGroupingAggregatorFunction.java rename x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/{X-IRateAggregator.java.st => X-IrateAggregator.java.st} (80%) create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index fab5dfaa3f05a..07fefbf9e6ede 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -491,6 +491,28 @@ tasks.named('stringTemplates').configure { } } + File irateAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st") + template { + it.properties = intProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = 
"org/elasticsearch/compute/aggregation/IrateIntAggregator.java" + } + template { + it.properties = longProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IrateLongAggregator.java" + } + template { + it.properties = floatProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IrateFloatAggregator.java" + } + template { + it.properties = doubleProperties + it.inputFile = irateAggregatorInputFile + it.outputFile = "org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java" + } + File fallibleStateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-FallibleState.java.st") template { it.properties = booleanProperties @@ -651,28 +673,6 @@ tasks.named('stringTemplates').configure { it.outputFile = "org/elasticsearch/compute/aggregation/RateDoubleAggregator.java" } - File irateAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st") - template { - it.properties = intProperties - it.inputFile = irateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/IRateIntAggregator.java" - } - template { - it.properties = longProperties - it.inputFile = irateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/IRateLongAggregator.java" - } - template { - it.properties = floatProperties - it.inputFile = irateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/IRateFloatAggregator.java" - } - template { - it.properties = doubleProperties - it.inputFile = irateAggregatorInputFile - it.outputFile = "org/elasticsearch/compute/aggregation/IRateDoubleAggregator.java" - } - File stdDevAggregatorInputFile = file("src/main/java/org/elasticsearch/compute/aggregation/X-StdDevAggregator.java.st") template { it.properties = intProperties diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java new file mode 100644 index 0000000000000..5496215022154 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java @@ -0,0 +1,271 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+// begin generated imports
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.compute.ann.GroupingAggregator;
+import org.elasticsearch.compute.ann.IntermediateState;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
+// end generated imports
+
+/**
+ * An irate (instantaneous rate) grouping aggregation definition for double.
+ * This class is generated. Edit `X-IrateAggregator.java.st` instead.
+ */
+@GroupingAggregator(
+    value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "DOUBLE_BLOCK") }
+)
+public class IrateDoubleAggregator {
+
+    public static DoubleIrateGroupingState initGrouping(DriverContext driverContext) {
+        return new DoubleIrateGroupingState(driverContext.bigArrays(), driverContext.breaker());
+    }
+
+    public static void combine(DoubleIrateGroupingState current, int groupId, double value, long timestamp) {
+        current.append(groupId, timestamp, value);
+    }
+
+    public static void combineIntermediate(
+        DoubleIrateGroupingState current,
+        int groupId,
+        LongBlock timestamps,
+        DoubleBlock values,
+        int otherPosition
+    ) {
+        current.combine(groupId, timestamps, values, otherPosition);
+    }
+
+    public static Block evaluateFinal(DoubleIrateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+        return state.evaluateFinal(selected, evalContext);
+    }
+
+    private static class DoubleIrateState {
+        static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(DoubleIrateState.class);
+        final long[] timestamps; // descending order
+        final double[] values;
+
+        DoubleIrateState(int initialSize) {
+            this.timestamps = new long[initialSize];
+            this.values = new double[initialSize];
+        }
+
+        DoubleIrateState(long[] ts, double[] vs) {
+            this.timestamps = ts;
+            this.values = vs;
+        }
+
+        void append(long t, double v) {
+            assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length;
+            assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1];
+            // This method does not need to do anything because we only need the last two values
+            // and timestamps, which are already in place.
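+            // Values arrive ordered by descending @timestamp, so those two entries are already
+            // the newest pair for this group, which is all irate ever reads.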
+        }
+
+        int entries() {
+            return timestamps.length;
+        }
+
+        static long bytesUsed(int entries) {
+            var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries);
+            var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Double.BYTES * entries);
+            return BASE_RAM_USAGE + ts + vs;
+        }
+    }
+
+    public static final class DoubleIrateGroupingState implements Releasable, Accountable, GroupingAggregatorState {
+        private ObjectArray<DoubleIrateState> states;
+        private final BigArrays bigArrays;
+        private final CircuitBreaker breaker;
+        private long stateBytes; // for individual states
+
+        DoubleIrateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) {
+            this.bigArrays = bigArrays;
+            this.breaker = breaker;
+            this.states = bigArrays.newObjectArray(1);
+        }
+
+        void ensureCapacity(int groupId) {
+            states = bigArrays.grow(states, groupId + 1);
+        }
+
+        void adjustBreaker(long bytes) {
+            breaker.addEstimateBytesAndMaybeBreak(bytes, "<>");
+            stateBytes += bytes;
+            assert stateBytes >= 0 : stateBytes;
+        }
+
+        void append(int groupId, long timestamp, double value) {
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(DoubleIrateState.bytesUsed(1));
+                state = new DoubleIrateState(new long[] { timestamp }, new double[] { value });
+                states.set(groupId, state);
+            } else {
+                if (state.entries() == 1) {
+                    adjustBreaker(DoubleIrateState.bytesUsed(2));
+                    state = new DoubleIrateState(new long[] { state.timestamps[0], timestamp }, new double[] { state.values[0], value });
+                    states.set(groupId, state);
+                    adjustBreaker(-DoubleIrateState.bytesUsed(1)); // old state
+                }
+            }
+        }
+
+        void combine(int groupId, LongBlock timestamps, DoubleBlock values, int otherPosition) {
+            // TODO: Check this method pabloem
+            final int valueCount = timestamps.getValueCount(otherPosition);
+            if (valueCount == 0) {
+                return;
+            }
+            final int firstIndex = timestamps.getFirstValueIndex(otherPosition);
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(DoubleIrateState.bytesUsed(valueCount));
+                state = new DoubleIrateState(valueCount);
+                states.set(groupId, state);
+                // TODO: add bulk_copy to Block
+                for (int i = 0; i < valueCount; i++) {
+                    state.timestamps[i] = timestamps.getLong(firstIndex + i);
+                    state.values[i] = values.getDouble(firstIndex + i);
+                }
+            } else {
+                adjustBreaker(DoubleIrateState.bytesUsed(state.entries() + valueCount));
+                var newState = new DoubleIrateState(state.entries() + valueCount);
+                states.set(groupId, newState);
+                merge(state, newState, firstIndex, valueCount, timestamps, values);
+                adjustBreaker(-DoubleIrateState.bytesUsed(state.entries())); // old state
+            }
+        }
+
+        void merge(DoubleIrateState curr, DoubleIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, DoubleBlock values) {
+            int i = 0, j = 0, k = 0;
+            final int leftCount = curr.entries();
+            // We do not merge more than two entries because we only need the last two.
+            // This merge thus ends when we have two entries in dst.
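+            // Both inputs are sorted by descending timestamp, so a single merge pass suffices:
+            // take the newer head on each step until dst holds the two newest samples.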
+            while (i < leftCount && j < rightCount && k < 2) {
+                final var t1 = curr.timestamps[i];
+                final var t2 = timestamps.getLong(firstIndex + j);
+                if (t1 > t2) {
+                    dst.timestamps[k] = t1;
+                    dst.values[k] = curr.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = t2;
+                    dst.values[k] = values.getDouble(firstIndex + j);
+                    ++j;
+                }
+                ++k;
+            }
+            // If one side runs out before dst is full, drain the other side so dst still ends
+            // up with the two newest samples (e.g. when each input holds a single entry).
+            while (i < leftCount && k < 2) {
+                dst.timestamps[k] = curr.timestamps[i];
+                dst.values[k] = curr.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < rightCount && k < 2) {
+                dst.timestamps[k] = timestamps.getLong(firstIndex + j);
+                dst.values[k] = values.getDouble(firstIndex + j);
+                ++j;
+                ++k;
+            }
+        }
+
+        DoubleIrateState mergeState(DoubleIrateState s1, DoubleIrateState s2) {
+            adjustBreaker(DoubleIrateState.bytesUsed(2));
+            var dst = new DoubleIrateState(2);
+            int i = 0, j = 0, k = 0;
+            while (i < s1.entries() && j < s2.entries() && k < 2) {
+                if (s1.timestamps[i] > s2.timestamps[j]) {
+                    dst.timestamps[k] = s1.timestamps[i];
+                    dst.values[k] = s1.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = s2.timestamps[j];
+                    dst.values[k] = s2.values[j];
+                    ++j;
+                }
+                ++k;
+            }
+            while (i < s1.entries() && k < 2) {
+                dst.timestamps[k] = s1.timestamps[i];
+                dst.values[k] = s1.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < s2.entries() && k < 2) {
+                dst.timestamps[k] = s2.timestamps[j];
+                dst.values[k] = s2.values[j];
+                ++j;
+                ++k;
+            }
+            return dst;
+        }
+
+        @Override
+        public long ramBytesUsed() {
+            return states.ramBytesUsed() + stateBytes;
+        }
+
+        @Override
+        public void close() {
+            Releasables.close(states, () -> adjustBreaker(-stateBytes));
+        }
+
+        @Override
+        public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) {
+            assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset;
+            final BlockFactory blockFactory = driverContext.blockFactory();
+            final int positionCount = selected.getPositionCount();
+            try (
+                LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2);
+                DoubleBlock.Builder values = blockFactory.newDoubleBlockBuilder(positionCount * 2)
+            ) {
+                for (int i = 0; i < positionCount; i++) {
+                    final var groupId = selected.getInt(i);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state != null) {
+                        timestamps.beginPositionEntry();
+                        for (long t : state.timestamps) {
+                            timestamps.appendLong(t);
+                        }
+                        timestamps.endPositionEntry();
+
+                        values.beginPositionEntry();
+                        for (double v : state.values) {
+                            values.appendDouble(v);
+                        }
+                        values.endPositionEntry();
+                    } else {
+                        timestamps.appendNull();
+                        values.appendNull();
+                    }
+                }
+                blocks[offset] = timestamps.build();
+                blocks[offset + 1] = values.build();
+            }
+        }
+
+        Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+            int positionCount = selected.getPositionCount();
+            try (DoubleBlock.Builder rates = evalContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
+                for (int p = 0; p < positionCount; p++) {
+                    final var groupId = selected.getInt(p);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state == null || state.values.length < 2) {
+                        rates.appendNull();
+                        continue;
+                    }
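+                    // Instantaneous rate: absolute delta of the two newest samples divided by
+                    // their timestamp delta in milliseconds, scaled by 1000 to a per-second rate.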
+                    final double ydiff = state.values[0] > state.values[1]
+                        ? state.values[0] - state.values[1]
+                        : state.values[1] - state.values[0];
+                    final long xdiff = state.timestamps[0] - state.timestamps[1];
+                    rates.appendDouble(ydiff / xdiff * 1000);
+                }
+                return rates.build();
+            }
+        }
+
+        @Override
+        public void enableGroupIdTracking(SeenGroupIds seenGroupIds) {
+            // noop - we handle the null states inside `toIntermediate` and `evaluateFinal`
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java
new file mode 100644
index 0000000000000..a10d65beaca8f
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java
@@ -0,0 +1,271 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+// begin generated imports
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.compute.ann.GroupingAggregator;
+import org.elasticsearch.compute.ann.IntermediateState;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
+// end generated imports
+
+/**
+ * An irate (instantaneous rate) grouping aggregation definition for float.
+ * This class is generated. Edit `X-IrateAggregator.java.st` instead.
+ */
+@GroupingAggregator(
+    value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "FLOAT_BLOCK") }
+)
+public class IrateFloatAggregator {
+
+    public static FloatIrateGroupingState initGrouping(DriverContext driverContext) {
+        return new FloatIrateGroupingState(driverContext.bigArrays(), driverContext.breaker());
+    }
+
+    public static void combine(FloatIrateGroupingState current, int groupId, float value, long timestamp) {
+        current.append(groupId, timestamp, value);
+    }
+
+    public static void combineIntermediate(
+        FloatIrateGroupingState current,
+        int groupId,
+        LongBlock timestamps,
+        FloatBlock values,
+        int otherPosition
+    ) {
+        current.combine(groupId, timestamps, values, otherPosition);
+    }
+
+    public static Block evaluateFinal(FloatIrateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+        return state.evaluateFinal(selected, evalContext);
+    }
+
+    private static class FloatIrateState {
+        static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(FloatIrateState.class);
+        final long[] timestamps; // descending order
+        final float[] values;
+
+        FloatIrateState(int initialSize) {
+            this.timestamps = new long[initialSize];
+            this.values = new float[initialSize];
+        }
+
+        FloatIrateState(long[] ts, float[] vs) {
+            this.timestamps = ts;
+            this.values = vs;
+        }
+
+        void append(long t, float v) {
+            assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length;
+            assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1];
+            // This method does not need to do anything because we only need the last two values
+            // and timestamps, which are already in place.
+            // Values arrive ordered by descending @timestamp, so those two entries are already
+            // the newest pair for this group, which is all irate ever reads.
+        }
+
+        int entries() {
+            return timestamps.length;
+        }
+
+        static long bytesUsed(int entries) {
+            var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries);
+            var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Float.BYTES * entries);
+            return BASE_RAM_USAGE + ts + vs;
+        }
+    }
+
+    public static final class FloatIrateGroupingState implements Releasable, Accountable, GroupingAggregatorState {
+        private ObjectArray<FloatIrateState> states;
+        private final BigArrays bigArrays;
+        private final CircuitBreaker breaker;
+        private long stateBytes; // for individual states
+
+        FloatIrateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) {
+            this.bigArrays = bigArrays;
+            this.breaker = breaker;
+            this.states = bigArrays.newObjectArray(1);
+        }
+
+        void ensureCapacity(int groupId) {
+            states = bigArrays.grow(states, groupId + 1);
+        }
+
+        void adjustBreaker(long bytes) {
+            breaker.addEstimateBytesAndMaybeBreak(bytes, "<>");
+            stateBytes += bytes;
+            assert stateBytes >= 0 : stateBytes;
+        }
+
+        void append(int groupId, long timestamp, float value) {
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(FloatIrateState.bytesUsed(1));
+                state = new FloatIrateState(new long[] { timestamp }, new float[] { value });
+                states.set(groupId, state);
+            } else {
+                if (state.entries() == 1) {
+                    adjustBreaker(FloatIrateState.bytesUsed(2));
+                    state = new FloatIrateState(new long[] { state.timestamps[0], timestamp }, new float[] { state.values[0], value });
+                    states.set(groupId, state);
+                    adjustBreaker(-FloatIrateState.bytesUsed(1)); // old state
+                }
+            }
+        }
+
+        void combine(int groupId, LongBlock timestamps, FloatBlock values, int otherPosition) {
+            // TODO: Check this method pabloem
+            final int valueCount = timestamps.getValueCount(otherPosition);
+            if (valueCount == 0) {
+                return;
+            }
+            final int firstIndex = timestamps.getFirstValueIndex(otherPosition);
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(FloatIrateState.bytesUsed(valueCount));
+                state = new FloatIrateState(valueCount);
+                states.set(groupId, state);
+                // TODO: add bulk_copy to Block
+                for (int i = 0; i < valueCount; i++) {
+                    state.timestamps[i] = timestamps.getLong(firstIndex + i);
+                    state.values[i] = values.getFloat(firstIndex + i);
+                }
+            } else {
+                adjustBreaker(FloatIrateState.bytesUsed(state.entries() + valueCount));
+                var newState = new FloatIrateState(state.entries() + valueCount);
+                states.set(groupId, newState);
+                merge(state, newState, firstIndex, valueCount, timestamps, values);
+                adjustBreaker(-FloatIrateState.bytesUsed(state.entries())); // old state
+            }
+        }
+
+        void merge(FloatIrateState curr, FloatIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, FloatBlock values) {
+            int i = 0, j = 0, k = 0;
+            final int leftCount = curr.entries();
+            // We do not merge more than two entries because we only need the last two.
+            // This merge thus ends when we have two entries in dst.
+            // Both inputs are sorted by descending timestamp, so a single merge pass suffices:
+            // take the newer head on each step until dst holds the two newest samples.
+            while (i < leftCount && j < rightCount && k < 2) {
+                final var t1 = curr.timestamps[i];
+                final var t2 = timestamps.getLong(firstIndex + j);
+                if (t1 > t2) {
+                    dst.timestamps[k] = t1;
+                    dst.values[k] = curr.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = t2;
+                    dst.values[k] = values.getFloat(firstIndex + j);
+                    ++j;
+                }
+                ++k;
+            }
+            // If one side runs out before dst is full, drain the other side so dst still ends
+            // up with the two newest samples (e.g. when each input holds a single entry).
+            while (i < leftCount && k < 2) {
+                dst.timestamps[k] = curr.timestamps[i];
+                dst.values[k] = curr.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < rightCount && k < 2) {
+                dst.timestamps[k] = timestamps.getLong(firstIndex + j);
+                dst.values[k] = values.getFloat(firstIndex + j);
+                ++j;
+                ++k;
+            }
+        }
+
+        FloatIrateState mergeState(FloatIrateState s1, FloatIrateState s2) {
+            adjustBreaker(FloatIrateState.bytesUsed(2));
+            var dst = new FloatIrateState(2);
+            int i = 0, j = 0, k = 0;
+            while (i < s1.entries() && j < s2.entries() && k < 2) {
+                if (s1.timestamps[i] > s2.timestamps[j]) {
+                    dst.timestamps[k] = s1.timestamps[i];
+                    dst.values[k] = s1.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = s2.timestamps[j];
+                    dst.values[k] = s2.values[j];
+                    ++j;
+                }
+                ++k;
+            }
+            while (i < s1.entries() && k < 2) {
+                dst.timestamps[k] = s1.timestamps[i];
+                dst.values[k] = s1.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < s2.entries() && k < 2) {
+                dst.timestamps[k] = s2.timestamps[j];
+                dst.values[k] = s2.values[j];
+                ++j;
+                ++k;
+            }
+            return dst;
+        }
+
+        @Override
+        public long ramBytesUsed() {
+            return states.ramBytesUsed() + stateBytes;
+        }
+
+        @Override
+        public void close() {
+            Releasables.close(states, () -> adjustBreaker(-stateBytes));
+        }
+
+        @Override
+        public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) {
+            assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset;
+            final BlockFactory blockFactory = driverContext.blockFactory();
+            final int positionCount = selected.getPositionCount();
+            try (
+                LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2);
+                FloatBlock.Builder values = blockFactory.newFloatBlockBuilder(positionCount * 2)
+            ) {
+                for (int i = 0; i < positionCount; i++) {
+                    final var groupId = selected.getInt(i);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state != null) {
+                        timestamps.beginPositionEntry();
+                        for (long t : state.timestamps) {
+                            timestamps.appendLong(t);
+                        }
+                        timestamps.endPositionEntry();
+
+                        values.beginPositionEntry();
+                        for (float v : state.values) {
+                            values.appendFloat(v);
+                        }
+                        values.endPositionEntry();
+                    } else {
+                        timestamps.appendNull();
+                        values.appendNull();
+                    }
+                }
+                blocks[offset] = timestamps.build();
+                blocks[offset + 1] = values.build();
+            }
+        }
+
+        Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+            int positionCount = selected.getPositionCount();
+            try (DoubleBlock.Builder rates = evalContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
+                for (int p = 0; p < positionCount; p++) {
+                    final var groupId = selected.getInt(p);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state == null || state.values.length < 2) {
+                        rates.appendNull();
+                        continue;
+                    }
+                    // Instantaneous rate: absolute delta of the two newest samples divided by
+                    // their timestamp delta in milliseconds, scaled by 1000 to a per-second rate.
+                    final double ydiff = state.values[0] > state.values[1]
+                        ? state.values[0] - state.values[1]
+                        : state.values[1] - state.values[0];
+                    final long xdiff = state.timestamps[0] - state.timestamps[1];
+                    rates.appendDouble(ydiff / xdiff * 1000);
+                }
+                return rates.build();
+            }
+        }
+
+        @Override
+        public void enableGroupIdTracking(SeenGroupIds seenGroupIds) {
+            // noop - we handle the null states inside `toIntermediate` and `evaluateFinal`
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java
new file mode 100644
index 0000000000000..7d8d39552dcce
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java
@@ -0,0 +1,271 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+// begin generated imports
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.compute.ann.GroupingAggregator;
+import org.elasticsearch.compute.ann.IntermediateState;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
+// end generated imports
+
+/**
+ * An irate (instantaneous rate) grouping aggregation definition for int.
+ * This class is generated. Edit `X-IrateAggregator.java.st` instead.
+ */
+@GroupingAggregator(
+    value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "INT_BLOCK") }
+)
+public class IrateIntAggregator {
+
+    public static IntIrateGroupingState initGrouping(DriverContext driverContext) {
+        return new IntIrateGroupingState(driverContext.bigArrays(), driverContext.breaker());
+    }
+
+    public static void combine(IntIrateGroupingState current, int groupId, int value, long timestamp) {
+        current.append(groupId, timestamp, value);
+    }
+
+    public static void combineIntermediate(
+        IntIrateGroupingState current,
+        int groupId,
+        LongBlock timestamps,
+        IntBlock values,
+        int otherPosition
+    ) {
+        current.combine(groupId, timestamps, values, otherPosition);
+    }
+
+    public static Block evaluateFinal(IntIrateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+        return state.evaluateFinal(selected, evalContext);
+    }
+
+    private static class IntIrateState {
+        static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(IntIrateState.class);
+        final long[] timestamps; // descending order
+        final int[] values;
+
+        IntIrateState(int initialSize) {
+            this.timestamps = new long[initialSize];
+            this.values = new int[initialSize];
+        }
+
+        IntIrateState(long[] ts, int[] vs) {
+            this.timestamps = ts;
+            this.values = vs;
+        }
+
+        void append(long t, int v) {
+            assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length;
+            assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1];
+            // This method does not need to do anything because we only need the last two values
+            // and timestamps, which are already in place.
+            // Values arrive ordered by descending @timestamp, so those two entries are already
+            // the newest pair for this group, which is all irate ever reads.
+        }
+
+        int entries() {
+            return timestamps.length;
+        }
+
+        static long bytesUsed(int entries) {
+            var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries);
+            var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Integer.BYTES * entries);
+            return BASE_RAM_USAGE + ts + vs;
+        }
+    }
+
+    public static final class IntIrateGroupingState implements Releasable, Accountable, GroupingAggregatorState {
+        private ObjectArray<IntIrateState> states;
+        private final BigArrays bigArrays;
+        private final CircuitBreaker breaker;
+        private long stateBytes; // for individual states
+
+        IntIrateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) {
+            this.bigArrays = bigArrays;
+            this.breaker = breaker;
+            this.states = bigArrays.newObjectArray(1);
+        }
+
+        void ensureCapacity(int groupId) {
+            states = bigArrays.grow(states, groupId + 1);
+        }
+
+        void adjustBreaker(long bytes) {
+            breaker.addEstimateBytesAndMaybeBreak(bytes, "<>");
+            stateBytes += bytes;
+            assert stateBytes >= 0 : stateBytes;
+        }
+
+        void append(int groupId, long timestamp, int value) {
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(IntIrateState.bytesUsed(1));
+                state = new IntIrateState(new long[] { timestamp }, new int[] { value });
+                states.set(groupId, state);
+            } else {
+                if (state.entries() == 1) {
+                    adjustBreaker(IntIrateState.bytesUsed(2));
+                    state = new IntIrateState(new long[] { state.timestamps[0], timestamp }, new int[] { state.values[0], value });
+                    states.set(groupId, state);
+                    adjustBreaker(-IntIrateState.bytesUsed(1)); // old state
+                }
+            }
+        }
+
+        void combine(int groupId, LongBlock timestamps, IntBlock values, int otherPosition) {
+            // TODO: Check this method pabloem
+            final int valueCount = timestamps.getValueCount(otherPosition);
+            if (valueCount == 0) {
+                return;
+            }
+            final int firstIndex = timestamps.getFirstValueIndex(otherPosition);
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(IntIrateState.bytesUsed(valueCount));
+                state = new IntIrateState(valueCount);
+                states.set(groupId, state);
+                // TODO: add bulk_copy to Block
+                for (int i = 0; i < valueCount; i++) {
+                    state.timestamps[i] = timestamps.getLong(firstIndex + i);
+                    state.values[i] = values.getInt(firstIndex + i);
+                }
+            } else {
+                adjustBreaker(IntIrateState.bytesUsed(state.entries() + valueCount));
+                var newState = new IntIrateState(state.entries() + valueCount);
+                states.set(groupId, newState);
+                merge(state, newState, firstIndex, valueCount, timestamps, values);
+                adjustBreaker(-IntIrateState.bytesUsed(state.entries())); // old state
+            }
+        }
+
+        void merge(IntIrateState curr, IntIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, IntBlock values) {
+            int i = 0, j = 0, k = 0;
+            final int leftCount = curr.entries();
+            // We do not merge more than two entries because we only need the last two.
+            // This merge thus ends when we have two entries in dst.
+            // Both inputs are sorted by descending timestamp, so a single merge pass suffices:
+            // take the newer head on each step until dst holds the two newest samples.
+            while (i < leftCount && j < rightCount && k < 2) {
+                final var t1 = curr.timestamps[i];
+                final var t2 = timestamps.getLong(firstIndex + j);
+                if (t1 > t2) {
+                    dst.timestamps[k] = t1;
+                    dst.values[k] = curr.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = t2;
+                    dst.values[k] = values.getInt(firstIndex + j);
+                    ++j;
+                }
+                ++k;
+            }
+            // If one side runs out before dst is full, drain the other side so dst still ends
+            // up with the two newest samples (e.g. when each input holds a single entry).
+            while (i < leftCount && k < 2) {
+                dst.timestamps[k] = curr.timestamps[i];
+                dst.values[k] = curr.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < rightCount && k < 2) {
+                dst.timestamps[k] = timestamps.getLong(firstIndex + j);
+                dst.values[k] = values.getInt(firstIndex + j);
+                ++j;
+                ++k;
+            }
+        }
+
+        IntIrateState mergeState(IntIrateState s1, IntIrateState s2) {
+            adjustBreaker(IntIrateState.bytesUsed(2));
+            var dst = new IntIrateState(2);
+            int i = 0, j = 0, k = 0;
+            while (i < s1.entries() && j < s2.entries() && k < 2) {
+                if (s1.timestamps[i] > s2.timestamps[j]) {
+                    dst.timestamps[k] = s1.timestamps[i];
+                    dst.values[k] = s1.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = s2.timestamps[j];
+                    dst.values[k] = s2.values[j];
+                    ++j;
+                }
+                ++k;
+            }
+            while (i < s1.entries() && k < 2) {
+                dst.timestamps[k] = s1.timestamps[i];
+                dst.values[k] = s1.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < s2.entries() && k < 2) {
+                dst.timestamps[k] = s2.timestamps[j];
+                dst.values[k] = s2.values[j];
+                ++j;
+                ++k;
+            }
+            return dst;
+        }
+
+        @Override
+        public long ramBytesUsed() {
+            return states.ramBytesUsed() + stateBytes;
+        }
+
+        @Override
+        public void close() {
+            Releasables.close(states, () -> adjustBreaker(-stateBytes));
+        }
+
+        @Override
+        public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) {
+            assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset;
+            final BlockFactory blockFactory = driverContext.blockFactory();
+            final int positionCount = selected.getPositionCount();
+            try (
+                LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2);
+                IntBlock.Builder values = blockFactory.newIntBlockBuilder(positionCount * 2)
+            ) {
+                for (int i = 0; i < positionCount; i++) {
+                    final var groupId = selected.getInt(i);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state != null) {
+                        timestamps.beginPositionEntry();
+                        for (long t : state.timestamps) {
+                            timestamps.appendLong(t);
+                        }
+                        timestamps.endPositionEntry();
+
+                        values.beginPositionEntry();
+                        for (int v : state.values) {
+                            values.appendInt(v);
+                        }
+                        values.endPositionEntry();
+                    } else {
+                        timestamps.appendNull();
+                        values.appendNull();
+                    }
+                }
+                blocks[offset] = timestamps.build();
+                blocks[offset + 1] = values.build();
+            }
+        }
+
+        Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+            int positionCount = selected.getPositionCount();
+            try (DoubleBlock.Builder rates = evalContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
+                for (int p = 0; p < positionCount; p++) {
+                    final var groupId = selected.getInt(p);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state == null || state.values.length < 2) {
+                        rates.appendNull();
+                        continue;
+                    }
+                    // Instantaneous rate: absolute delta of the two newest samples divided by
+                    // their timestamp delta in milliseconds, scaled by 1000 to a per-second rate.
+                    final double ydiff = state.values[0] > state.values[1]
+                        ? state.values[0] - state.values[1]
+                        : state.values[1] - state.values[0];
+                    final long xdiff = state.timestamps[0] - state.timestamps[1];
+                    rates.appendDouble(ydiff / xdiff * 1000);
+                }
+                return rates.build();
+            }
+        }
+
+        @Override
+        public void enableGroupIdTracking(SeenGroupIds seenGroupIds) {
+            // noop - we handle the null states inside `toIntermediate` and `evaluateFinal`
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java
new file mode 100644
index 0000000000000..cde1e96bc8b57
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java
@@ -0,0 +1,271 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.compute.aggregation;
+
+// begin generated imports
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.RamUsageEstimator;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.compute.ann.GroupingAggregator;
+import org.elasticsearch.compute.ann.IntermediateState;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
+import org.elasticsearch.compute.data.FloatBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.IntVector;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.core.Releasables;
+// end generated imports
+
+/**
+ * An irate (instantaneous rate) grouping aggregation definition for long.
+ * This class is generated. Edit `X-IrateAggregator.java.st` instead.
+ */
+@GroupingAggregator(
+    value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "LONG_BLOCK") }
+)
+public class IrateLongAggregator {
+
+    public static LongIrateGroupingState initGrouping(DriverContext driverContext) {
+        return new LongIrateGroupingState(driverContext.bigArrays(), driverContext.breaker());
+    }
+
+    public static void combine(LongIrateGroupingState current, int groupId, long value, long timestamp) {
+        current.append(groupId, timestamp, value);
+    }
+
+    public static void combineIntermediate(
+        LongIrateGroupingState current,
+        int groupId,
+        LongBlock timestamps,
+        LongBlock values,
+        int otherPosition
+    ) {
+        current.combine(groupId, timestamps, values, otherPosition);
+    }
+
+    public static Block evaluateFinal(LongIrateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+        return state.evaluateFinal(selected, evalContext);
+    }
+
+    private static class LongIrateState {
+        static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(LongIrateState.class);
+        final long[] timestamps; // descending order
+        final long[] values;
+
+        LongIrateState(int initialSize) {
+            this.timestamps = new long[initialSize];
+            this.values = new long[initialSize];
+        }
+
+        LongIrateState(long[] ts, long[] vs) {
+            this.timestamps = ts;
+            this.values = vs;
+        }
+
+        void append(long t, long v) {
+            assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length;
+            assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1];
+            // This method does not need to do anything because we only need the last two values
+            // and timestamps, which are already in place.
+            // Values arrive ordered by descending @timestamp, so those two entries are already
+            // the newest pair for this group, which is all irate ever reads.
+        }
+
+        int entries() {
+            return timestamps.length;
+        }
+
+        static long bytesUsed(int entries) {
+            var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries);
+            var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries);
+            return BASE_RAM_USAGE + ts + vs;
+        }
+    }
+
+    public static final class LongIrateGroupingState implements Releasable, Accountable, GroupingAggregatorState {
+        private ObjectArray<LongIrateState> states;
+        private final BigArrays bigArrays;
+        private final CircuitBreaker breaker;
+        private long stateBytes; // for individual states
+
+        LongIrateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) {
+            this.bigArrays = bigArrays;
+            this.breaker = breaker;
+            this.states = bigArrays.newObjectArray(1);
+        }
+
+        void ensureCapacity(int groupId) {
+            states = bigArrays.grow(states, groupId + 1);
+        }
+
+        void adjustBreaker(long bytes) {
+            breaker.addEstimateBytesAndMaybeBreak(bytes, "<>");
+            stateBytes += bytes;
+            assert stateBytes >= 0 : stateBytes;
+        }
+
+        void append(int groupId, long timestamp, long value) {
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(LongIrateState.bytesUsed(1));
+                state = new LongIrateState(new long[] { timestamp }, new long[] { value });
+                states.set(groupId, state);
+            } else {
+                if (state.entries() == 1) {
+                    adjustBreaker(LongIrateState.bytesUsed(2));
+                    state = new LongIrateState(new long[] { state.timestamps[0], timestamp }, new long[] { state.values[0], value });
+                    states.set(groupId, state);
+                    adjustBreaker(-LongIrateState.bytesUsed(1)); // old state
+                }
+            }
+        }
+
+        void combine(int groupId, LongBlock timestamps, LongBlock values, int otherPosition) {
+            // TODO: Check this method pabloem
+            final int valueCount = timestamps.getValueCount(otherPosition);
+            if (valueCount == 0) {
+                return;
+            }
+            final int firstIndex = timestamps.getFirstValueIndex(otherPosition);
+            ensureCapacity(groupId);
+            var state = states.get(groupId);
+            if (state == null) {
+                adjustBreaker(LongIrateState.bytesUsed(valueCount));
+                state = new LongIrateState(valueCount);
+                states.set(groupId, state);
+                // TODO: add bulk_copy to Block
+                for (int i = 0; i < valueCount; i++) {
+                    state.timestamps[i] = timestamps.getLong(firstIndex + i);
+                    state.values[i] = values.getLong(firstIndex + i);
+                }
+            } else {
+                adjustBreaker(LongIrateState.bytesUsed(state.entries() + valueCount));
+                var newState = new LongIrateState(state.entries() + valueCount);
+                states.set(groupId, newState);
+                merge(state, newState, firstIndex, valueCount, timestamps, values);
+                adjustBreaker(-LongIrateState.bytesUsed(state.entries())); // old state
+            }
+        }
+
+        void merge(LongIrateState curr, LongIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, LongBlock values) {
+            int i = 0, j = 0, k = 0;
+            final int leftCount = curr.entries();
+            // We do not merge more than two entries because we only need the last two.
+            // This merge thus ends when we have two entries in dst.
+            // Both inputs are sorted by descending timestamp, so a single merge pass suffices:
+            // take the newer head on each step until dst holds the two newest samples.
+            while (i < leftCount && j < rightCount && k < 2) {
+                final var t1 = curr.timestamps[i];
+                final var t2 = timestamps.getLong(firstIndex + j);
+                if (t1 > t2) {
+                    dst.timestamps[k] = t1;
+                    dst.values[k] = curr.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = t2;
+                    dst.values[k] = values.getLong(firstIndex + j);
+                    ++j;
+                }
+                ++k;
+            }
+            // If one side runs out before dst is full, drain the other side so dst still ends
+            // up with the two newest samples (e.g. when each input holds a single entry).
+            while (i < leftCount && k < 2) {
+                dst.timestamps[k] = curr.timestamps[i];
+                dst.values[k] = curr.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < rightCount && k < 2) {
+                dst.timestamps[k] = timestamps.getLong(firstIndex + j);
+                dst.values[k] = values.getLong(firstIndex + j);
+                ++j;
+                ++k;
+            }
+        }
+
+        LongIrateState mergeState(LongIrateState s1, LongIrateState s2) {
+            adjustBreaker(LongIrateState.bytesUsed(2));
+            var dst = new LongIrateState(2);
+            int i = 0, j = 0, k = 0;
+            while (i < s1.entries() && j < s2.entries() && k < 2) {
+                if (s1.timestamps[i] > s2.timestamps[j]) {
+                    dst.timestamps[k] = s1.timestamps[i];
+                    dst.values[k] = s1.values[i];
+                    ++i;
+                } else {
+                    dst.timestamps[k] = s2.timestamps[j];
+                    dst.values[k] = s2.values[j];
+                    ++j;
+                }
+                ++k;
+            }
+            while (i < s1.entries() && k < 2) {
+                dst.timestamps[k] = s1.timestamps[i];
+                dst.values[k] = s1.values[i];
+                ++i;
+                ++k;
+            }
+            while (j < s2.entries() && k < 2) {
+                dst.timestamps[k] = s2.timestamps[j];
+                dst.values[k] = s2.values[j];
+                ++j;
+                ++k;
+            }
+            return dst;
+        }
+
+        @Override
+        public long ramBytesUsed() {
+            return states.ramBytesUsed() + stateBytes;
+        }
+
+        @Override
+        public void close() {
+            Releasables.close(states, () -> adjustBreaker(-stateBytes));
+        }
+
+        @Override
+        public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) {
+            assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset;
+            final BlockFactory blockFactory = driverContext.blockFactory();
+            final int positionCount = selected.getPositionCount();
+            try (
+                LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2);
+                LongBlock.Builder values = blockFactory.newLongBlockBuilder(positionCount * 2)
+            ) {
+                for (int i = 0; i < positionCount; i++) {
+                    final var groupId = selected.getInt(i);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state != null) {
+                        timestamps.beginPositionEntry();
+                        for (long t : state.timestamps) {
+                            timestamps.appendLong(t);
+                        }
+                        timestamps.endPositionEntry();
+
+                        values.beginPositionEntry();
+                        for (long v : state.values) {
+                            values.appendLong(v);
+                        }
+                        values.endPositionEntry();
+                    } else {
+                        timestamps.appendNull();
+                        values.appendNull();
+                    }
+                }
+                blocks[offset] = timestamps.build();
+                blocks[offset + 1] = values.build();
+            }
+        }
+
+        Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext evalContext) {
+            int positionCount = selected.getPositionCount();
+            try (DoubleBlock.Builder rates = evalContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
+                for (int p = 0; p < positionCount; p++) {
+                    final var groupId = selected.getInt(p);
+                    final var state = groupId < states.size() ? states.get(groupId) : null;
+                    if (state == null || state.values.length < 2) {
+                        rates.appendNull();
+                        continue;
+                    }
+                    // Instantaneous rate: absolute delta of the two newest samples divided by
+                    // their timestamp delta in milliseconds, scaled by 1000 to a per-second rate.
+                    final double ydiff = state.values[0] > state.values[1]
+                        ? state.values[0] - state.values[1]
+                        : state.values[1] - state.values[0];
+                    final long xdiff = state.timestamps[0] - state.timestamps[1];
+                    rates.appendDouble(ydiff / xdiff * 1000);
+                }
+                return rates.build();
+            }
+        }
+
+        @Override
+        public void enableGroupIdTracking(SeenGroupIds seenGroupIds) {
+            // noop - we handle the null states inside `toIntermediate` and `evaluateFinal`
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleAggregatorFunctionSupplier.java
new file mode 100644
index 0000000000000..c2a0c69464696
--- /dev/null
+++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleAggregatorFunctionSupplier.java
@@ -0,0 +1,46 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License
+// 2.0; you may not use this file except in compliance with the Elastic License
+// 2.0.
+package org.elasticsearch.compute.aggregation;
+
+import java.lang.Integer;
+import java.lang.Override;
+import java.lang.String;
+import java.util.List;
+import org.elasticsearch.compute.operator.DriverContext;
+
+/**
+ * {@link AggregatorFunctionSupplier} implementation for {@link IrateDoubleAggregator}.
+ * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead.
+ */ +public final class IrateDoubleAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public IrateDoubleAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public List groupingIntermediateStateDesc() { + return IrateDoubleGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext, List channels) { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public IrateDoubleGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return IrateDoubleGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "irate of doubles"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..6075a98f24f5a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateDoubleGroupingAggregatorFunction.java @@ -0,0 +1,390 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link IrateDoubleAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
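+ * <p>Raw input is read from two channels, values first and timestamps second; the
+ * intermediate representation is a (timestamps, values) block pair holding at most
+ * the two most recent samples per group.</p>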
+ */ +public final class IrateDoubleGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.DOUBLE) ); + + private final IrateDoubleAggregator.DoubleIrateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public IrateDoubleGroupingAggregatorFunction(List channels, + IrateDoubleAggregator.DoubleIrateGroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static IrateDoubleGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new IrateDoubleGroupingAggregatorFunction(channels, IrateDoubleAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + DoubleBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + DoubleVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); 
groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, DoubleVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = 
groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, DoubleVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + double valueValue = valueBlock.getDouble(valueOffset); + int timestampStart = 
timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, DoubleVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + double valueValue = valueVector.getDouble(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateDoubleAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + DoubleBlock values = (DoubleBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + IrateDoubleAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + + private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, DoubleBlock valueBlock, + LongBlock timestampBlock) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = IrateDoubleAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..5c656ad734e20 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatAggregatorFunctionSupplier.java @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link IrateFloatAggregator}. + * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. + */ +public final class IrateFloatAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public IrateFloatAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public List groupingIntermediateStateDesc() { + return IrateFloatGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext, List channels) { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public IrateFloatGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return IrateFloatGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "irate of floats"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..d4e98548cca5a --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateFloatGroupingAggregatorFunction.java @@ -0,0 +1,390 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.FloatBlock; +import org.elasticsearch.compute.data.FloatVector; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link IrateFloatAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
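+ * <p>Inputs are {@code float}, but the final per-second rate is emitted as a
+ * {@code double} block.</p>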
+ */ +public final class IrateFloatGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.FLOAT) ); + + private final IrateFloatAggregator.FloatIrateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public IrateFloatGroupingAggregatorFunction(List channels, + IrateFloatAggregator.FloatIrateGroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static IrateFloatGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new IrateFloatGroupingAggregatorFunction(channels, IrateFloatAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + FloatBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + FloatVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, FloatBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if 
(groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, FloatVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, FloatBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + 
groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, FloatVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + float valueValue = valueBlock.getFloat(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + 
timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, FloatVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + float valueValue = valueVector.getFloat(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateFloatAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + FloatBlock values = (FloatBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + IrateFloatAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + + private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, FloatBlock valueBlock, + LongBlock timestampBlock) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = IrateFloatAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..2392b6cfdaf0c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntAggregatorFunctionSupplier.java @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link IrateIntAggregator}. + * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. + */ +public final class IrateIntAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public IrateIntAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public List groupingIntermediateStateDesc() { + return IrateIntGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext, List channels) { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public IrateIntGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return IrateIntGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "irate of ints"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..5802fa22225e3 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateIntGroupingAggregatorFunction.java @@ -0,0 +1,389 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link IrateIntAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
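+ * <p>Multi-valued positions are handled by combining every value/timestamp pairing at
+ * a position; pages where both channels are single-valued take the vector fast path.</p>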
+ */ +public final class IrateIntGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.INT) ); + + private final IrateIntAggregator.IntIrateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public IrateIntGroupingAggregatorFunction(List channels, + IrateIntAggregator.IntIrateGroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static IrateIntGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new IrateIntGroupingAggregatorFunction(channels, IrateIntAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + IntBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + IntVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, IntBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if 
(groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, IntVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateIntAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + 
groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, IntVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateIntAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + int valueValue = valueBlock.getInt(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int 
timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, IntVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + int valueValue = valueVector.getInt(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateIntAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + IntBlock values = (IntBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + IrateIntAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + + private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, IntBlock valueBlock, + LongBlock timestampBlock) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = IrateIntAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongAggregatorFunctionSupplier.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongAggregatorFunctionSupplier.java new file mode 100644 index 0000000000000..92cc7bdc7b2fc --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongAggregatorFunctionSupplier.java @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.util.List; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link AggregatorFunctionSupplier} implementation for {@link IrateLongAggregator}. + * This class is generated. Edit {@code AggregatorFunctionSupplierImplementer} instead. + */ +public final class IrateLongAggregatorFunctionSupplier implements AggregatorFunctionSupplier { + public IrateLongAggregatorFunctionSupplier() { + } + + @Override + public List nonGroupingIntermediateStateDesc() { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public List groupingIntermediateStateDesc() { + return IrateLongGroupingAggregatorFunction.intermediateStateDesc(); + } + + @Override + public AggregatorFunction aggregator(DriverContext driverContext, List channels) { + throw new UnsupportedOperationException("non-grouping aggregator is not supported"); + } + + @Override + public IrateLongGroupingAggregatorFunction groupingAggregator(DriverContext driverContext, + List channels) { + return IrateLongGroupingAggregatorFunction.create(channels, driverContext); + } + + @Override + public String describe() { + return "irate of longs"; + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongGroupingAggregatorFunction.java new file mode 100644 index 0000000000000..bfac0fc0771e7 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/IrateLongGroupingAggregatorFunction.java @@ -0,0 +1,388 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.compute.aggregation; + +import java.lang.Integer; +import java.lang.Override; +import java.lang.String; +import java.lang.StringBuilder; +import java.util.List; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.IntArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; + +/** + * {@link GroupingAggregatorFunction} implementation for {@link IrateLongAggregator}. + * This class is generated. Edit {@code GroupingAggregatorImplementer} instead. 
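+ * <p>{@code addIntermediateInput} re-merges the (timestamps, values) block pairs that
+ * {@code evaluateIntermediate} produced on other drivers.</p>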
+ */ +public final class IrateLongGroupingAggregatorFunction implements GroupingAggregatorFunction { + private static final List INTERMEDIATE_STATE_DESC = List.of( + new IntermediateStateDesc("timestamps", ElementType.LONG), + new IntermediateStateDesc("values", ElementType.LONG) ); + + private final IrateLongAggregator.LongIrateGroupingState state; + + private final List channels; + + private final DriverContext driverContext; + + public IrateLongGroupingAggregatorFunction(List channels, + IrateLongAggregator.LongIrateGroupingState state, DriverContext driverContext) { + this.channels = channels; + this.state = state; + this.driverContext = driverContext; + } + + public static IrateLongGroupingAggregatorFunction create(List channels, + DriverContext driverContext) { + return new IrateLongGroupingAggregatorFunction(channels, IrateLongAggregator.initGrouping(driverContext), driverContext); + } + + public static List intermediateStateDesc() { + return INTERMEDIATE_STATE_DESC; + } + + @Override + public int intermediateBlockCount() { + return INTERMEDIATE_STATE_DESC.size(); + } + + @Override + public GroupingAggregatorFunction.AddInput prepareProcessRawInputPage(SeenGroupIds seenGroupIds, + Page page) { + LongBlock valueBlock = page.getBlock(channels.get(0)); + LongBlock timestampBlock = page.getBlock(channels.get(1)); + LongVector valueVector = valueBlock.asVector(); + if (valueVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + LongVector timestampVector = timestampBlock.asVector(); + if (timestampVector == null) { + maybeEnableGroupIdTracking(seenGroupIds, valueBlock, timestampBlock); + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueBlock, timestampBlock); + } + + @Override + public void close() { + } + }; + } + return new GroupingAggregatorFunction.AddInput() { + @Override + public void add(int positionOffset, IntArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntBigArrayBlock groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void add(int positionOffset, IntVector groupIds) { + addRawInput(positionOffset, groupIds, valueVector, timestampVector); + } + + @Override + public void close() { + } + }; + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, LongBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if 
(groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntArrayBlock groups, LongVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateLongAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + 
groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + } + + private void addRawInput(int positionOffset, IntBigArrayBlock groups, LongVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int valuesPosition = groupPosition + positionOffset; + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntBigArrayBlock groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + if (groups.isNull(groupPosition)) { + continue; + } + int groupStart = groups.getFirstValueIndex(groupPosition); + int groupEnd = groupStart + groups.getValueCount(groupPosition); + for (int g = groupStart; g < groupEnd; g++) { + int groupId = groups.getInt(g); + int valuesPosition = groupPosition + positionOffset; + IrateLongAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongBlock valueBlock, + LongBlock timestampBlock) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + if (valueBlock.isNull(valuesPosition)) { + continue; + } + if (timestampBlock.isNull(valuesPosition)) { + continue; + } + int groupId = groups.getInt(groupPosition); + int valueStart = valueBlock.getFirstValueIndex(valuesPosition); + int valueEnd = valueStart + valueBlock.getValueCount(valuesPosition); + for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) { + long valueValue = valueBlock.getLong(valueOffset); + int timestampStart = timestampBlock.getFirstValueIndex(valuesPosition); + int timestampEnd = timestampStart + timestampBlock.getValueCount(valuesPosition); + 
for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) { + long timestampValue = timestampBlock.getLong(timestampOffset); + IrateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + } + } + + private void addRawInput(int positionOffset, IntVector groups, LongVector valueVector, + LongVector timestampVector) { + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int valuesPosition = groupPosition + positionOffset; + int groupId = groups.getInt(groupPosition); + long valueValue = valueVector.getLong(valuesPosition); + long timestampValue = timestampVector.getLong(valuesPosition); + IrateLongAggregator.combine(state, groupId, valueValue, timestampValue); + } + } + + @Override + public void addIntermediateInput(int positionOffset, IntVector groups, Page page) { + state.enableGroupIdTracking(new SeenGroupIds.Empty()); + assert channels.size() == intermediateBlockCount(); + Block timestampsUncast = page.getBlock(channels.get(0)); + if (timestampsUncast.areAllValuesNull()) { + return; + } + LongBlock timestamps = (LongBlock) timestampsUncast; + Block valuesUncast = page.getBlock(channels.get(1)); + if (valuesUncast.areAllValuesNull()) { + return; + } + LongBlock values = (LongBlock) valuesUncast; + assert timestamps.getPositionCount() == values.getPositionCount(); + for (int groupPosition = 0; groupPosition < groups.getPositionCount(); groupPosition++) { + int groupId = groups.getInt(groupPosition); + int valuesPosition = groupPosition + positionOffset; + IrateLongAggregator.combineIntermediate(state, groupId, timestamps, values, valuesPosition); + } + } + + private void maybeEnableGroupIdTracking(SeenGroupIds seenGroupIds, LongBlock valueBlock, + LongBlock timestampBlock) { + if (valueBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + if (timestampBlock.mayHaveNulls()) { + state.enableGroupIdTracking(seenGroupIds); + } + } + + @Override + public void selectedMayContainUnseenGroups(SeenGroupIds seenGroupIds) { + state.enableGroupIdTracking(seenGroupIds); + } + + @Override + public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) { + state.toIntermediate(blocks, offset, selected, driverContext); + } + + @Override + public void evaluateFinal(Block[] blocks, int offset, IntVector selected, + GroupingAggregatorEvaluationContext ctx) { + blocks[offset] = IrateLongAggregator.evaluateFinal(state, selected, ctx); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getClass().getSimpleName()).append("["); + sb.append("channels=").append(channels); + sb.append("]"); + return sb.toString(); + } + + @Override + public void close() { + state.close(); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st similarity index 80% rename from x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st rename to x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st index 054f020bfca06..363958a6239dd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IRateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st @@ -33,47 +33,43 @@ 
import org.elasticsearch.core.Releasables; * This class is generated. Edit `X-RateAggregator.java.st` instead. */ @GroupingAggregator( - value = { - @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), - @IntermediateState(name = "values", type = "$TYPE$_BLOCK"), - @IntermediateState(name = "resets", type = "DOUBLE") } + value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "$TYPE$_BLOCK") } ) -public class IRate$Type$Aggregator { +public class Irate$Type$Aggregator { - public static $Type$IRateGroupingState initGrouping(DriverContext driverContext) { - return new $Type$IRateGroupingState(driverContext.bigArrays(), driverContext.breaker()); + public static $Type$IrateGroupingState initGrouping(DriverContext driverContext) { + return new $Type$IrateGroupingState(driverContext.bigArrays(), driverContext.breaker()); } - public static void combine($Type$IRateGroupingState current, int groupId, $type$ value, long timestamp) { + public static void combine($Type$IrateGroupingState current, int groupId, $type$ value, long timestamp) { current.append(groupId, timestamp, value); } public static void combineIntermediate( - $Type$IRateGroupingState current, + $Type$IrateGroupingState current, int groupId, LongBlock timestamps, $Type$Block values, - double reset, int otherPosition ) { - current.combine(groupId, timestamps, values, reset, otherPosition); + current.combine(groupId, timestamps, values, otherPosition); } - public static Block evaluateFinal($Type$IRateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) { + public static Block evaluateFinal($Type$IrateGroupingState state, IntVector selected, GroupingAggregatorEvaluationContext evalContext) { return state.evaluateFinal(selected, evalContext); } - private static class $Type$IRateState { - static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$IRateState.class); + private static class $Type$IrateState { + static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$IrateState.class); final long[] timestamps; // descending order final $type$[] values; - $Type$IRateState(int initialSize) { + $Type$IrateState(int initialSize) { this.timestamps = new long[initialSize]; this.values = new $type$[initialSize]; } - $Type$IRateState(long[] ts, $type$[] vs) { + $Type$IrateState(long[] ts, $type$[] vs) { this.timestamps = ts; this.values = vs; } @@ -96,13 +92,13 @@ public class IRate$Type$Aggregator { } } - public static final class $Type$IRateGroupingState implements Releasable, Accountable, GroupingAggregatorState { - private ObjectArray<$Type$IRateState> states; + public static final class $Type$IrateGroupingState implements Releasable, Accountable, GroupingAggregatorState { + private ObjectArray<$Type$IrateState> states; private final BigArrays bigArrays; private final CircuitBreaker breaker; private long stateBytes; // for individual states - $Type$IRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { + $Type$IrateGroupingState(BigArrays bigArrays, CircuitBreaker breaker) { this.bigArrays = bigArrays; this.breaker = breaker; this.states = bigArrays.newObjectArray(1); @@ -122,20 +118,20 @@ public class IRate$Type$Aggregator { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { - adjustBreaker($Type$IRateState.bytesUsed(1)); - state = new $TypeIState(new long[] { timestamp }, new $type$[] { value }); + adjustBreaker($Type$IrateState.bytesUsed(1)); + state = new 
$Type$IrateState(new long[] { timestamp }, new $type$[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { - adjustBreaker($Type$IRateState.bytesUsed(2)); - state = new $Type$IRateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value }); + adjustBreaker($Type$IrateState.bytesUsed(2)); + state = new $Type$IrateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value }); states.set(groupId, state); - adjustBreaker(-$Type$IRateState.bytesUsed(1)); // old state + adjustBreaker(-$Type$IrateState.bytesUsed(1)); // old state } } } - void combine(int groupId, LongBlock timestamps, $Type$Block values, double reset, int otherPosition) { + void combine(int groupId, LongBlock timestamps, $Type$Block values, int otherPosition) { // TODO: Check this method pabloem final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { @@ -145,8 +141,8 @@ public class IRate$Type$Aggregator { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { - adjustBreaker($Type$IRateState.bytesUsed(valueCount)); - state = new $Type$IRateState(valueCount); + adjustBreaker($Type$IrateState.bytesUsed(valueCount)); + state = new $Type$IrateState(valueCount); states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -154,15 +150,15 @@ public class IRate$Type$Aggregator { state.values[i] = values.get$Type$(firstIndex + i); } } else { - adjustBreaker($Type$IRateState.bytesUsed(state.entries() + valueCount)); - var newState = new $Type$IRateState(state.entries() + valueCount); + adjustBreaker($Type$IrateState.bytesUsed(state.entries() + valueCount)); + var newState = new $Type$IrateState(state.entries() + valueCount); states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-$Type$IRateState.bytesUsed(state.entries())); // old state + adjustBreaker(-$Type$IrateState.bytesUsed(state.entries())); // old state } } - void merge($Type$IRateState curr, $Type$IRateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) { + void merge($Type$IrateState curr, $Type$IrateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) { int i = 0, j = 0, k = 0; final int leftCount = curr.entries(); // We do not merge more than two entries because we only need the last two. 
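The merge above is a plain two-pointer pass over two buffers that are both sorted by timestamp descending, and it stops as soon as two entries have been copied, because irate only ever needs the two newest samples. A minimal standalone sketch of that idea (illustrative names and plain long values; the real template is generated per type and also adjusts the circuit breaker):

    static void mergeNewestTwo(long[] ts1, long[] vs1, long[] ts2, long[] vs2, long[] tsOut, long[] vsOut) {
        int i = 0, j = 0, k = 0;
        // Both inputs are timestamp-descending; copy the newest two samples overall.
        while (k < tsOut.length && (i < ts1.length || j < ts2.length)) {
            if (j >= ts2.length || (i < ts1.length && ts1[i] > ts2[j])) {
                tsOut[k] = ts1[i];
                vsOut[k] = vs1[i];
                i++;
            } else {
                tsOut[k] = ts2[j];
                vsOut[k] = vs2[j];
                j++;
            }
            k++;
        }
    }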
@@ -183,10 +179,9 @@ public class IRate$Type$Aggregator { } } - $Type$IRateState mergeState($Type$IRateState s1, $Type$IRateState s2) { - adjustBreaker($Type$IRateState.bytesUsed(newLen)); - var dst = new $Type$IRateState(newLen); - dst.reset = s1.reset + s2.reset; + $Type$IrateState mergeState($Type$IrateState s1, $Type$IrateState s2) { + adjustBreaker($Type$IrateState.bytesUsed(2)); + var dst = new $Type$IrateState(2); int i = 0, j = 0, k = 0; while (i < s1.entries() && j < s2.entries() && k < 2) { if (s1.timestamps[i] > s2.timestamps[j]) { @@ -221,7 +216,6 @@ public class IRate$Type$Aggregator { try ( LongBlock.Builder timestamps = blockFactory.newLongBlockBuilder(positionCount * 2); $Type$Block.Builder values = blockFactory.new$Type$BlockBuilder(positionCount * 2); - DoubleVector.FixedBuilder resets = blockFactory.newDoubleVectorFixedBuilder(positionCount) ) { for (int i = 0; i < positionCount; i++) { final var groupId = selected.getInt(i); @@ -238,16 +232,13 @@ public class IRate$Type$Aggregator { values.append$Type$(v); } values.endPositionEntry(); - resets.appendDouble(i, state.reset); } else { timestamps.appendNull(); values.appendNull(); - resets.appendDouble(i, 0); } } blocks[offset] = timestamps.build(); blocks[offset + 1] = values.build(); - blocks[offset + 3] = resets.build().asBlock(); } } @@ -262,7 +253,9 @@ public class IRate$Type$Aggregator { continue; } int len = state.entries(); - final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[1] - state.values[0]; + final double ydiff = state.values[0] > state.values[1] + ? state.values[0] - state.values[1] + : state.values[1] - state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index c94d93c0e9be8..1ecb6aa8b270c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -27,6 +27,7 @@ import org.elasticsearch.xpack.esql.expression.function.aggregate.CountOverTime; import org.elasticsearch.xpack.esql.expression.function.aggregate.First; import org.elasticsearch.xpack.esql.expression.function.aggregate.FirstOverTime; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Irate; import org.elasticsearch.xpack.esql.expression.function.aggregate.Last; import org.elasticsearch.xpack.esql.expression.function.aggregate.LastOverTime; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; @@ -499,6 +500,7 @@ private static FunctionDefinition[][] snapshotFunctions() { def(First.class, bi(First::new), "first"), def(Last.class, bi(Last::new), "last"), def(Rate.class, uni(Rate::new), "rate"), + def(Irate.class, uni(Irate::new), "irate"), def(MaxOverTime.class, uni(MaxOverTime::new), "max_over_time"), def(MinOverTime.class, uni(MinOverTime::new), "min_over_time"), def(SumOverTime.class, uni(SumOverTime::new), "sum_over_time"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java new file mode 100644 index 
0000000000000..a6d4272943d38 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java @@ -0,0 +1,142 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.aggregate; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.IrateDoubleAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.IrateIntAggregatorFunctionSupplier; +import org.elasticsearch.compute.aggregation.IrateLongAggregatorFunctionSupplier; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.UnresolvedAttribute; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionAppliesTo; +import org.elasticsearch.xpack.esql.expression.function.FunctionAppliesToLifecycle; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.FunctionType; +import org.elasticsearch.xpack.esql.expression.function.OptionalArgument; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.planner.ToAggregator; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isType; + +public class Irate extends TimeSeriesAggregateFunction implements OptionalArgument, ToAggregator { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Irate", Irate::new); + + private final Expression timestamp; + + @FunctionInfo( + type = FunctionType.TIME_SERIES_AGGREGATE, + returnType = { "double" }, + description = "The instantaneous rate of a counter field, computed from its two most recent samples.", + appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.UNAVAILABLE) }, + note = "Available with the [TS](/reference/query-languages/esql/commands/source-commands.md#esql-ts) command in snapshot builds", + examples = { @Example(file = "k8s-timeseries", tag = "irate") } + ) + public Irate(Source source, @Param(name = "field", type = { "counter_long", "counter_integer", "counter_double" }) Expression field) { + this(source, field, new UnresolvedAttribute(source, "@timestamp")); + } + + public Irate( + Source source, + @Param(name = "field", type = { "counter_long", "counter_integer", "counter_double" }) Expression field, + Expression timestamp + ) { + this(source, field, Literal.TRUE, timestamp); + } + + // compatibility constructor used when reading from the stream + private Irate(Source source, Expression field, Expression filter, List<Expression> children) {
this(source, field, filter, children.getFirst()); + } + + private Irate(Source source, Expression field, Expression filter, Expression timestamp) { + super(source, field, filter, List.of(timestamp)); + this.timestamp = timestamp; + } + + public Irate(StreamInput in) throws IOException { + this( + Source.readFrom((PlanStreamInput) in), + in.readNamedWriteable(Expression.class), + in.readNamedWriteable(Expression.class), + in.readNamedWriteableCollectionAsList(Expression.class) + ); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + protected NodeInfo<Irate> info() { + return NodeInfo.create(this, Irate::new, field(), timestamp); + } + + @Override + public Irate replaceChildren(List<Expression> newChildren) { + if (newChildren.size() != 3) { + assert false : "expected 3 children for field, filter, @timestamp; got " + newChildren; + throw new IllegalArgumentException("expected 3 children for field, filter, @timestamp; got " + newChildren); + } + return new Irate(source(), newChildren.get(0), newChildren.get(1), newChildren.get(2)); + } + + @Override + public Irate withFilter(Expression filter) { + return new Irate(source(), field(), filter, timestamp); + } + + @Override + public DataType dataType() { + return DataType.DOUBLE; + } + + @Override + protected TypeResolution resolveType() { + return isType(field(), dt -> DataType.isCounter(dt), sourceText(), FIRST, "counter_long", "counter_integer", "counter_double"); + } + + @Override + public AggregatorFunctionSupplier supplier() { + final DataType type = field().dataType(); + return switch (type) { + case COUNTER_LONG -> new IrateLongAggregatorFunctionSupplier(); + case COUNTER_INTEGER -> new IrateIntAggregatorFunctionSupplier(); + case COUNTER_DOUBLE -> new IrateDoubleAggregatorFunctionSupplier(); + default -> throw EsqlIllegalArgumentException.illegalDataType(type); + }; + } + + @Override + public Irate perTimeSeriesAggregation() { + return this; + } + + @Override + public String toString() { + return "irate(" + field() + ")"; + } + + Expression timestamp() { + return timestamp; + } +} + From bc1d34483934186d75af457f3b07fcef9114f943 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Thu, 4 Sep 2025 03:28:56 +0000 Subject: [PATCH 03/13] [CI] Auto commit changes from spotless --- .../xpack/esql/expression/function/aggregate/Irate.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java index a6d4272943d38..7d2b7be6dc146 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java @@ -139,4 +139,3 @@ Expression timestamp() { return timestamp; } } - From c563168853d6b5875044df3665a823dbf77ba204 Mon Sep 17 00:00:00 2001 From: Pablo Date: Thu, 4 Sep 2025 09:27:35 -0700 Subject: [PATCH 04/13] Adding support for irate in test framework --- .../esql/action/RandomizedTimeSeriesIT.java | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java index 4ae65eadf88e5..8d0c7674bfe8f 100644 ---
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java @@ -243,12 +243,18 @@ public int compareToFindingMax(RateRange o) { } } + enum DeltaAgg { + RATE, + IRATE + } + // A record that holds min, max, avg, count and sum of rates calculated from a timeseries. record RateStats(Long count, RateRange max, RateRange avg, RateRange min, RateRange sum) {} - static RateStats calculateRateAggregation( + static RateStats calculateDeltaAggregation( Collection>>> allTimeseries, - Integer secondsInWindow + Integer secondsInWindow, + DeltaAgg deltaAgg ) { List allRates = allTimeseries.stream().map(timeseries -> { if (timeseries.size() < 2) { @@ -258,6 +264,12 @@ static RateStats calculateRateAggregation( timeseries.sort((t1, t2) -> t1.v2().v1().compareTo(t2.v2().v1())); var firstTs = timeseries.getFirst().v2().v1(); var lastTs = timeseries.getLast().v2().v1(); + if (deltaAgg.equals(DeltaAgg.IRATE)) { + var irate = Math.abs(timeseries.getLast().v2().v2() - timeseries.get(timeseries.size() - 2).v2().v2()) + / (lastTs.toEpochMilli() - timeseries.get(timeseries.size() - 2).v2().v1().toEpochMilli()) * 1000; + return new RateRange(irate, irate); + } + assert deltaAgg == DeltaAgg.RATE; Double lastValue = null; Double counterGrowth = 0.0; for (Tuple> point : timeseries) { @@ -391,7 +403,7 @@ public void testRateGroupBySubset() { var rowKey = getRowKey(row, dimensions, 5); var windowDataPoints = groups.get(rowKey); var docsPerTimeseries = groupByTimeseries(windowDataPoints, "counterl_hdd.bytes.read"); - var rateAgg = calculateRateAggregation(docsPerTimeseries.values(), windowSize); + var rateAgg = calculateDeltaAggregation(docsPerTimeseries.values(), windowSize, DeltaAgg.RATE); try { assertThat(row.getFirst(), equalTo(rateAgg.count)); checkWithin((Double) row.get(1), rateAgg.max); @@ -430,7 +442,7 @@ public void testRateGroupByNothing() { var windowStart = windowStart(row.get(4), SECONDS_IN_WINDOW); var windowDataPoints = groups.get(List.of(Long.toString(windowStart))); var docsPerTimeseries = groupByTimeseries(windowDataPoints, "counterl_hdd.bytes.read"); - var rateAgg = calculateRateAggregation(docsPerTimeseries.values(), SECONDS_IN_WINDOW); + var rateAgg = calculateDeltaAggregation(docsPerTimeseries.values(), SECONDS_IN_WINDOW, DeltaAgg.RATE); try { assertThat(row.getFirst(), equalTo(rateAgg.count)); checkWithin((Double) row.get(1), rateAgg.max); From 554be4c470434f02bb11e2c07ac4171f70b9b187 Mon Sep 17 00:00:00 2001 From: Pablo Date: Thu, 4 Sep 2025 11:54:10 -0700 Subject: [PATCH 05/13] fixup --- muted-tests.yml | 6 +++++ .../esql/action/RandomizedTimeSeriesIT.java | 26 ++++++++++++------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index cbd1c8d04bcfe..be76419c8aa9c 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -480,6 +480,12 @@ tests: - class: org.elasticsearch.xpack.esql.inference.rerank.RerankOperatorTests method: testSimpleCircuitBreaking issue: https://github.com/elastic/elasticsearch/issues/133619 +- class: org.elasticsearch.xpack.kql.parser.KqlParserBooleanQueryTests + method: testParseOrQuery + issue: https://github.com/elastic/elasticsearch/issues/133863 +- class: org.elasticsearch.xpack.kql.parser.KqlParserBooleanQueryTests + method: testParseAndQuery + issue: https://github.com/elastic/elasticsearch/issues/133871 - class: 
org.elasticsearch.xpack.ml.integration.InferenceIT method: testInferClassificationModel issue: https://github.com/elastic/elasticsearch/issues/133448 diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java index 8d0c7674bfe8f..22f5a166a21a3 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java @@ -71,6 +71,10 @@ public class RandomizedTimeSeriesIT extends AbstractEsqlIntegTestCase { Tuple.tuple("30 minutes", 1800), Tuple.tuple("1 hour", 3600) ); + private static final List<Tuple<String, DeltaAgg>> DELTA_AGG_OPTIONS = List.of( + Tuple.tuple("rate", DeltaAgg.RATE), + Tuple.tuple("irate", DeltaAgg.IRATE) + ); private List documents; private TSDataGenerationHelper dataGenerationHelper; @@ -265,9 +269,11 @@ static RateStats calculateDeltaAggregation( var firstTs = timeseries.getFirst().v2().v1(); var lastTs = timeseries.getLast().v2().v1(); if (deltaAgg.equals(DeltaAgg.IRATE)) { - var irate = Math.abs(timeseries.getLast().v2().v2() - timeseries.get(timeseries.size() - 2).v2().v2()) + var lastVal = timeseries.getLast().v2().v2(); + var secondLastVal = timeseries.get(timeseries.size() - 2).v2().v2(); + var irate = (lastVal > secondLastVal ? lastVal - secondLastVal : lastVal) / (lastTs.toEpochMilli() - timeseries.get(timeseries.size() - 2).v2().v1().toEpochMilli()) * 1000; - return new RateRange(irate, irate); + return new RateRange(irate * 0.999, irate * 1.001); // Add 0.1% tolerance } assert deltaAgg == DeltaAgg.RATE; Double lastValue = null; Double counterGrowth = 0.0; for (Tuple> point : timeseries) { @@ -378,6 +384,7 @@ void assertNoFailedWindows(List<String> failedWindows, List<List<Object>> rows) * the same values from the documents in the group. */ public void testRateGroupBySubset() { + var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); var window = ESTestCase.randomFrom(WINDOW_OPTIONS); var windowSize = window.v2(); var windowStr = window.v1(); @@ -387,15 +394,14 @@ public void testRateGroupBySubset() { var dimensionsStr = dimensions.isEmpty() ? "" : ", " + dimensions.stream().map(d -> "attributes."
+ d).collect(Collectors.joining(", ")); try (var resp = run(String.format(Locale.ROOT, """ TS %s - | STATS count(rate(metrics.counterl_hdd.bytes.read)), - max(rate(metrics.counterl_hdd.bytes.read)), - avg(rate(metrics.counterl_hdd.bytes.read)), - min(rate(metrics.counterl_hdd.bytes.read)), - sum(rate(metrics.counterl_hdd.bytes.read)) + | STATS count(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)), + max(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)), + avg(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)), + min(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)), + sum(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)) BY tbucket=bucket(@timestamp, %s) %s | SORT tbucket - | LIMIT 1000 - """, DATASTREAM_NAME, windowStr, dimensionsStr))) { + """, DATASTREAM_NAME, windowStr, dimensionsStr).replaceAll("<DELTA_AGG>", deltaAgg.v1()))) { List<List<Object>> rows = consumeRows(resp); List<String> failedWindows = new ArrayList<>(); var groups = groupedRows(documents, dimensions, windowSize); @@ -403,7 +409,7 @@ public void testRateGroupBySubset() { var rowKey = getRowKey(row, dimensions, 5); var windowDataPoints = groups.get(rowKey); var docsPerTimeseries = groupByTimeseries(windowDataPoints, "counterl_hdd.bytes.read"); - var rateAgg = calculateDeltaAggregation(docsPerTimeseries.values(), windowSize, DeltaAgg.RATE); + var rateAgg = calculateDeltaAggregation(docsPerTimeseries.values(), windowSize, deltaAgg.v2()); try { assertThat(row.getFirst(), equalTo(rateAgg.count)); checkWithin((Double) row.get(1), rateAgg.max); From ff69d50f3513dacbe44e0881d39d386386b009fe Mon Sep 17 00:00:00 2001 From: Pablo Date: Thu, 4 Sep 2025 20:39:07 -0700 Subject: [PATCH 06/13] Progress on irate. Debugging one last issue --- .../aggregation/IrateDoubleAggregator.java | 6 +- .../aggregation/IrateFloatAggregator.java | 6 +- .../aggregation/IrateIntAggregator.java | 6 +- .../aggregation/IrateLongAggregator.java | 6 +- .../compute/aggregation/debugirate.txt | 10 + .../aggregation/X-IrateAggregator.java.st | 6 +- .../resources/k8s-timeseries-irate.csv-spec | 212 ++++++++++++++++++ .../esql/action/RandomizedTimeSeriesIT.java | 19 +- .../aggregate/AggregateWritables.java | 1 + 9 files changed, 253 insertions(+), 19 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java index 5496215022154..4646ba6f9e12d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java @@ -210,7 +210,7 @@ public void close() { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset; final BlockFactory blockFactory = driverContext.blockFactory(); final int positionCount = selected.getPositionCount(); try ( @@ -253,9 +253,11 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval continue; } int len = state.entries(); + // When the last value is less than the previous one, we assume a reset +
// and use the last value directly. final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] - : state.values[1] - state.values[0]; + : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java index a10d65beaca8f..c557cd5694038 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java @@ -210,7 +210,7 @@ public void close() { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset; final BlockFactory blockFactory = driverContext.blockFactory(); final int positionCount = selected.getPositionCount(); try ( @@ -253,9 +253,11 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval continue; } int len = state.entries(); + // When the last value is less than the previous one, we assume a reset + // and use the last value directly. final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] - : state.values[1] - state.values[0]; + : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java index 7d8d39552dcce..e94da51881df2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java @@ -210,7 +210,7 @@ public void close() { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset; final BlockFactory blockFactory = driverContext.blockFactory(); final int positionCount = selected.getPositionCount(); try ( @@ -253,9 +253,11 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval continue; } int len = state.entries(); + // When the last value is less than the previous one, we assume a reset + // and use the last value directly. final double ydiff = state.values[0] > state.values[1] ? 
state.values[0] - state.values[1] - : state.values[1] - state.values[0]; + : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java index cde1e96bc8b57..4038a1d92e9ee 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java @@ -210,7 +210,7 @@ public void close() { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset; final BlockFactory blockFactory = driverContext.blockFactory(); final int positionCount = selected.getPositionCount(); try ( @@ -253,9 +253,11 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval continue; } int len = state.entries(); + // When the last value is less than the previous one, we assume a reset + // and use the last value directly. final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] - : state.values[1] - state.values[0]; + : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt new file mode 100644 index 0000000000000..2a3fa0972bc42 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt @@ -0,0 +1,10 @@ +IN ES: +VALUES: [614, 0] +TIMESTAMPS: [175395597 5918, 0] +// PROBLEM IN THE ABOVE!!!! +// The value here should be in the past, not 0! +// v2=Tuple[v1=2025-07-31T09:59:04.846Z, v2=927.0]] + +VALS: [167, 829] +TS: [1753955998118, 1753955983589] + diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st index 363958a6239dd..3e90e92a626fe 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st @@ -210,7 +210,7 @@ public class Irate$Type$Aggregator { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - assert blocks.length >= offset + 3 : "blocks=" + blocks.length + ",offset=" + offset; + assert blocks.length >= offset + 2 : "blocks=" + blocks.length + ",offset=" + offset; final BlockFactory blockFactory = driverContext.blockFactory(); final int positionCount = selected.getPositionCount(); try ( @@ -253,9 +253,11 @@ public class Irate$Type$Aggregator { continue; } int len = state.entries(); + // When the last value is less than the previous one, we assume a reset + // and use the last value directly. 
final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] - : state.values[1] - state.values[0]; + : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec new file mode 100644 index 0000000000000..c2ab273610c14 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec @@ -0,0 +1,212 @@ +irate_of_long_no_grouping +required_capability: metrics_command +TS k8s +| STATS irate_bytes_in=avg(irate(network.total_bytes_in)) BY time_bucket = bucket(@timestamp,1minute) +| SORT irate_bytes_in DESC, time_bucket DESC | LIMIT 10; + +irate_bytes_in:double | time_bucket:datetime +null | 2024-05-10T00:01:00.000Z +140.89655067155067 | 2024-05-10T00:09:00.000Z +140.58333333333331 | 2024-05-10T00:02:00.000Z +116.41911764705883 | 2024-05-10T00:22:00.000Z +112.83333333333333 | 2024-05-10T00:00:00.000Z +93.43529411764706 | 2024-05-10T00:14:00.000Z +88.6875 | 2024-05-10T00:11:00.000Z +78.83333333333333 | 2024-05-10T00:13:00.000Z +71.04464285714286 | 2024-05-10T00:15:00.000Z +51.58823529411765 | 2024-05-10T00:19:00.000Z + +; + +irate_of_long_grouping +required_capability: metrics_command +TS k8s +| STATS irate_bytes_in=avg(irate(network.total_bytes_in)) BY cluster, time_bucket = bucket(@timestamp,5minute) +| SORT irate_bytes_in DESC, time_bucket, cluster | LIMIT 10; + +irate_bytes_in:double | cluster:keyword | time_bucket:datetime +284.63440860215053 | qa | 2024-05-10T00:05:00.000Z +119.52228682170542 | prod | 2024-05-10T00:20:00.000Z +62.32120383036936 | prod | 2024-05-10T00:10:00.000Z +40.90705128205128 | staging | 2024-05-10T00:05:00.000Z +31.92871485943775 | prod | 2024-05-10T00:00:00.000Z +30.83898647284474 | staging | 2024-05-10T00:00:00.000Z +28.57226890756303 | qa | 2024-05-10T00:15:00.000Z +21.898989322941418 | staging | 2024-05-10T00:15:00.000Z +14.23272442880286 | qa | 2024-05-10T00:00:00.000Z +10.889987485115794 | staging | 2024-05-10T00:10:00.000Z + + +; + +irate_of_double_no_grouping +required_capability: metrics_command +TS k8s +| STATS irate_cost=sum(irate(network.total_cost)) BY time_bucket = bucket(@timestamp,1minute) +| SORT irate_cost DESC, time_bucket | LIMIT 10; + +irate_cost:double | time_bucket:datetime +null | 2024-05-10T00:01:00.000Z +7.836832264957264 | 2024-05-10T00:09:00.000Z +6.754166666666666 | 2024-05-10T00:02:00.000Z +3.590324074074074 | 2024-05-10T00:17:00.000Z +2.2916666666666665 | 2024-05-10T00:08:00.000Z +2.265625 | 2024-05-10T00:11:00.000Z +2.2481617647058822 | 2024-05-10T00:22:00.000Z +2.020833333333333 | 2024-05-10T00:00:00.000Z +1.951470588235294 | 2024-05-10T00:14:00.000Z +1.8680555555555556 | 2024-05-10T00:13:00.000Z + +; + +irate_with_filtering +required_capability: metrics_command +TS k8s | WHERE pod == "one" +| STATS irate_bytes_in = sum(irate(network.total_bytes_in)) BY cluster, time_bucket = bucket(@timestamp, 10minute) +| SORT time_bucket, cluster | LIMIT 10; + +irate_bytes_in:double | cluster:keyword | time_bucket:datetime +0.07692307692307693 | prod | 2024-05-10T00:00:00.000Z +830.0 | qa | 2024-05-10T00:00:00.000Z +31.375 | staging | 2024-05-10T00:00:00.000Z +9.854545454545454 | prod | 2024-05-10T00:10:00.000Z +18.700000000000003 | qa | 2024-05-10T00:10:00.000Z +0.023952095808383235 | staging | 2024-05-10T00:10:00.000Z +232.75 | prod | 
2024-05-10T00:20:00.000Z +3.2698412698412698 | qa | 2024-05-10T00:20:00.000Z +4.407407407407407 | staging | 2024-05-10T00:20:00.000Z + + +; + +eval_on_irate +required_capability: metrics_command +TS k8s +| STATS irate_bytes = avg(irate(network.total_bytes_in)) BY cluster, time_bucket = bucket(@timestamp, 10minute) +| EVAL irate_kb = irate_bytes / 1024.0 +| LIMIT 10 | SORT time_bucket, cluster ; + +irate_bytes:double | cluster:keyword | time_bucket:datetime | irate_kb:double +4.37482276552044 | prod | 2024-05-10T00:00:00.000Z | 0.004272287856953555 +284.63440860215053 | qa | 2024-05-10T00:00:00.000Z | 0.2779632896505376 +40.90705128205128 | staging | 2024-05-10T00:00:00.000Z | 0.0399482922676282 +9.893214497920377 | prod | 2024-05-10T00:10:00.000Z | 0.009661342283125368 +28.57226890756303 | qa | 2024-05-10T00:10:00.000Z | 0.02790260635504202 +21.898989322941418 | staging | 2024-05-10T00:10:00.000Z | 0.02138573176068498 +119.52228682170542 | prod | 2024-05-10T00:20:00.000Z | 0.1167209832243217 +4.428024083196497 | qa | 2024-05-10T00:20:00.000Z | 0.0043242422687465795 +1.5050835148874364 | staging | 2024-05-10T00:20:00.000Z | 0.0014698081200072621 +; + +irate_of_aggregate_metric +required_capability: metrics_command +TS k8s-downsampled +| STATS sum_bytes = sum(irate(network.total_bytes_in)), + max_bytes = max(irate(network.total_bytes_in)), + min_bytes = min(irate(network.total_bytes_in)), + avg_bytes = avg(irate(network.total_bytes_in)) BY time_bucket = bucket(@timestamp, 30minute) +| SORT time_bucket | LIMIT 10; + +sum_bytes:double | max_bytes:double | min_bytes:double | avg_bytes:double | time_bucket:datetime +1.145 | 0.39 | 0.008333333333333333 | 0.12722222222222224 | 2024-05-09T23:30:00.000Z + +; + +irate_of_expression +required_capability: metrics_command +TS k8s +| STATS irate_bytes_in=avg(irate(network.total_bytes_in) + 10) BY time_bucket = bucket(@timestamp,1minute) +| SORT irate_bytes_in DESC, time_bucket DESC | LIMIT 10; + +irate_bytes_in:double | time_bucket:datetime +null | 2024-05-10T00:01:00.000Z +150.89655067155067 | 2024-05-10T00:09:00.000Z +150.58333333333331 | 2024-05-10T00:02:00.000Z +126.41911764705883 | 2024-05-10T00:22:00.000Z +122.83333333333333 | 2024-05-10T00:00:00.000Z +103.43529411764706 | 2024-05-10T00:14:00.000Z +98.6875 | 2024-05-10T00:11:00.000Z +88.83333333333333 | 2024-05-10T00:13:00.000Z +81.04464285714286 | 2024-05-10T00:15:00.000Z +61.58823529411765 | 2024-05-10T00:19:00.000Z + +; + +irate_combined_avg +required_capability: metrics_command +TS k8s +| STATS avg_irate_bytes = avg(irate(network.total_bytes_in)), avg_irate_cost = avg(irate(network.total_cost)) BY cluster, time_bucket = bucket(@timestamp, 10minute) +| EVAL ratio = avg_irate_bytes / avg_irate_cost +| SORT time_bucket, cluster | LIMIT 10; + +avg_irate_bytes:double | avg_irate_cost:double | cluster:keyword | time_bucket:datetime | ratio:double +4.37482276552044 | 0.12927101967799642 | prod | 2024-05-10T00:00:00.000Z | 33.84225464004049 +284.63440860215053 | 2.112455197132616 | qa | 2024-05-10T00:00:00.000Z | 134.74103923647934 +40.90705128205128 | 0.46879451566951563 | staging | 2024-05-10T00:00:00.000Z | 87.26008926027917 +9.893214497920377 | 0.18585561497326206 | prod | 2024-05-10T00:10:00.000Z | 53.23064626992117 +28.57226890756303 | 0.20140056022408961 | qa | 2024-05-10T00:10:00.000Z | 141.8678720445063 +21.898989322941418 | 0.2425173462598612 | staging | 2024-05-10T00:10:00.000Z | 90.29865146007454 +119.52228682170542 | 1.0260416666666667 | prod | 2024-05-10T00:20:00.000Z | 116.48872624247431 
+4.428024083196497 | 0.0808531746031746 | qa | 2024-05-10T00:20:00.000Z | 54.76623651364503 +1.5050835148874364 | 0.5028140885984024 | staging | 2024-05-10T00:20:00.000Z | 2.9933200938797615 + +; + +irate_combined_sum +required_capability: metrics_command +TS k8s +| STATS sum_irate_bytes = sum(irate(network.total_bytes_in)), sum_irate_cost = sum(irate(network.total_cost)) BY cluster, time_bucket = bucket(@timestamp, 10minute) +| EVAL ratio = sum_irate_bytes / sum_irate_cost +| SORT time_bucket, cluster | LIMIT 10; + +sum_irate_bytes:double | sum_irate_cost:double | cluster:keyword | time_bucket:datetime | ratio:double +13.12446829656132 | 0.38781305903398927 | prod | 2024-05-10T00:00:00.000Z | 33.84225464004049 +853.9032258064516 | 6.337365591397849 | qa | 2024-05-10T00:00:00.000Z | 134.74103923647934 +122.72115384615384 | 1.4063835470085468 | staging | 2024-05-10T00:00:00.000Z | 87.26008926027919 +29.679643493761134 | 0.5575668449197861 | prod | 2024-05-10T00:10:00.000Z | 53.23064626992118 +85.71680672268909 | 0.6042016806722689 | qa | 2024-05-10T00:10:00.000Z | 141.86787204450627 +65.69696796882425 | 0.7275520387795836 | staging | 2024-05-10T00:10:00.000Z | 90.29865146007454 +239.04457364341084 | 2.0520833333333335 | prod | 2024-05-10T00:20:00.000Z | 116.48872624247431 +8.856048166392995 | 0.1617063492063492 | qa | 2024-05-10T00:20:00.000Z | 54.76623651364503 +4.515250544662309 | 1.508442265795207 | staging | 2024-05-10T00:20:00.000Z | 2.9933200938797615 +; + +irate_of_ratio +required_capability: metrics_command +TS k8s +| STATS irate_of_ratio = sum(irate(network.total_cost) / irate(network.total_bytes_in)) BY cluster, time_bucket = bucket(@timestamp, 10minute) +| SORT time_bucket, cluster | LIMIT 10; + +irate_of_ratio:double | cluster:keyword | time_bucket:datetime +0.7377812779572093 | prod | 2024-05-10T00:00:00.000Z +0.15560960316361233 | qa | 2024-05-10T00:00:00.000Z +0.04287216214507051 | staging | 2024-05-10T00:00:00.000Z +3.088611728967339 | prod | 2024-05-10T00:10:00.000Z +0.019280051363230983 | qa | 2024-05-10T00:10:00.000Z +0.17121614905482155 | staging | 2024-05-10T00:10:00.000Z +0.021697562872698986 | prod | 2024-05-10T00:20:00.000Z +0.04152807743018099 | qa | 2024-05-10T00:20:00.000Z +4.02626050420168 | staging | 2024-05-10T00:20:00.000Z + +; + +irate_of_long_grouping_1min_nulls +required_capability: metrics_command +TS k8s +| STATS irate_bytes_in=avg(irate(network.total_bytes_in)) BY cluster, time_bucket = bucket(@timestamp,2minute) +| SORT irate_bytes_in NULLS FIRST, time_bucket, cluster | LIMIT 10; + +irate_bytes_in:double | cluster:keyword | time_bucket:datetime +null | qa | 2024-05-10T00:00:00.000Z +null | staging | 2024-05-10T00:00:00.000Z +null | prod | 2024-05-10T00:06:00.000Z +null | prod | 2024-05-10T00:10:00.000Z +null | staging | 2024-05-10T00:10:00.000Z +null | qa | 2024-05-10T00:22:00.000Z +0.08823529411764706 | staging | 2024-05-10T00:22:00.000Z +0.3 | prod | 2024-05-10T00:04:00.000Z +0.875 | qa | 2024-05-10T00:12:00.000Z +4.300925925925926 | qa | 2024-05-10T00:02:00.000Z + +; + diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java index 22f5a166a21a3..e4dbe66e4d0f3 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java @@ -56,8 +56,8 @@ @SuppressWarnings("unchecked") @ESIntegTestCase.ClusterScope(maxNumDataNodes = 1) public class RandomizedTimeSeriesIT extends AbstractEsqlIntegTestCase { - private static final Long NUM_DOCS = 500L; - private static final Long TIME_RANGE_SECONDS = 900L; + private static final Long NUM_DOCS = 6L; + private static final Long TIME_RANGE_SECONDS = 60L; private static final String DATASTREAM_NAME = "tsit_ds"; private static final Integer SECONDS_IN_WINDOW = 60; private static final List<Tuple<String, Integer>> WINDOW_OPTIONS = List.of( @@ -384,7 +384,8 @@ void assertNoFailedWindows(List<String> failedWindows, List<List<Object>> rows) * the same values from the documents in the group. */ public void testRateGroupBySubset() { - var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); +// var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); + var deltaAgg = Tuple.tuple("irate", DeltaAgg.IRATE); // TODO: restore the randomized delta agg once irate is fixed var window = ESTestCase.randomFrom(WINDOW_OPTIONS); var windowSize = window.v2(); var windowStr = window.v1(); @@ -392,7 +393,7 @@ public void testRateGroupBySubset() { var dimensionsStr = dimensions.isEmpty() ? "" : ", " + dimensions.stream().map(d -> "attributes." + d).collect(Collectors.joining(", ")); - try (var resp = run(String.format(Locale.ROOT, """ + var query = String.format(Locale.ROOT, """ TS %s | STATS count(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)), max(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)), avg(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)), sum(<DELTA_AGG>(metrics.counterl_hdd.bytes.read)) BY tbucket=bucket(@timestamp, %s) %s | SORT tbucket - """, DATASTREAM_NAME, windowStr, dimensionsStr).replaceAll("<DELTA_AGG>", deltaAgg.v1()))) { + """, DATASTREAM_NAME, windowStr, dimensionsStr).replaceAll("<DELTA_AGG>", deltaAgg.v1()); + try (var resp = run(query)) { List<List<Object>> rows = consumeRows(resp); List<String> failedWindows = new ArrayList<>(); var groups = groupedRows(documents, dimensions, windowSize); @@ -440,7 +442,6 @@ public void testRateGroupByNothing() { min(rate(metrics.counterl_hdd.bytes.read)) BY tbucket=bucket(@timestamp, 1 minute) | SORT tbucket - | LIMIT 1000 """, DATASTREAM_NAME))) { List<List<Object>> rows = consumeRows(resp); List<String> failedWindows = new ArrayList<>(); @@ -487,7 +488,7 @@ public void testGaugeGroupByRandomAndRandomAgg() { %s BY tbucket=bucket(@timestamp, %s) %s | SORT tbucket - | LIMIT 1000""", DATASTREAM_NAME, metricName, aggExpression, windowStr, dimensionsStr); + """, DATASTREAM_NAME, metricName, aggExpression, windowStr, dimensionsStr); try (EsqlQueryResponse resp = run(query)) { var groups = groupedRows(documents, dimensions, windowSize); List<List<Object>> rows = consumeRows(resp); @@ -553,7 +554,7 @@ public void testGroupBySubset() { count(count_over_time(metrics.gaugel_hdd.bytes.used)) BY tbucket=bucket(@timestamp, 1 minute), %s | SORT tbucket - | LIMIT 1000""", DATASTREAM_NAME, dimensionsStr))) { + """, DATASTREAM_NAME, dimensionsStr))) { var groups = groupedRows(documents, dimensions, 60); List<List<Object>> rows = consumeRows(resp); for (List<Object> row : rows) { @@ -594,7 +595,7 @@ public void testGroupByNothing() { count(count_over_time(metrics.gaugel_hdd.bytes.used)) BY tbucket=bucket(@timestamp, 1 minute) | SORT tbucket - | LIMIT 1000""", DATASTREAM_NAME))) { + """, DATASTREAM_NAME))) { List<List<Object>> rows = consumeRows(resp); var groups = groupedRows(documents, List.of(), 60); for (List<Object> row : rows) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java index bc2ddc90591ef..fd799c6f47128 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateWritables.java @@ -25,6 +25,7 @@ public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { Min.ENTRY, Percentile.ENTRY, Rate.ENTRY, + Irate.ENTRY, Sample.ENTRY, SpatialCentroid.ENTRY, SpatialExtent.ENTRY, From 1921736d04bdd0bcb13a33f72e53b148f8c79152 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Fri, 5 Sep 2025 03:46:54 +0000 Subject: [PATCH 07/13] [CI] Auto commit changes from spotless --- .../compute/aggregation/IrateDoubleAggregator.java | 4 +--- .../compute/aggregation/IrateFloatAggregator.java | 4 +--- .../compute/aggregation/IrateIntAggregator.java | 4 +--- .../compute/aggregation/IrateLongAggregator.java | 4 +--- .../elasticsearch/compute/aggregation/debugirate.txt | 10 ---------- .../xpack/esql/action/RandomizedTimeSeriesIT.java | 7 ++++--- 6 files changed, 8 insertions(+), 25 deletions(-) delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java index 4646ba6f9e12d..5c54af925f3f5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java @@ -255,9 +255,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] - ? state.values[0] - state.values[1] - : state.values[0]; + final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java index c557cd5694038..9d9f374316c0b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java @@ -255,9 +255,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] - ? state.values[0] - state.values[1] - : state.values[0]; + final double ydiff = state.values[0] > state.values[1] ?
state.values[0] - state.values[1] : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java index e94da51881df2..fef87c6002b42 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java @@ -255,9 +255,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] - ? state.values[0] - state.values[1] - : state.values[0]; + final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java index 4038a1d92e9ee..6ac0f55190bb6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java @@ -255,9 +255,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] - ? state.values[0] - state.values[1] - : state.values[0]; + final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt deleted file mode 100644 index 2a3fa0972bc42..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/debugirate.txt +++ /dev/null @@ -1,10 +0,0 @@ -IN ES: -VALUES: [614, 0] -TIMESTAMPS: [175395597 5918, 0] -// PROBLEM IN THE ABOVE!!!! -// The value here should be in the past, not 0! 
-// v2=Tuple[v1=2025-07-31T09:59:04.846Z, v2=927.0]] - -VALS: [167, 829] -TS: [1753955998118, 1753955983589] - diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java index e4dbe66e4d0f3..e9a7ef862ff25 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java @@ -271,8 +271,9 @@ static RateStats calculateDeltaAggregation( if (deltaAgg.equals(DeltaAgg.IRATE)) { var lastVal = timeseries.getLast().v2().v2(); var secondLastVal = timeseries.get(timeseries.size() - 2).v2().v2(); - var irate = (lastVal > secondLastVal ? lastVal - secondLastVal : lastVal) - / (lastTs.toEpochMilli() - timeseries.get(timeseries.size() - 2).v2().v1().toEpochMilli()) * 1000; + var irate = (lastVal > secondLastVal ? lastVal - secondLastVal : lastVal) / (lastTs.toEpochMilli() - timeseries.get( + timeseries.size() - 2 + ).v2().v1().toEpochMilli()) * 1000; return new RateRange(irate * 0.999, irate * 1.001); // Add 0.1% tolerance } assert deltaAgg == DeltaAgg.RATE; @@ -384,7 +385,7 @@ void assertNoFailedWindows(List failedWindows, List> rows) * the same values from the documents in the group. */ public void testRateGroupBySubset() { -// var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); + // var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); var deltaAgg = Tuple.tuple("irate", DeltaAgg.IRATE); // TODO: Re-enable irate after fixing var window = ESTestCase.randomFrom(WINDOW_OPTIONS); var windowSize = window.v2(); From 2d0e5d61688075c01d3387ed2052890d5050af39 Mon Sep 17 00:00:00 2001 From: Pablo Date: Mon, 8 Sep 2025 12:28:16 -0700 Subject: [PATCH 08/13] Finalized irate --- .../aggregation/IrateDoubleAggregator.java | 23 ++++++++------ .../aggregation/IrateFloatAggregator.java | 23 ++++++++------ .../aggregation/IrateIntAggregator.java | 23 ++++++++------ .../aggregation/IrateLongAggregator.java | 23 ++++++++------ .../aggregation/X-IrateAggregator.java.st | 23 ++++++++------ .../esql/action/RandomizedTimeSeriesIT.java | 31 +++++++++++++------ 6 files changed, 92 insertions(+), 54 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java index 5c54af925f3f5..eea43cce57ec1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java @@ -36,7 +36,6 @@ value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "DOUBLE_BLOCK") } ) public class IrateDoubleAggregator { - public static DoubleIrateGroupingState initGrouping(DriverContext driverContext) { return new DoubleIrateGroupingState(driverContext.bigArrays(), driverContext.breaker()); } @@ -119,15 +118,21 @@ void append(int groupId, long timestamp, double value) { var state = states.get(groupId); if (state == null) { adjustBreaker(DoubleIrateState.bytesUsed(1)); - state = new DoubleIrateState(new long[] { timestamp }, new double[] { value }); + state = new 
DoubleIrateState(new long[] { timestamp, -1 }, new double[] { value, 0 }); states.set(groupId, state); } else { - if (state.entries() == 1) { - adjustBreaker(DoubleIrateState.bytesUsed(2)); - state = new DoubleIrateState(new long[] { state.timestamps[0], timestamp }, new double[] { state.values[0], value }); - states.set(groupId, state); - adjustBreaker(-DoubleIrateState.bytesUsed(1)); // old state - } + // We only need the last two values, but we need to keep them sorted by timestamp. + if (timestamp > state.timestamps[0]) { + // new timestamp is the most recent + state.timestamps[1] = state.timestamps[0]; + state.values[1] = state.values[0]; + state.timestamps[0] = timestamp; + state.values[0] = value; + } else if (timestamp > state.timestamps[1]) { + // new timestamp is the second most recent + state.timestamps[1] = timestamp; + state.values[1] = value; + } // else: ignore, too old } } @@ -248,7 +253,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.values.length < 2) { + if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { rates.appendNull(); continue; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java index 9d9f374316c0b..8e58c5588ceef 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java @@ -36,7 +36,6 @@ value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "FLOAT_BLOCK") } ) public class IrateFloatAggregator { - public static FloatIrateGroupingState initGrouping(DriverContext driverContext) { return new FloatIrateGroupingState(driverContext.bigArrays(), driverContext.breaker()); } @@ -119,15 +118,21 @@ void append(int groupId, long timestamp, float value) { var state = states.get(groupId); if (state == null) { adjustBreaker(FloatIrateState.bytesUsed(1)); - state = new FloatIrateState(new long[] { timestamp }, new float[] { value }); + state = new FloatIrateState(new long[] { timestamp, -1 }, new float[] { value, 0 }); states.set(groupId, state); } else { - if (state.entries() == 1) { - adjustBreaker(FloatIrateState.bytesUsed(2)); - state = new FloatIrateState(new long[] { state.timestamps[0], timestamp }, new float[] { state.values[0], value }); - states.set(groupId, state); - adjustBreaker(-FloatIrateState.bytesUsed(1)); // old state - } + // We only need the last two values, but we need to keep them sorted by timestamp. 
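// Illustrative trace of the two-slot update below, using made-up timestamps:
//   state holds timestamps = [100, 90] (slot 0 is the most recent sample)
//   append(t=110): slot 0 shifts down, timestamps = [110, 100]
//   append(t=105): only slot 1 is replaced, timestamps = [110, 105]
//   append(t=80): older than both retained samples, so it is ignored
// Whatever the arrival order, each group converges on its two newest samples.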
+ if (timestamp > state.timestamps[0]) { + // new timestamp is the most recent + state.timestamps[1] = state.timestamps[0]; + state.values[1] = state.values[0]; + state.timestamps[0] = timestamp; + state.values[0] = value; + } else if (timestamp > state.timestamps[1]) { + // new timestamp is the second most recent + state.timestamps[1] = timestamp; + state.values[1] = value; + } // else: ignore, too old } } @@ -248,7 +253,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.values.length < 2) { + if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { rates.appendNull(); continue; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java index fef87c6002b42..5b2308880bbc5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java @@ -36,7 +36,6 @@ value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "INT_BLOCK") } ) public class IrateIntAggregator { - public static IntIrateGroupingState initGrouping(DriverContext driverContext) { return new IntIrateGroupingState(driverContext.bigArrays(), driverContext.breaker()); } @@ -119,15 +118,21 @@ void append(int groupId, long timestamp, int value) { var state = states.get(groupId); if (state == null) { adjustBreaker(IntIrateState.bytesUsed(1)); - state = new IntIrateState(new long[] { timestamp }, new int[] { value }); + state = new IntIrateState(new long[] { timestamp, -1 }, new int[] { value, 0 }); states.set(groupId, state); } else { - if (state.entries() == 1) { - adjustBreaker(IntIrateState.bytesUsed(2)); - state = new IntIrateState(new long[] { state.timestamps[0], timestamp }, new int[] { state.values[0], value }); - states.set(groupId, state); - adjustBreaker(-IntIrateState.bytesUsed(1)); // old state - } + // We only need the last two values, but we need to keep them sorted by timestamp. + if (timestamp > state.timestamps[0]) { + // new timestamp is the most recent + state.timestamps[1] = state.timestamps[0]; + state.values[1] = state.values[0]; + state.timestamps[0] = timestamp; + state.values[0] = value; + } else if (timestamp > state.timestamps[1]) { + // new timestamp is the second most recent + state.timestamps[1] = timestamp; + state.values[1] = value; + } // else: ignore, too old } } @@ -248,7 +253,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? 
states.get(groupId) : null; - if (state == null || state.values.length < 2) { + if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { rates.appendNull(); continue; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java index 6ac0f55190bb6..832e6f26b403d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java @@ -36,7 +36,6 @@ value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "LONG_BLOCK") } ) public class IrateLongAggregator { - public static LongIrateGroupingState initGrouping(DriverContext driverContext) { return new LongIrateGroupingState(driverContext.bigArrays(), driverContext.breaker()); } @@ -119,15 +118,21 @@ void append(int groupId, long timestamp, long value) { var state = states.get(groupId); if (state == null) { adjustBreaker(LongIrateState.bytesUsed(1)); - state = new LongIrateState(new long[] { timestamp }, new long[] { value }); + state = new LongIrateState(new long[] { timestamp, -1 }, new long[] { value, 0 }); states.set(groupId, state); } else { - if (state.entries() == 1) { - adjustBreaker(LongIrateState.bytesUsed(2)); - state = new LongIrateState(new long[] { state.timestamps[0], timestamp }, new long[] { state.values[0], value }); - states.set(groupId, state); - adjustBreaker(-LongIrateState.bytesUsed(1)); // old state - } + // We only need the last two values, but we need to keep them sorted by timestamp. + if (timestamp > state.timestamps[0]) { + // new timestamp is the most recent + state.timestamps[1] = state.timestamps[0]; + state.values[1] = state.values[0]; + state.timestamps[0] = timestamp; + state.values[0] = value; + } else if (timestamp > state.timestamps[1]) { + // new timestamp is the second most recent + state.timestamps[1] = timestamp; + state.values[1] = value; + } // else: ignore, too old } } @@ -248,7 +253,7 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? 
states.get(groupId) : null; - if (state == null || state.values.length < 2) { + if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { rates.appendNull(); continue; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st index 3e90e92a626fe..446c34ac1a8ce 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st @@ -36,7 +36,6 @@ import org.elasticsearch.core.Releasables; value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "$TYPE$_BLOCK") } ) public class Irate$Type$Aggregator { - public static $Type$IrateGroupingState initGrouping(DriverContext driverContext) { return new $Type$IrateGroupingState(driverContext.bigArrays(), driverContext.breaker()); } @@ -119,15 +118,21 @@ public class Irate$Type$Aggregator { var state = states.get(groupId); if (state == null) { adjustBreaker($Type$IrateState.bytesUsed(1)); - state = new $Type$IrateState(new long[] { timestamp }, new $type$[] { value }); + state = new $Type$IrateState(new long[] { timestamp, -1 }, new $type$[] { value, 0 }); states.set(groupId, state); } else { - if (state.entries() == 1) { - adjustBreaker($Type$IrateState.bytesUsed(2)); - state = new $Type$IrateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value }); - states.set(groupId, state); - adjustBreaker(-$Type$IrateState.bytesUsed(1)); // old state - } + // We only need the last two values, but we need to keep them sorted by timestamp. + if (timestamp > state.timestamps[0]) { + // new timestamp is the most recent + state.timestamps[1] = state.timestamps[0]; + state.values[1] = state.values[0]; + state.timestamps[0] = timestamp; + state.values[0] = value; + } else if (timestamp > state.timestamps[1]) { + // new timestamp is the second most recent + state.timestamps[1] = timestamp; + state.values[1] = value; + } // else: ignore, too old } } @@ -248,7 +253,7 @@ public class Irate$Type$Aggregator { for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? 
states.get(groupId) : null; - if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { + if (state == null || state.hasSecond == false) { rates.appendNull(); continue; } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java index e9a7ef862ff25..67e061dcd4079 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java @@ -56,8 +56,8 @@ @SuppressWarnings("unchecked") @ESIntegTestCase.ClusterScope(maxNumDataNodes = 1) public class RandomizedTimeSeriesIT extends AbstractEsqlIntegTestCase { - private static final Long NUM_DOCS = 6L; - private static final Long TIME_RANGE_SECONDS = 60L; + private static final Long NUM_DOCS = 2000L; + private static final Long TIME_RANGE_SECONDS = 3600L; private static final String DATASTREAM_NAME = "tsit_ds"; private static final Integer SECONDS_IN_WINDOW = 60; private static final List<Tuple<String, Integer>> WINDOW_OPTIONS = List.of( @@ -215,6 +215,19 @@ static List<Object> getRowKey(List<Object> row, List<String> groupingAttributes, return rowKey; } + static Integer getTimestampIndex(String esqlQuery) { + // first we get the stats command after the pipe + var statsIndex = esqlQuery.indexOf("| STATS"); + var nextPipe = esqlQuery.indexOf("|", statsIndex + 1); + + var statsCommand = esqlQuery.substring(statsIndex, nextPipe); + // then we count the number of commas before "BY " + var byTbucketIndex = statsCommand.indexOf("BY "); + var statsPart = statsCommand.substring(0, byTbucketIndex); + // the number of columns is the number of commas + 1 + return (int) statsPart.chars().filter(ch -> ch == ',').count() + 1; + } + @Override public EsqlQueryResponse run(EsqlQueryRequest request) { assumeTrue("time series available in snapshot builds only", Build.current().isSnapshot()); @@ -385,8 +398,7 @@ void assertNoFailedWindows(List<String> failedWindows, List<List<Object>> rows) * the same values from the documents in the group.
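 * (Worked example for the getTimestampIndex helper added above, with a hypothetical
 * query: in "| STATS count(x), max(x), avg(x) BY tbucket=bucket(@timestamp, 1 minute)"
 * the part before "BY " contains two commas, so the helper returns 3, the zero-based
 * column index of tbucket, since columns 0 through 2 hold the three aggregates. The
 * comma count assumes no commas inside the aggregate expressions themselves.)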
*/ public void testRateGroupBySubset() { - // var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); - var deltaAgg = Tuple.tuple("irate", DeltaAgg.IRATE); // TODO: Re-enable irate after fixing + var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); var window = ESTestCase.randomFrom(WINDOW_OPTIONS); var windowSize = window.v2(); var windowStr = window.v1(); @@ -409,7 +421,7 @@ public void testRateGroupBySubset() { List<String> failedWindows = new ArrayList<>(); var groups = groupedRows(documents, dimensions, windowSize); for (List<Object> row : rows) { - var rowKey = getRowKey(row, dimensions, 5); + var rowKey = getRowKey(row, dimensions, getTimestampIndex(query)); var windowDataPoints = groups.get(rowKey); var docsPerTimeseries = groupByTimeseries(windowDataPoints, "counterl_hdd.bytes.read"); var rateAgg = calculateDeltaAggregation(docsPerTimeseries.values(), windowSize, deltaAgg.v2()); @@ -494,7 +506,7 @@ public void testGaugeGroupByRandomAndRandomAgg() { var groups = groupedRows(documents, dimensions, windowSize); List<List<Object>> rows = consumeRows(resp); for (List<Object> row : rows) { - var rowKey = getRowKey(row, dimensions, 1); + var rowKey = getRowKey(row, dimensions, getTimestampIndex(query)); var tsGroups = groupByTimeseries(groups.get(rowKey), metricName); Object expectedVal = aggregatePerTimeseries(tsGroups, selectedAggs.get(0), selectedAggs.get(1)); Double actualVal = switch (row.get(0)) { @@ -544,7 +556,7 @@ public void testGaugeGroupByRandomAndRandomAgg() { public void testGroupBySubset() { var dimensions = ESTestCase.randomNonEmptySubsetOf(dataGenerationHelper.attributesForMetrics); var dimensionsStr = dimensions.stream().map(d -> "attributes." + d).collect(Collectors.joining(", ")); - try (EsqlQueryResponse resp = run(String.format(Locale.ROOT, """ + var query = String.format(Locale.ROOT, """ TS %s | STATS max(max_over_time(metrics.gaugel_hdd.bytes.used)), @@ -555,11 +567,12 @@ public void testGroupBySubset() { count(count_over_time(metrics.gaugel_hdd.bytes.used)) BY tbucket=bucket(@timestamp, 1 minute), %s | SORT tbucket - """, DATASTREAM_NAME, dimensionsStr))) { + """, DATASTREAM_NAME, dimensionsStr); + try (EsqlQueryResponse resp = run(query)) { var groups = groupedRows(documents, dimensions, 60); List<List<Object>> rows = consumeRows(resp); for (List<Object> row : rows) { - var rowKey = getRowKey(row, dimensions, 6); + var rowKey = getRowKey(row, dimensions, getTimestampIndex(query)); var tsGroups = groupByTimeseries(groups.get(rowKey), "gaugel_hdd.bytes.used"); Function<Object, Double> toDouble = cell -> switch (cell) { case Long l -> l.doubleValue(); From 5593613825c2330d93557953870c2de572daea36 Mon Sep 17 00:00:00 2001 From: Pablo Date: Mon, 8 Sep 2025 17:13:06 -0700 Subject: [PATCH 09/13] fixup --- .../compute/aggregation/X-IrateAggregator.java.st | 4 +--- .../resources/rest-api-spec/test/esql/60_usage.yml | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st index 446c34ac1a8ce..cb1b15909c230 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st @@ -260,9 +260,7 @@ public class Irate$Type$Aggregator { int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value
directly. - final double ydiff = state.values[0] > state.values[1] - ? state.values[0] - state.values[1] - : state.values[0]; + final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; final long xdiff = state.timestamps[0] - state.timestamps[1]; rates.appendDouble(ydiff / xdiff * 1000); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index 5f2470675714d..df7c9a67b1409 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -129,8 +129,7 @@ setup: - match: {esql.functions.coalesce: $functions_coalesce} - gt: {esql.functions.categorize: $functions_categorize} # Testing for the entire function set isn't feasible, so we just check that we return the correct count as an approximation. - - length: {esql.functions: 173} # check the "sister" test below for a likely update to the same esql.functions length check - + - length: {esql.functions: 174} # check the "sister" test below for a likely update to the same esql.functions length check --- "Basic ESQL usage output (telemetry) non-snapshot version": - requires: From 5edc4c86b841b0ffe0d71f0305d88b202c6539a4 Mon Sep 17 00:00:00 2001 From: Pablo Date: Mon, 8 Sep 2025 22:00:18 -0700 Subject: [PATCH 10/13] comments and simplifying state impl --- .../aggregation/IrateDoubleAggregator.java | 149 ++++++------------ .../aggregation/IrateFloatAggregator.java | 149 ++++++------------ .../aggregation/IrateIntAggregator.java | 149 ++++++------------ .../aggregation/IrateLongAggregator.java | 149 ++++++------------ .../aggregation/X-IrateAggregator.java.st | 149 ++++++------------ .../esql/action/RandomizedTimeSeriesIT.java | 4 +- .../expression/function/aggregate/Irate.java | 8 +- 7 files changed, 240 insertions(+), 517 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java index eea43cce57ec1..2a52a4ce12202 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java @@ -30,7 +30,7 @@ /** * A rate grouping aggregation definition for double. - * This class is generated. Edit `X-RateAggregator.java.st` instead. + * This class is generated. Edit `X-IrateAggregator.java.st` instead. 
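 * (A sketch of the computation with made-up samples: given the two newest points
 * (t=10000 ms, v=500) and (t=5000 ms, v=200), ydiff = 300 and xdiff = 5000 ms, so
 * irate = 300 / 5000 * 1000 = 60 units per second. After a counter reset, e.g.
 * (t=10000, v=50) following (t=5000, v=200), ydiff falls back to the raw last
 * value 50, giving 50 / 5000 * 1000 = 10.)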
*/ @GroupingAggregator( value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "DOUBLE_BLOCK") } @@ -41,7 +41,8 @@ public static DoubleIrateGroupingState initGrouping(DriverContext driverContext) } public static void combine(DoubleIrateGroupingState current, int groupId, double value, long timestamp) { - current.append(groupId, timestamp, value); + current.ensureCapacity(groupId); + DoubleIrateGroupingState.append(current.states, groupId, timestamp, value); } public static void combineIntermediate( @@ -60,34 +61,24 @@ public static Block evaluateFinal(DoubleIrateGroupingState state, IntVector sele private static class DoubleIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(DoubleIrateState.class); - final long[] timestamps; // descending order - final double[] values; + long lastTimestamp; + long secondLastTimestamp; + double lastValue; + double secondLastValue; + boolean hasSecond; - DoubleIrateState(int initialSize) { - this.timestamps = new long[initialSize]; - this.values = new double[initialSize]; + DoubleIrateState() { + hasSecond = false; } - DoubleIrateState(long[] ts, double[] vs) { - this.timestamps = ts; - this.values = vs; + DoubleIrateState(long lastTimestamp, double lastValue) { + this.lastTimestamp = lastTimestamp; + this.lastValue = lastValue; + this.hasSecond = false; } - void append(long t, double v) { - assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; - assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; - // This method does not need to do anything because we only need the last two values - // and timestamps, which are already in place. - } - - int entries() { - return timestamps.length; - } - - static long bytesUsed(int entries) { - var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Double.BYTES * entries); - return BASE_RAM_USAGE + ts + vs; + static long bytesUsed() { + return BASE_RAM_USAGE; } } @@ -113,94 +104,45 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - void append(int groupId, long timestamp, double value) { - ensureCapacity(groupId); + static Long append(ObjectArray states, int groupId, long timestamp, double value) { var state = states.get(groupId); if (state == null) { - adjustBreaker(DoubleIrateState.bytesUsed(1)); - state = new DoubleIrateState(new long[] { timestamp, -1 }, new double[] { value, 0 }); + state = new DoubleIrateState(timestamp, value); states.set(groupId, state); + return DoubleIrateState.bytesUsed(); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
- if (timestamp > state.timestamps[0]) { + if (timestamp > state.lastTimestamp) { // new timestamp is the most recent - state.timestamps[1] = state.timestamps[0]; - state.values[1] = state.values[0]; - state.timestamps[0] = timestamp; - state.values[0] = value; - } else if (timestamp > state.timestamps[1]) { + state.secondLastTimestamp = state.lastTimestamp; + state.secondLastValue = state.lastValue; + state.lastTimestamp = timestamp; + state.lastValue = value; + state.hasSecond = true; + } else if (timestamp > state.secondLastTimestamp) { // new timestamp is the second most recent - state.timestamps[1] = timestamp; - state.values[1] = value; + state.secondLastTimestamp = timestamp; + state.secondLastValue = value; + state.hasSecond = true; } // else: ignore, too old + return 0L; } } void combine(int groupId, LongBlock timestamps, DoubleBlock values, int otherPosition) { - // TODO: Check this method pabloem final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker(DoubleIrateState.bytesUsed(valueCount)); - state = new DoubleIrateState(valueCount); - states.set(groupId, state); - // TODO: add bulk_copy to Block - for (int i = 0; i < valueCount; i++) { - state.timestamps[i] = timestamps.getLong(firstIndex + i); - state.values[i] = values.getDouble(firstIndex + i); - } - } else { - adjustBreaker(DoubleIrateState.bytesUsed(state.entries() + valueCount)); - var newState = new DoubleIrateState(state.entries() + valueCount); - states.set(groupId, newState); - merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-DoubleIrateState.bytesUsed(state.entries())); // old state - } - } - - void merge(DoubleIrateState curr, DoubleIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, DoubleBlock values) { - int i = 0, j = 0, k = 0; - final int leftCount = curr.entries(); - // We do not merge more than two entries because we only need the last two. - // This merge thus ends when we have two entries in dst. - while (i < leftCount && j < rightCount && k < 2) { - final var t1 = curr.timestamps[i]; - final var t2 = timestamps.getLong(firstIndex + j); - if (t1 > t2) { - dst.timestamps[k] = t1; - dst.values[k] = curr.values[i]; - ++i; - } else { - dst.timestamps[k] = t2; - dst.values[k] = values.getDouble(firstIndex + j); - ++j; - } - ++k; - } - } - - DoubleIrateState mergeState(DoubleIrateState s1, DoubleIrateState s2) { - adjustBreaker(DoubleIrateState.bytesUsed(2)); - var dst = new DoubleIrateState(2); - int i = 0, j = 0, k = 0; - while (i < s1.entries() && j < s2.entries() && k < 2) { - if (s1.timestamps[i] > s2.timestamps[j]) { - dst.timestamps[k] = s1.timestamps[i]; - dst.values[k] = s1.values[i]; - ++i; - } else { - dst.timestamps[k] = s2.timestamps[j]; - dst.values[k] = s2.values[j]; - ++j; - } - ++k; + var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getDouble(firstIndex)); + adjustBreaker(incr); + if (valueCount > 1) { + ensureCapacity(groupId); + incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getDouble(firstIndex + 1)); + adjustBreaker(incr); } - return dst; } @Override @@ -227,14 +169,16 @@ public void toIntermediate(Block[] blocks, int offset, IntVector selected, Drive final var state = groupId < states.size() ? 
states.get(groupId) : null; if (state != null) { timestamps.beginPositionEntry(); - for (long t : state.timestamps) { - timestamps.appendLong(t); + timestamps.appendLong(state.lastTimestamp); + if (state.hasSecond) { + timestamps.appendLong(state.secondLastTimestamp); } timestamps.endPositionEntry(); values.beginPositionEntry(); - for (double v : state.values) { - values.appendDouble(v); + values.appendDouble(state.lastValue); + if (state.hasSecond) { + values.appendDouble(state.secondLastValue); } values.endPositionEntry(); } else { @@ -253,15 +197,16 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { + if (state == null || state.hasSecond == false) { rates.appendNull(); continue; } - int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; - final long xdiff = state.timestamps[0] - state.timestamps[1]; + final double ydiff = state.lastValue >= state.secondLastValue + ? state.lastValue - state.secondLastValue + : state.lastValue; + final long xdiff = state.lastTimestamp - state.secondLastTimestamp; rates.appendDouble(ydiff / xdiff * 1000); } return rates.build(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java index 8e58c5588ceef..f9fdb867a235b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java @@ -30,7 +30,7 @@ /** * A rate grouping aggregation definition for float. - * This class is generated. Edit `X-RateAggregator.java.st` instead. + * This class is generated. Edit `X-IrateAggregator.java.st` instead. 
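 * (Note on the evaluateFinal change just above: the reset check switched from ">"
 * to ">=", so two equal consecutive samples now produce ydiff = 0, a flat counter,
 * instead of being misread as a reset and reported as the raw last value.)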
*/ @GroupingAggregator( value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "FLOAT_BLOCK") } @@ -41,7 +41,8 @@ public static FloatIrateGroupingState initGrouping(DriverContext driverContext) } public static void combine(FloatIrateGroupingState current, int groupId, float value, long timestamp) { - current.append(groupId, timestamp, value); + current.ensureCapacity(groupId); + FloatIrateGroupingState.append(current.states, groupId, timestamp, value); } public static void combineIntermediate( @@ -60,34 +61,24 @@ public static Block evaluateFinal(FloatIrateGroupingState state, IntVector selec private static class FloatIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(FloatIrateState.class); - final long[] timestamps; // descending order - final float[] values; + long lastTimestamp; + long secondLastTimestamp; + float lastValue; + float secondLastValue; + boolean hasSecond; - FloatIrateState(int initialSize) { - this.timestamps = new long[initialSize]; - this.values = new float[initialSize]; + FloatIrateState() { + hasSecond = false; } - FloatIrateState(long[] ts, float[] vs) { - this.timestamps = ts; - this.values = vs; + FloatIrateState(long lastTimestamp, float lastValue) { + this.lastTimestamp = lastTimestamp; + this.lastValue = lastValue; + this.hasSecond = false; } - void append(long t, float v) { - assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; - assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; - // This method does not need to do anything because we only need the last two values - // and timestamps, which are already in place. - } - - int entries() { - return timestamps.length; - } - - static long bytesUsed(int entries) { - var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Float.BYTES * entries); - return BASE_RAM_USAGE + ts + vs; + static long bytesUsed() { + return BASE_RAM_USAGE; } } @@ -113,94 +104,45 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - void append(int groupId, long timestamp, float value) { - ensureCapacity(groupId); + static Long append(ObjectArray states, int groupId, long timestamp, float value) { var state = states.get(groupId); if (state == null) { - adjustBreaker(FloatIrateState.bytesUsed(1)); - state = new FloatIrateState(new long[] { timestamp, -1 }, new float[] { value, 0 }); + state = new FloatIrateState(timestamp, value); states.set(groupId, state); + return FloatIrateState.bytesUsed(); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
- if (timestamp > state.timestamps[0]) { + if (timestamp > state.lastTimestamp) { // new timestamp is the most recent - state.timestamps[1] = state.timestamps[0]; - state.values[1] = state.values[0]; - state.timestamps[0] = timestamp; - state.values[0] = value; - } else if (timestamp > state.timestamps[1]) { + state.secondLastTimestamp = state.lastTimestamp; + state.secondLastValue = state.lastValue; + state.lastTimestamp = timestamp; + state.lastValue = value; + state.hasSecond = true; + } else if (timestamp > state.secondLastTimestamp) { // new timestamp is the second most recent - state.timestamps[1] = timestamp; - state.values[1] = value; + state.secondLastTimestamp = timestamp; + state.secondLastValue = value; + state.hasSecond = true; } // else: ignore, too old + return 0L; } } void combine(int groupId, LongBlock timestamps, FloatBlock values, int otherPosition) { - // TODO: Check this method pabloem final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker(FloatIrateState.bytesUsed(valueCount)); - state = new FloatIrateState(valueCount); - states.set(groupId, state); - // TODO: add bulk_copy to Block - for (int i = 0; i < valueCount; i++) { - state.timestamps[i] = timestamps.getLong(firstIndex + i); - state.values[i] = values.getFloat(firstIndex + i); - } - } else { - adjustBreaker(FloatIrateState.bytesUsed(state.entries() + valueCount)); - var newState = new FloatIrateState(state.entries() + valueCount); - states.set(groupId, newState); - merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-FloatIrateState.bytesUsed(state.entries())); // old state - } - } - - void merge(FloatIrateState curr, FloatIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, FloatBlock values) { - int i = 0, j = 0, k = 0; - final int leftCount = curr.entries(); - // We do not merge more than two entries because we only need the last two. - // This merge thus ends when we have two entries in dst. - while (i < leftCount && j < rightCount && k < 2) { - final var t1 = curr.timestamps[i]; - final var t2 = timestamps.getLong(firstIndex + j); - if (t1 > t2) { - dst.timestamps[k] = t1; - dst.values[k] = curr.values[i]; - ++i; - } else { - dst.timestamps[k] = t2; - dst.values[k] = values.getFloat(firstIndex + j); - ++j; - } - ++k; - } - } - - FloatIrateState mergeState(FloatIrateState s1, FloatIrateState s2) { - adjustBreaker(FloatIrateState.bytesUsed(2)); - var dst = new FloatIrateState(2); - int i = 0, j = 0, k = 0; - while (i < s1.entries() && j < s2.entries() && k < 2) { - if (s1.timestamps[i] > s2.timestamps[j]) { - dst.timestamps[k] = s1.timestamps[i]; - dst.values[k] = s1.values[i]; - ++i; - } else { - dst.timestamps[k] = s2.timestamps[j]; - dst.values[k] = s2.values[j]; - ++j; - } - ++k; + var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getFloat(firstIndex)); + adjustBreaker(incr); + if (valueCount > 1) { + ensureCapacity(groupId); + incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getFloat(firstIndex + 1)); + adjustBreaker(incr); } - return dst; } @Override @@ -227,14 +169,16 @@ public void toIntermediate(Block[] blocks, int offset, IntVector selected, Drive final var state = groupId < states.size() ? 
states.get(groupId) : null; if (state != null) { timestamps.beginPositionEntry(); - for (long t : state.timestamps) { - timestamps.appendLong(t); + timestamps.appendLong(state.lastTimestamp); + if (state.hasSecond) { + timestamps.appendLong(state.secondLastTimestamp); } timestamps.endPositionEntry(); values.beginPositionEntry(); - for (float v : state.values) { - values.appendFloat(v); + values.appendFloat(state.lastValue); + if (state.hasSecond) { + values.appendFloat(state.secondLastValue); } values.endPositionEntry(); } else { @@ -253,15 +197,16 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { + if (state == null || state.hasSecond == false) { rates.appendNull(); continue; } - int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; - final long xdiff = state.timestamps[0] - state.timestamps[1]; + final double ydiff = state.lastValue >= state.secondLastValue + ? state.lastValue - state.secondLastValue + : state.lastValue; + final long xdiff = state.lastTimestamp - state.secondLastTimestamp; rates.appendDouble(ydiff / xdiff * 1000); } return rates.build(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java index 5b2308880bbc5..298ff70bd9463 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java @@ -30,7 +30,7 @@ /** * A rate grouping aggregation definition for int. - * This class is generated. Edit `X-RateAggregator.java.st` instead. + * This class is generated. Edit `X-IrateAggregator.java.st` instead. 
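 * (The hasSecond flag in this revision replaces the timestamps[1] == -1 sentinel of
 * the previous patch, so "only one sample seen so far" is tracked by an explicit
 * boolean rather than a magic timestamp value.)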
*/ @GroupingAggregator( value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "INT_BLOCK") } @@ -41,7 +41,8 @@ public static IntIrateGroupingState initGrouping(DriverContext driverContext) { } public static void combine(IntIrateGroupingState current, int groupId, int value, long timestamp) { - current.append(groupId, timestamp, value); + current.ensureCapacity(groupId); + IntIrateGroupingState.append(current.states, groupId, timestamp, value); } public static void combineIntermediate( @@ -60,34 +61,24 @@ public static Block evaluateFinal(IntIrateGroupingState state, IntVector selecte private static class IntIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(IntIrateState.class); - final long[] timestamps; // descending order - final int[] values; + long lastTimestamp; + long secondLastTimestamp; + int lastValue; + int secondLastValue; + boolean hasSecond; - IntIrateState(int initialSize) { - this.timestamps = new long[initialSize]; - this.values = new int[initialSize]; + IntIrateState() { + hasSecond = false; } - IntIrateState(long[] ts, int[] vs) { - this.timestamps = ts; - this.values = vs; + IntIrateState(long lastTimestamp, int lastValue) { + this.lastTimestamp = lastTimestamp; + this.lastValue = lastValue; + this.hasSecond = false; } - void append(long t, int v) { - assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; - assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; - // This method does not need to do anything because we only need the last two values - // and timestamps, which are already in place. - } - - int entries() { - return timestamps.length; - } - - static long bytesUsed(int entries) { - var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Integer.BYTES * entries); - return BASE_RAM_USAGE + ts + vs; + static long bytesUsed() { + return BASE_RAM_USAGE; } } @@ -113,94 +104,45 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - void append(int groupId, long timestamp, int value) { - ensureCapacity(groupId); + static Long append(ObjectArray states, int groupId, long timestamp, int value) { var state = states.get(groupId); if (state == null) { - adjustBreaker(IntIrateState.bytesUsed(1)); - state = new IntIrateState(new long[] { timestamp, -1 }, new int[] { value, 0 }); + state = new IntIrateState(timestamp, value); states.set(groupId, state); + return IntIrateState.bytesUsed(); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
- if (timestamp > state.timestamps[0]) { + if (timestamp > state.lastTimestamp) { // new timestamp is the most recent - state.timestamps[1] = state.timestamps[0]; - state.values[1] = state.values[0]; - state.timestamps[0] = timestamp; - state.values[0] = value; - } else if (timestamp > state.timestamps[1]) { + state.secondLastTimestamp = state.lastTimestamp; + state.secondLastValue = state.lastValue; + state.lastTimestamp = timestamp; + state.lastValue = value; + state.hasSecond = true; + } else if (timestamp > state.secondLastTimestamp) { // new timestamp is the second most recent - state.timestamps[1] = timestamp; - state.values[1] = value; + state.secondLastTimestamp = timestamp; + state.secondLastValue = value; + state.hasSecond = true; } // else: ignore, too old + return 0L; } } void combine(int groupId, LongBlock timestamps, IntBlock values, int otherPosition) { - // TODO: Check this method pabloem final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker(IntIrateState.bytesUsed(valueCount)); - state = new IntIrateState(valueCount); - states.set(groupId, state); - // TODO: add bulk_copy to Block - for (int i = 0; i < valueCount; i++) { - state.timestamps[i] = timestamps.getLong(firstIndex + i); - state.values[i] = values.getInt(firstIndex + i); - } - } else { - adjustBreaker(IntIrateState.bytesUsed(state.entries() + valueCount)); - var newState = new IntIrateState(state.entries() + valueCount); - states.set(groupId, newState); - merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-IntIrateState.bytesUsed(state.entries())); // old state - } - } - - void merge(IntIrateState curr, IntIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, IntBlock values) { - int i = 0, j = 0, k = 0; - final int leftCount = curr.entries(); - // We do not merge more than two entries because we only need the last two. - // This merge thus ends when we have two entries in dst. - while (i < leftCount && j < rightCount && k < 2) { - final var t1 = curr.timestamps[i]; - final var t2 = timestamps.getLong(firstIndex + j); - if (t1 > t2) { - dst.timestamps[k] = t1; - dst.values[k] = curr.values[i]; - ++i; - } else { - dst.timestamps[k] = t2; - dst.values[k] = values.getInt(firstIndex + j); - ++j; - } - ++k; - } - } - - IntIrateState mergeState(IntIrateState s1, IntIrateState s2) { - adjustBreaker(IntIrateState.bytesUsed(2)); - var dst = new IntIrateState(2); - int i = 0, j = 0, k = 0; - while (i < s1.entries() && j < s2.entries() && k < 2) { - if (s1.timestamps[i] > s2.timestamps[j]) { - dst.timestamps[k] = s1.timestamps[i]; - dst.values[k] = s1.values[i]; - ++i; - } else { - dst.timestamps[k] = s2.timestamps[j]; - dst.values[k] = s2.values[j]; - ++j; - } - ++k; + var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getInt(firstIndex)); + adjustBreaker(incr); + if (valueCount > 1) { + ensureCapacity(groupId); + incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getInt(firstIndex + 1)); + adjustBreaker(incr); } - return dst; } @Override @@ -227,14 +169,16 @@ public void toIntermediate(Block[] blocks, int offset, IntVector selected, Drive final var state = groupId < states.size() ? 
states.get(groupId) : null; if (state != null) { timestamps.beginPositionEntry(); - for (long t : state.timestamps) { - timestamps.appendLong(t); + timestamps.appendLong(state.lastTimestamp); + if (state.hasSecond) { + timestamps.appendLong(state.secondLastTimestamp); } timestamps.endPositionEntry(); values.beginPositionEntry(); - for (int v : state.values) { - values.appendInt(v); + values.appendInt(state.lastValue); + if (state.hasSecond) { + values.appendInt(state.secondLastValue); } values.endPositionEntry(); } else { @@ -253,15 +197,16 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { + if (state == null || state.hasSecond == false) { rates.appendNull(); continue; } - int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; - final long xdiff = state.timestamps[0] - state.timestamps[1]; + final double ydiff = state.lastValue >= state.secondLastValue + ? state.lastValue - state.secondLastValue + : state.lastValue; + final long xdiff = state.lastTimestamp - state.secondLastTimestamp; rates.appendDouble(ydiff / xdiff * 1000); } return rates.build(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java index 832e6f26b403d..22baeb1cc8e19 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java @@ -30,7 +30,7 @@ /** * A rate grouping aggregation definition for long. - * This class is generated. Edit `X-RateAggregator.java.st` instead. + * This class is generated. Edit `X-IrateAggregator.java.st` instead. 
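 * (Shape of the intermediate state built by toIntermediate above: per group, one
 * multivalued position in each output block, timestamps [last, secondLast] and
 * values [lastValue, secondLastValue], with the second element present only when
 * hasSecond is set. combine() then replays the one or two (timestamp, value) pairs
 * through append(), so merging partial states reuses the same ordering logic as
 * raw input.)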
*/ @GroupingAggregator( value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "LONG_BLOCK") } @@ -41,7 +41,8 @@ public static LongIrateGroupingState initGrouping(DriverContext driverContext) { } public static void combine(LongIrateGroupingState current, int groupId, long value, long timestamp) { - current.append(groupId, timestamp, value); + current.ensureCapacity(groupId); + LongIrateGroupingState.append(current.states, groupId, timestamp, value); } public static void combineIntermediate( @@ -60,34 +61,24 @@ public static Block evaluateFinal(LongIrateGroupingState state, IntVector select private static class LongIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(LongIrateState.class); - final long[] timestamps; // descending order - final long[] values; + long lastTimestamp; + long secondLastTimestamp; + long lastValue; + long secondLastValue; + boolean hasSecond; - LongIrateState(int initialSize) { - this.timestamps = new long[initialSize]; - this.values = new long[initialSize]; + LongIrateState() { + hasSecond = false; } - LongIrateState(long[] ts, long[] vs) { - this.timestamps = ts; - this.values = vs; + LongIrateState(long lastTimestamp, long lastValue) { + this.lastTimestamp = lastTimestamp; + this.lastValue = lastValue; + this.hasSecond = false; } - void append(long t, long v) { - assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; - assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; - // This method does not need to do anything because we only need the last two values - // and timestamps, which are already in place. - } - - int entries() { - return timestamps.length; - } - - static long bytesUsed(int entries) { - var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - return BASE_RAM_USAGE + ts + vs; + static long bytesUsed() { + return BASE_RAM_USAGE; } } @@ -113,94 +104,45 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - void append(int groupId, long timestamp, long value) { - ensureCapacity(groupId); + static Long append(ObjectArray states, int groupId, long timestamp, long value) { var state = states.get(groupId); if (state == null) { - adjustBreaker(LongIrateState.bytesUsed(1)); - state = new LongIrateState(new long[] { timestamp, -1 }, new long[] { value, 0 }); + state = new LongIrateState(timestamp, value); states.set(groupId, state); + return LongIrateState.bytesUsed(); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
- if (timestamp > state.timestamps[0]) { + if (timestamp > state.lastTimestamp) { // new timestamp is the most recent - state.timestamps[1] = state.timestamps[0]; - state.values[1] = state.values[0]; - state.timestamps[0] = timestamp; - state.values[0] = value; - } else if (timestamp > state.timestamps[1]) { + state.secondLastTimestamp = state.lastTimestamp; + state.secondLastValue = state.lastValue; + state.lastTimestamp = timestamp; + state.lastValue = value; + state.hasSecond = true; + } else if (timestamp > state.secondLastTimestamp) { // new timestamp is the second most recent - state.timestamps[1] = timestamp; - state.values[1] = value; + state.secondLastTimestamp = timestamp; + state.secondLastValue = value; + state.hasSecond = true; } // else: ignore, too old + return 0L; } } void combine(int groupId, LongBlock timestamps, LongBlock values, int otherPosition) { - // TODO: Check this method pabloem final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker(LongIrateState.bytesUsed(valueCount)); - state = new LongIrateState(valueCount); - states.set(groupId, state); - // TODO: add bulk_copy to Block - for (int i = 0; i < valueCount; i++) { - state.timestamps[i] = timestamps.getLong(firstIndex + i); - state.values[i] = values.getLong(firstIndex + i); - } - } else { - adjustBreaker(LongIrateState.bytesUsed(state.entries() + valueCount)); - var newState = new LongIrateState(state.entries() + valueCount); - states.set(groupId, newState); - merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-LongIrateState.bytesUsed(state.entries())); // old state - } - } - - void merge(LongIrateState curr, LongIrateState dst, int firstIndex, int rightCount, LongBlock timestamps, LongBlock values) { - int i = 0, j = 0, k = 0; - final int leftCount = curr.entries(); - // We do not merge more than two entries because we only need the last two. - // This merge thus ends when we have two entries in dst. - while (i < leftCount && j < rightCount && k < 2) { - final var t1 = curr.timestamps[i]; - final var t2 = timestamps.getLong(firstIndex + j); - if (t1 > t2) { - dst.timestamps[k] = t1; - dst.values[k] = curr.values[i]; - ++i; - } else { - dst.timestamps[k] = t2; - dst.values[k] = values.getLong(firstIndex + j); - ++j; - } - ++k; - } - } - - LongIrateState mergeState(LongIrateState s1, LongIrateState s2) { - adjustBreaker(LongIrateState.bytesUsed(2)); - var dst = new LongIrateState(2); - int i = 0, j = 0, k = 0; - while (i < s1.entries() && j < s2.entries() && k < 2) { - if (s1.timestamps[i] > s2.timestamps[j]) { - dst.timestamps[k] = s1.timestamps[i]; - dst.values[k] = s1.values[i]; - ++i; - } else { - dst.timestamps[k] = s2.timestamps[j]; - dst.values[k] = s2.values[j]; - ++j; - } - ++k; + var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getLong(firstIndex)); + adjustBreaker(incr); + if (valueCount > 1) { + ensureCapacity(groupId); + incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getLong(firstIndex + 1)); + adjustBreaker(incr); } - return dst; } @Override @@ -227,14 +169,16 @@ public void toIntermediate(Block[] blocks, int offset, IntVector selected, Drive final var state = groupId < states.size() ? 
states.get(groupId) : null; if (state != null) { timestamps.beginPositionEntry(); - for (long t : state.timestamps) { - timestamps.appendLong(t); + timestamps.appendLong(state.lastTimestamp); + if (state.hasSecond) { + timestamps.appendLong(state.secondLastTimestamp); } timestamps.endPositionEntry(); values.beginPositionEntry(); - for (long v : state.values) { - values.appendLong(v); + values.appendLong(state.lastValue); + if (state.hasSecond) { + values.appendLong(state.secondLastValue); } values.endPositionEntry(); } else { @@ -253,15 +197,16 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { + if (state == null || state.hasSecond == false) { rates.appendNull(); continue; } - int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; - final long xdiff = state.timestamps[0] - state.timestamps[1]; + final double ydiff = state.lastValue >= state.secondLastValue + ? state.lastValue - state.secondLastValue + : state.lastValue; + final long xdiff = state.lastTimestamp - state.secondLastTimestamp; rates.appendDouble(ydiff / xdiff * 1000); } return rates.build(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st index cb1b15909c230..e000ba396498e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st @@ -30,7 +30,7 @@ import org.elasticsearch.core.Releasables; /** * A rate grouping aggregation definition for $type$. - * This class is generated. Edit `X-RateAggregator.java.st` instead. + * This class is generated. Edit `X-IrateAggregator.java.st` instead. 
*/ @GroupingAggregator( value = { @IntermediateState(name = "timestamps", type = "LONG_BLOCK"), @IntermediateState(name = "values", type = "$TYPE$_BLOCK") } @@ -41,7 +41,8 @@ public class Irate$Type$Aggregator { } public static void combine($Type$IrateGroupingState current, int groupId, $type$ value, long timestamp) { - current.append(groupId, timestamp, value); + current.ensureCapacity(groupId); + $Type$IrateGroupingState.append(current.states, groupId, timestamp, value); } public static void combineIntermediate( @@ -60,34 +61,24 @@ public class Irate$Type$Aggregator { private static class $Type$IrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$IrateState.class); - final long[] timestamps; // descending order - final $type$[] values; + long lastTimestamp; + long secondLastTimestamp; + $type$ lastValue; + $type$ secondLastValue; + boolean hasSecond; - $Type$IrateState(int initialSize) { - this.timestamps = new long[initialSize]; - this.values = new $type$[initialSize]; + $Type$IrateState() { + hasSecond = false; } - $Type$IrateState(long[] ts, $type$[] vs) { - this.timestamps = ts; - this.values = vs; + $Type$IrateState(long lastTimestamp, $type$ lastValue) { + this.lastTimestamp = lastTimestamp; + this.lastValue = lastValue; + this.hasSecond = false; } - void append(long t, $type$ v) { - assert timestamps.length == 2 : "expected two timestamps; got " + timestamps.length; - assert t < timestamps[1] : "@timestamp goes backward: " + t + " >= " + timestamps[1]; - // This method does not need to do anything because we only need the last two values - // and timestamps, which are already in place. - } - - int entries() { - return timestamps.length; - } - - static long bytesUsed(int entries) { - var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); - var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) $BYTES$ * entries); - return BASE_RAM_USAGE + ts + vs; + static long bytesUsed() { + return BASE_RAM_USAGE; } } @@ -113,94 +104,45 @@ public class Irate$Type$Aggregator { assert stateBytes >= 0 : stateBytes; } - void append(int groupId, long timestamp, $type$ value) { - ensureCapacity(groupId); + static Long append(ObjectArray<$Type$IrateState> states, int groupId, long timestamp, $type$ value) { var state = states.get(groupId); if (state == null) { - adjustBreaker($Type$IrateState.bytesUsed(1)); - state = new $Type$IrateState(new long[] { timestamp, -1 }, new $type$[] { value, 0 }); + state = new $Type$IrateState(timestamp, value); states.set(groupId, state); + return $Type$IrateState.bytesUsed(); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
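                // Worked example (illustrative timestamps): appending samples at 100, 300, then 200
                // leaves lastTimestamp == 300 and secondLastTimestamp == 200 - the 200 fails the
                // first check against 300 but passes the second; a later sample at 150 would then
                // be dropped as older than both retained samples.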
- if (timestamp > state.timestamps[0]) { + if (timestamp > state.lastTimestamp) { // new timestamp is the most recent - state.timestamps[1] = state.timestamps[0]; - state.values[1] = state.values[0]; - state.timestamps[0] = timestamp; - state.values[0] = value; - } else if (timestamp > state.timestamps[1]) { + state.secondLastTimestamp = state.lastTimestamp; + state.secondLastValue = state.lastValue; + state.lastTimestamp = timestamp; + state.lastValue = value; + state.hasSecond = true; + } else if (timestamp > state.secondLastTimestamp) { // new timestamp is the second most recent - state.timestamps[1] = timestamp; - state.values[1] = value; + state.secondLastTimestamp = timestamp; + state.secondLastValue = value; + state.hasSecond = true; } // else: ignore, too old + return 0L; } } void combine(int groupId, LongBlock timestamps, $Type$Block values, int otherPosition) { - // TODO: Check this method pabloem final int valueCount = timestamps.getValueCount(otherPosition); if (valueCount == 0) { return; } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var state = states.get(groupId); - if (state == null) { - adjustBreaker($Type$IrateState.bytesUsed(valueCount)); - state = new $Type$IrateState(valueCount); - states.set(groupId, state); - // TODO: add bulk_copy to Block - for (int i = 0; i < valueCount; i++) { - state.timestamps[i] = timestamps.getLong(firstIndex + i); - state.values[i] = values.get$Type$(firstIndex + i); - } - } else { - adjustBreaker($Type$IrateState.bytesUsed(state.entries() + valueCount)); - var newState = new $Type$IrateState(state.entries() + valueCount); - states.set(groupId, newState); - merge(state, newState, firstIndex, valueCount, timestamps, values); - adjustBreaker(-$Type$IrateState.bytesUsed(state.entries())); // old state - } - } - - void merge($Type$IrateState curr, $Type$IrateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) { - int i = 0, j = 0, k = 0; - final int leftCount = curr.entries(); - // We do not merge more than two entries because we only need the last two. - // This merge thus ends when we have two entries in dst. - while (i < leftCount && j < rightCount && k < 2) { - final var t1 = curr.timestamps[i]; - final var t2 = timestamps.getLong(firstIndex + j); - if (t1 > t2) { - dst.timestamps[k] = t1; - dst.values[k] = curr.values[i]; - ++i; - } else { - dst.timestamps[k] = t2; - dst.values[k] = values.get$Type$(firstIndex + j); - ++j; - } - ++k; - } - } - - $Type$IrateState mergeState($Type$IrateState s1, $Type$IrateState s2) { - adjustBreaker($Type$IrateState.bytesUsed(2)); - var dst = new $Type$IrateState(2); - int i = 0, j = 0, k = 0; - while (i < s1.entries() && j < s2.entries() && k < 2) { - if (s1.timestamps[i] > s2.timestamps[j]) { - dst.timestamps[k] = s1.timestamps[i]; - dst.values[k] = s1.values[i]; - ++i; - } else { - dst.timestamps[k] = s2.timestamps[j]; - dst.values[k] = s2.values[j]; - ++j; - } - ++k; + var incr = append(states, groupId, timestamps.getLong(firstIndex), values.get$Type$(firstIndex)); + adjustBreaker(incr); + if (valueCount > 1) { + ensureCapacity(groupId); + incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.get$Type$(firstIndex + 1)); + adjustBreaker(incr); } - return dst; } @Override @@ -227,14 +169,16 @@ public class Irate$Type$Aggregator { final var state = groupId < states.size() ? 
states.get(groupId) : null; if (state != null) { timestamps.beginPositionEntry(); - for (long t : state.timestamps) { - timestamps.appendLong(t); + timestamps.appendLong(state.lastTimestamp); + if (state.hasSecond) { + timestamps.appendLong(state.secondLastTimestamp); } timestamps.endPositionEntry(); values.beginPositionEntry(); - for ($type$ v : state.values) { - values.append$Type$(v); + values.append$Type$(state.lastValue); + if (state.hasSecond) { + values.append$Type$(state.secondLastValue); } values.endPositionEntry(); } else { @@ -253,15 +197,16 @@ Block evaluateFinal(IntVector selected, GroupingAggregatorEvaluationContext eval for (int p = 0; p < positionCount; p++) { final var groupId = selected.getInt(p); final var state = groupId < states.size() ? states.get(groupId) : null; - if (state == null || state.values.length < 2 || state.timestamps[1] == -1) { + if (state == null || state.hasSecond == false) { rates.appendNull(); continue; } - int len = state.entries(); // When the last value is less than the previous one, we assume a reset // and use the last value directly. - final double ydiff = state.values[0] > state.values[1] ? state.values[0] - state.values[1] : state.values[0]; - final long xdiff = state.timestamps[0] - state.timestamps[1]; + final double ydiff = state.lastValue >= state.secondLastValue + ? state.lastValue - state.secondLastValue + : state.lastValue; + final long xdiff = state.lastTimestamp - state.secondLastTimestamp; rates.appendDouble(ydiff / xdiff * 1000); } return rates.build(); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java index 67e061dcd4079..afc4d42634dd0 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/RandomizedTimeSeriesIT.java @@ -284,7 +284,7 @@ static RateStats calculateDeltaAggregation( if (deltaAgg.equals(DeltaAgg.IRATE)) { var lastVal = timeseries.getLast().v2().v2(); var secondLastVal = timeseries.get(timeseries.size() - 2).v2().v2(); - var irate = (lastVal > secondLastVal ? lastVal - secondLastVal : lastVal) / (lastTs.toEpochMilli() - timeseries.get( + var irate = (lastVal >= secondLastVal ? lastVal - secondLastVal : lastVal) / (lastTs.toEpochMilli() - timeseries.get( timeseries.size() - 2 ).v2().v1().toEpochMilli()) * 1000; return new RateRange(irate * 0.999, irate * 1.001); // Add 0.1% tolerance @@ -397,7 +397,7 @@ void assertNoFailedWindows(List failedWindows, List> rows) * The test checks the count, max, min, and avg values of the rate metric, comparing them against * the same values calculated from the documents in the group.
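 * For irate, the expected value follows calculateDeltaAggregation above (illustrative numbers):
 * a last sample of 25 at t=20s and a second-to-last of 10 at t=19s give
 * (25 - 10) / 1000ms * 1000 = 15 per second, while a counter reset (last < second-to-last)
 * uses the last value alone as the increase.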
*/ - public void testRateGroupBySubset() { + public void testDeltaAggGroupBySubset() { var deltaAgg = ESTestCase.randomFrom(DELTA_AGG_OPTIONS); var window = ESTestCase.randomFrom(WINDOW_OPTIONS); var windowSize = window.v2(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java index 7d2b7be6dc146..203b0f0043f94 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Irate.java @@ -44,7 +44,9 @@ public class Irate extends TimeSeriesAggregateFunction implements OptionalArgume @FunctionInfo( type = FunctionType.TIME_SERIES_AGGREGATE, returnType = { "double" }, - description = "The irate of a counter field.", + description = "The irate of a counter field. irate is the per-second rate of increase between the last two data points (" + + "it ignores all earlier data points in each time period). " + + "This function is very similar to rate, but is more responsive to recent changes in the rate of increase.", appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.UNAVAILABLE) }, note = "Available with the [TS](/reference/query-languages/esql/commands/source-commands.md#esql-ts) command in snapshot builds", examples = { @Example(file = "k8s-timeseries", tag = "irate") } @@ -134,8 +136,4 @@ public Irate perTimeSeriesAggregation() { public String toString() { return "irate(" + field() + ")"; } - - Expression timestamp() { - return timestamp; - } } From 52c908b8b580d07f517f00a277f4079f0c66d5a6 Mon Sep 17 00:00:00 2001 From: Pablo Date: Tue, 9 Sep 2025 10:18:32 -0700 Subject: [PATCH 11/13] comments --- .../aggregation/IrateDoubleAggregator.java | 21 +++++++------------ .../aggregation/IrateFloatAggregator.java | 21 +++++++------------ .../aggregation/IrateIntAggregator.java | 21 +++++++------------ .../aggregation/IrateLongAggregator.java | 21 +++++++------------ .../aggregation/X-IrateAggregator.java.st | 21 +++++++------------ 5 files changed, 35 insertions(+), 70 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java index 2a52a4ce12202..18f0d3726c775 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java @@ -42,7 +42,7 @@ public static DoubleIrateGroupingState initGrouping(DriverContext driverContext) public static void combine(DoubleIrateGroupingState current, int groupId, double value, long timestamp) { current.ensureCapacity(groupId); - DoubleIrateGroupingState.append(current.states, groupId, timestamp, value); + current.append(groupId, timestamp, value); } public static void combineIntermediate( @@ -62,22 +62,18 @@ public static Block evaluateFinal(DoubleIrateGroupingState state, IntVector sele private static class DoubleIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(DoubleIrateState.class); long lastTimestamp; - long secondLastTimestamp; + long secondLastTimestamp = -1; double lastValue; double secondLastValue; boolean
hasSecond; - DoubleIrateState() { - hasSecond = false; - } - DoubleIrateState(long lastTimestamp, double lastValue) { this.lastTimestamp = lastTimestamp; this.lastValue = lastValue; this.hasSecond = false; } - static long bytesUsed() { + long bytesUsed() { return BASE_RAM_USAGE; } } @@ -104,12 +100,12 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - static Long append(ObjectArray states, int groupId, long timestamp, double value) { + void append(int groupId, long timestamp, double value) { var state = states.get(groupId); if (state == null) { state = new DoubleIrateState(timestamp, value); states.set(groupId, state); - return DoubleIrateState.bytesUsed(); + adjustBreaker(state.bytesUsed()); } else { // We only need the last two values, but we need to keep them sorted by timestamp. if (timestamp > state.lastTimestamp) { @@ -125,7 +121,6 @@ static Long append(ObjectArray states, int groupId, long times state.secondLastValue = value; state.hasSecond = true; } // else: ignore, too old - return 0L; } } @@ -136,12 +131,10 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, int otherPos } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getDouble(firstIndex)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex), values.getDouble(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getDouble(firstIndex + 1)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex + 1), values.getDouble(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java index f9fdb867a235b..010b239552a07 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java @@ -42,7 +42,7 @@ public static FloatIrateGroupingState initGrouping(DriverContext driverContext) public static void combine(FloatIrateGroupingState current, int groupId, float value, long timestamp) { current.ensureCapacity(groupId); - FloatIrateGroupingState.append(current.states, groupId, timestamp, value); + current.append(groupId, timestamp, value); } public static void combineIntermediate( @@ -62,22 +62,18 @@ public static Block evaluateFinal(FloatIrateGroupingState state, IntVector selec private static class FloatIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(FloatIrateState.class); long lastTimestamp; - long secondLastTimestamp; + long secondLastTimestamp = -1; float lastValue; float secondLastValue; boolean hasSecond; - FloatIrateState() { - hasSecond = false; - } - FloatIrateState(long lastTimestamp, float lastValue) { this.lastTimestamp = lastTimestamp; this.lastValue = lastValue; this.hasSecond = false; } - static long bytesUsed() { + long bytesUsed() { return BASE_RAM_USAGE; } } @@ -104,12 +100,12 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - static Long append(ObjectArray states, int groupId, long timestamp, float value) { + void append(int groupId, long timestamp, float value) { var state = states.get(groupId); if (state 
== null) { state = new FloatIrateState(timestamp, value); states.set(groupId, state); - return FloatIrateState.bytesUsed(); + adjustBreaker(state.bytesUsed()); } else { // We only need the last two values, but we need to keep them sorted by timestamp. if (timestamp > state.lastTimestamp) { @@ -125,7 +121,6 @@ static Long append(ObjectArray states, int groupId, long timest state.secondLastValue = value; state.hasSecond = true; } // else: ignore, too old - return 0L; } } @@ -136,12 +131,10 @@ void combine(int groupId, LongBlock timestamps, FloatBlock values, int otherPosi } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getFloat(firstIndex)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex), values.getFloat(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getFloat(firstIndex + 1)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex + 1), values.getFloat(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java index 298ff70bd9463..0978b8f19f287 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java @@ -42,7 +42,7 @@ public static IntIrateGroupingState initGrouping(DriverContext driverContext) { public static void combine(IntIrateGroupingState current, int groupId, int value, long timestamp) { current.ensureCapacity(groupId); - IntIrateGroupingState.append(current.states, groupId, timestamp, value); + current.append(groupId, timestamp, value); } public static void combineIntermediate( @@ -62,22 +62,18 @@ public static Block evaluateFinal(IntIrateGroupingState state, IntVector selecte private static class IntIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(IntIrateState.class); long lastTimestamp; - long secondLastTimestamp; + long secondLastTimestamp = -1; int lastValue; int secondLastValue; boolean hasSecond; - IntIrateState() { - hasSecond = false; - } - IntIrateState(long lastTimestamp, int lastValue) { this.lastTimestamp = lastTimestamp; this.lastValue = lastValue; this.hasSecond = false; } - static long bytesUsed() { + long bytesUsed() { return BASE_RAM_USAGE; } } @@ -104,12 +100,12 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - static Long append(ObjectArray states, int groupId, long timestamp, int value) { + void append(int groupId, long timestamp, int value) { var state = states.get(groupId); if (state == null) { state = new IntIrateState(timestamp, value); states.set(groupId, state); - return IntIrateState.bytesUsed(); + adjustBreaker(state.bytesUsed()); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
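            // No breaker adjustment is needed in this branch: the state object is fixed-size, so
            // updating its fields in place leaves the tracked memory unchanged. Only the initial
            // allocation above is charged, via adjustBreaker(state.bytesUsed()).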
if (timestamp > state.lastTimestamp) { @@ -125,7 +121,6 @@ static Long append(ObjectArray states, int groupId, long timestam state.secondLastValue = value; state.hasSecond = true; } // else: ignore, too old - return 0L; } } @@ -136,12 +131,10 @@ void combine(int groupId, LongBlock timestamps, IntBlock values, int otherPositi } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getInt(firstIndex)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex), values.getInt(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getInt(firstIndex + 1)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex + 1), values.getInt(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java index 22baeb1cc8e19..c539937d58a73 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java @@ -42,7 +42,7 @@ public static LongIrateGroupingState initGrouping(DriverContext driverContext) { public static void combine(LongIrateGroupingState current, int groupId, long value, long timestamp) { current.ensureCapacity(groupId); - LongIrateGroupingState.append(current.states, groupId, timestamp, value); + current.append(groupId, timestamp, value); } public static void combineIntermediate( @@ -62,22 +62,18 @@ public static Block evaluateFinal(LongIrateGroupingState state, IntVector select private static class LongIrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(LongIrateState.class); long lastTimestamp; - long secondLastTimestamp; + long secondLastTimestamp = -1; long lastValue; long secondLastValue; boolean hasSecond; - LongIrateState() { - hasSecond = false; - } - LongIrateState(long lastTimestamp, long lastValue) { this.lastTimestamp = lastTimestamp; this.lastValue = lastValue; this.hasSecond = false; } - static long bytesUsed() { + long bytesUsed() { return BASE_RAM_USAGE; } } @@ -104,12 +100,12 @@ void adjustBreaker(long bytes) { assert stateBytes >= 0 : stateBytes; } - static Long append(ObjectArray states, int groupId, long timestamp, long value) { + void append(int groupId, long timestamp, long value) { var state = states.get(groupId); if (state == null) { state = new LongIrateState(timestamp, value); states.set(groupId, state); - return LongIrateState.bytesUsed(); + adjustBreaker(state.bytesUsed()); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
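            // Note that a timestamp equal to lastTimestamp fails the first check below and is
            // kept as the second-most-recent sample, so duplicate timestamps can surface as
            // xdiff == 0 in evaluateFinal.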
if (timestamp > state.lastTimestamp) { @@ -125,7 +121,6 @@ static Long append(ObjectArray states, int groupId, long timesta state.secondLastValue = value; state.hasSecond = true; } // else: ignore, too old - return 0L; } } @@ -136,12 +131,10 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, int otherPosit } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var incr = append(states, groupId, timestamps.getLong(firstIndex), values.getLong(firstIndex)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex), values.getLong(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.getLong(firstIndex + 1)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex + 1), values.getLong(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st index e000ba396498e..c1b2bcbd1997f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st @@ -42,7 +42,7 @@ public class Irate$Type$Aggregator { public static void combine($Type$IrateGroupingState current, int groupId, $type$ value, long timestamp) { current.ensureCapacity(groupId); - $Type$IrateGroupingState.append(current.states, groupId, timestamp, value); + current.append(groupId, timestamp, value); } public static void combineIntermediate( @@ -62,22 +62,18 @@ public class Irate$Type$Aggregator { private static class $Type$IrateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$IrateState.class); long lastTimestamp; - long secondLastTimestamp; + long secondLastTimestamp = -1; $type$ lastValue; $type$ secondLastValue; boolean hasSecond; - $Type$IrateState() { - hasSecond = false; - } - $Type$IrateState(long lastTimestamp, $type$ lastValue) { this.lastTimestamp = lastTimestamp; this.lastValue = lastValue; this.hasSecond = false; } - static long bytesUsed() { + long bytesUsed() { return BASE_RAM_USAGE; } } @@ -104,12 +100,12 @@ public class Irate$Type$Aggregator { assert stateBytes >= 0 : stateBytes; } - static Long append(ObjectArray<$Type$IrateState> states, int groupId, long timestamp, $type$ value) { + void append(int groupId, long timestamp, $type$ value) { var state = states.get(groupId); if (state == null) { state = new $Type$IrateState(timestamp, value); states.set(groupId, state); - return $Type$IrateState.bytesUsed(); + adjustBreaker(state.bytesUsed()); } else { // We only need the last two values, but we need to keep them sorted by timestamp. 
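            // secondLastTimestamp starts at the -1 sentinel, so the first out-of-order sample
            // always qualifies for the second slot; evaluateFinal still gates on hasSecond
            // before emitting a rate, so a group with a single sample yields null.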
if (timestamp > state.lastTimestamp) { @@ -125,7 +121,6 @@ public class Irate$Type$Aggregator { state.secondLastValue = value; state.hasSecond = true; } // else: ignore, too old - return 0L; } } @@ -136,12 +131,10 @@ public class Irate$Type$Aggregator { } final int firstIndex = timestamps.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - var incr = append(states, groupId, timestamps.getLong(firstIndex), values.get$Type$(firstIndex)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex), values.get$Type$(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - incr = append(states, groupId, timestamps.getLong(firstIndex + 1), values.get$Type$(firstIndex + 1)); - adjustBreaker(incr); + append(groupId, timestamps.getLong(firstIndex + 1), values.get$Type$(firstIndex + 1)); } } From 02097e62178f55b412aad13306175da0af3b24f2 Mon Sep 17 00:00:00 2001 From: Pablo Date: Tue, 9 Sep 2025 10:23:03 -0700 Subject: [PATCH 12/13] test --- .../resources/k8s-timeseries-irate.csv-spec | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec index c2ab273610c14..c34f8e737022f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec @@ -6,8 +6,8 @@ TS k8s irate_bytes_in:double | time_bucket:datetime null | 2024-05-10T00:01:00.000Z -140.89655067155067 | 2024-05-10T00:09:00.000Z 140.58333333333331 | 2024-05-10T00:02:00.000Z +134.5314713064713 | 2024-05-10T00:09:00.000Z 116.41911764705883 | 2024-05-10T00:22:00.000Z 112.83333333333333 | 2024-05-10T00:00:00.000Z 93.43529411764706 | 2024-05-10T00:14:00.000Z @@ -28,10 +28,10 @@ irate_bytes_in:double | cluster:keyword | time_bucket:datetime 284.63440860215053 | qa | 2024-05-10T00:05:00.000Z 119.52228682170542 | prod | 2024-05-10T00:20:00.000Z 62.32120383036936 | prod | 2024-05-10T00:10:00.000Z -40.90705128205128 | staging | 2024-05-10T00:05:00.000Z 31.92871485943775 | prod | 2024-05-10T00:00:00.000Z 30.83898647284474 | staging | 2024-05-10T00:00:00.000Z 28.57226890756303 | qa | 2024-05-10T00:15:00.000Z +26.055199430199433 | staging | 2024-05-10T00:05:00.000Z 21.898989322941418 | staging | 2024-05-10T00:15:00.000Z 14.23272442880286 | qa | 2024-05-10T00:00:00.000Z 10.889987485115794 | staging | 2024-05-10T00:10:00.000Z @@ -48,8 +48,8 @@ TS k8s irate_cost:double | time_bucket:datetime null | 2024-05-10T00:01:00.000Z 7.836832264957264 | 2024-05-10T00:09:00.000Z -6.754166666666666 | 2024-05-10T00:02:00.000Z 3.590324074074074 | 2024-05-10T00:17:00.000Z +2.6708333333333334 | 2024-05-10T00:02:00.000Z 2.2916666666666665 | 2024-05-10T00:08:00.000Z 2.265625 | 2024-05-10T00:11:00.000Z 2.2481617647058822 | 2024-05-10T00:22:00.000Z @@ -89,13 +89,14 @@ TS k8s irate_bytes:double | cluster:keyword | time_bucket:datetime | irate_kb:double 4.37482276552044 | prod | 2024-05-10T00:00:00.000Z | 0.004272287856953555 284.63440860215053 | qa | 2024-05-10T00:00:00.000Z | 0.2779632896505376 -40.90705128205128 | staging | 2024-05-10T00:00:00.000Z | 0.0399482922676282 +26.055199430199433 | staging | 2024-05-10T00:00:00.000Z | 0.025444530693554134 9.893214497920377 | prod | 2024-05-10T00:10:00.000Z | 0.009661342283125368 28.57226890756303 | qa | 2024-05-10T00:10:00.000Z | 0.02790260635504202 21.898989322941418 | staging | 
2024-05-10T00:10:00.000Z | 0.02138573176068498 119.52228682170542 | prod | 2024-05-10T00:20:00.000Z | 0.1167209832243217 4.428024083196497 | qa | 2024-05-10T00:20:00.000Z | 0.0043242422687465795 1.5050835148874364 | staging | 2024-05-10T00:20:00.000Z | 0.0014698081200072621 + ; irate_of_aggregate_metric @@ -120,8 +121,8 @@ TS k8s irate_bytes_in:double | time_bucket:datetime null | 2024-05-10T00:01:00.000Z -150.89655067155067 | 2024-05-10T00:09:00.000Z 150.58333333333331 | 2024-05-10T00:02:00.000Z +144.5314713064713 | 2024-05-10T00:09:00.000Z 126.41911764705883 | 2024-05-10T00:22:00.000Z 122.83333333333333 | 2024-05-10T00:00:00.000Z 103.43529411764706 | 2024-05-10T00:14:00.000Z @@ -162,13 +163,13 @@ TS k8s sum_irate_bytes:double | sum_irate_cost:double | cluster:keyword | time_bucket:datetime | ratio:double 13.12446829656132 | 0.38781305903398927 | prod | 2024-05-10T00:00:00.000Z | 33.84225464004049 853.9032258064516 | 6.337365591397849 | qa | 2024-05-10T00:00:00.000Z | 134.74103923647934 -122.72115384615384 | 1.4063835470085468 | staging | 2024-05-10T00:00:00.000Z | 87.26008926027919 +78.1655982905983 | 1.4063835470085468 | staging | 2024-05-10T00:00:00.000Z | 55.57914727946065 29.679643493761134 | 0.5575668449197861 | prod | 2024-05-10T00:10:00.000Z | 53.23064626992118 85.71680672268909 | 0.6042016806722689 | qa | 2024-05-10T00:10:00.000Z | 141.86787204450627 65.69696796882425 | 0.7275520387795836 | staging | 2024-05-10T00:10:00.000Z | 90.29865146007454 239.04457364341084 | 2.0520833333333335 | prod | 2024-05-10T00:20:00.000Z | 116.48872624247431 8.856048166392995 | 0.1617063492063492 | qa | 2024-05-10T00:20:00.000Z | 54.76623651364503 -4.515250544662309 | 1.508442265795207 | staging | 2024-05-10T00:20:00.000Z | 2.9933200938797615 +4.515250544662309 | 0.37649782135076254 | staging | 2024-05-10T00:20:00.000Z | 11.992766726943941 ; irate_of_ratio @@ -180,13 +181,13 @@ TS k8s irate_of_ratio:double | cluster:keyword | time_bucket:datetime 0.7377812779572093 | prod | 2024-05-10T00:00:00.000Z 0.15560960316361233 | qa | 2024-05-10T00:00:00.000Z -0.04287216214507051 | staging | 2024-05-10T00:00:00.000Z +0.05581954089819596 | staging | 2024-05-10T00:00:00.000Z 3.088611728967339 | prod | 2024-05-10T00:10:00.000Z 0.019280051363230983 | qa | 2024-05-10T00:10:00.000Z 0.17121614905482155 | staging | 2024-05-10T00:10:00.000Z 0.021697562872698986 | prod | 2024-05-10T00:20:00.000Z 0.04152807743018099 | qa | 2024-05-10T00:20:00.000Z -4.02626050420168 | staging | 2024-05-10T00:20:00.000Z +3.7694327731092434 | staging | 2024-05-10T00:20:00.000Z ; From d6ec9d08e6befdd0edfee72a4ca1216173a4e163 Mon Sep 17 00:00:00 2001 From: Pablo Date: Tue, 9 Sep 2025 11:09:01 -0700 Subject: [PATCH 13/13] comments and fixup --- .../compute/aggregation/IrateDoubleAggregator.java | 7 ++++--- .../compute/aggregation/IrateFloatAggregator.java | 7 ++++--- .../compute/aggregation/IrateIntAggregator.java | 7 ++++--- .../compute/aggregation/IrateLongAggregator.java | 7 ++++--- .../compute/aggregation/X-IrateAggregator.java.st | 7 ++++--- .../src/main/resources/k8s-timeseries-irate.csv-spec | 5 ++--- 6 files changed, 22 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java index 18f0d3726c775..7c6af73d719bd 100644 ---
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateDoubleAggregator.java @@ -129,12 +129,13 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, int otherPos if (valueCount == 0) { return; } - final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + final int firstTs = timestamps.getFirstValueIndex(otherPosition); + final int firstIndex = values.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex), values.getDouble(firstIndex)); + append(groupId, timestamps.getLong(firstTs), values.getDouble(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex + 1), values.getDouble(firstIndex + 1)); + append(groupId, timestamps.getLong(firstTs + 1), values.getDouble(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java index 010b239552a07..c4ce7a2955d73 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateFloatAggregator.java @@ -129,12 +129,13 @@ void combine(int groupId, LongBlock timestamps, FloatBlock values, int otherPosi if (valueCount == 0) { return; } - final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + final int firstTs = timestamps.getFirstValueIndex(otherPosition); + final int firstIndex = values.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex), values.getFloat(firstIndex)); + append(groupId, timestamps.getLong(firstTs), values.getFloat(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex + 1), values.getFloat(firstIndex + 1)); + append(groupId, timestamps.getLong(firstTs + 1), values.getFloat(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java index 0978b8f19f287..1cf6a69e0ecb6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateIntAggregator.java @@ -129,12 +129,13 @@ void combine(int groupId, LongBlock timestamps, IntBlock values, int otherPositi if (valueCount == 0) { return; } - final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + final int firstTs = timestamps.getFirstValueIndex(otherPosition); + final int firstIndex = values.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex), values.getInt(firstIndex)); + append(groupId, timestamps.getLong(firstTs), values.getInt(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex + 1), values.getInt(firstIndex + 1)); + append(groupId, timestamps.getLong(firstTs + 1), values.getInt(firstIndex + 1)); } } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java index c539937d58a73..44419e5db116e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IrateLongAggregator.java @@ -129,12 +129,13 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, int otherPosit if (valueCount == 0) { return; } - final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + final int firstTs = timestamps.getFirstValueIndex(otherPosition); + final int firstIndex = values.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex), values.getLong(firstIndex)); + append(groupId, timestamps.getLong(firstTs), values.getLong(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex + 1), values.getLong(firstIndex + 1)); + append(groupId, timestamps.getLong(firstTs + 1), values.getLong(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st index c1b2bcbd1997f..4300c9f6c6c73 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-IrateAggregator.java.st @@ -129,12 +129,13 @@ public class Irate$Type$Aggregator { if (valueCount == 0) { return; } - final int firstIndex = timestamps.getFirstValueIndex(otherPosition); + final int firstTs = timestamps.getFirstValueIndex(otherPosition); + final int firstIndex = values.getFirstValueIndex(otherPosition); ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex), values.get$Type$(firstIndex)); + append(groupId, timestamps.getLong(firstTs), values.get$Type$(firstIndex)); if (valueCount > 1) { ensureCapacity(groupId); - append(groupId, timestamps.getLong(firstIndex + 1), values.get$Type$(firstIndex + 1)); + append(groupId, timestamps.getLong(firstTs + 1), values.get$Type$(firstIndex + 1)); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec index c34f8e737022f..2fb818caad04d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/k8s-timeseries-irate.csv-spec @@ -143,14 +143,13 @@ TS k8s avg_irate_bytes:double | avg_irate_cost:double | cluster:keyword | time_bucket:datetime | ratio:double 4.37482276552044 | 0.12927101967799642 | prod | 2024-05-10T00:00:00.000Z | 33.84225464004049 284.63440860215053 | 2.112455197132616 | qa | 2024-05-10T00:00:00.000Z | 134.74103923647934 -40.90705128205128 | 0.46879451566951563 | staging | 2024-05-10T00:00:00.000Z | 87.26008926027917 +26.055199430199433 | 0.46879451566951563 | staging | 2024-05-10T00:00:00.000Z | 55.57914727946065 9.893214497920377 | 0.18585561497326206 | prod | 2024-05-10T00:10:00.000Z | 53.23064626992117 28.57226890756303 | 0.20140056022408961 | qa | 2024-05-10T00:10:00.000Z | 
141.8678720445063 21.898989322941418 | 0.2425173462598612 | staging | 2024-05-10T00:10:00.000Z | 90.29865146007454 119.52228682170542 | 1.0260416666666667 | prod | 2024-05-10T00:20:00.000Z | 116.48872624247431 4.428024083196497 | 0.0808531746031746 | qa | 2024-05-10T00:20:00.000Z | 54.76623651364503 -1.5050835148874364 | 0.5028140885984024 | staging | 2024-05-10T00:20:00.000Z | 2.9933200938797615 - +1.5050835148874364 | 0.12549927378358752 | staging | 2024-05-10T00:20:00.000Z | 11.992766726943941 ; irate_combined_sum