@@ -17,6 +17,24 @@ clients:double | cluster:keyword | time_bucket:datetime
357.0 | staging | 2024-05-10T00:03:00.000Z
;

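// The implicit_* cases below mirror the explicit last_over_time tests: the raw metric field is
// passed directly to the outer aggregate, and the implicit_last_over_time capability is expected
// to wrap it in last_over_time under the hood, so the result rows match the explicit versions.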
implicit_last_over_time_of_integer
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | STATS clients = avg(network.eth0.currently_connected_clients) BY cluster, time_bucket = bucket(@timestamp,1minute) | SORT time_bucket, cluster | LIMIT 10;

clients:double | cluster:keyword | time_bucket:datetime
429.0 | prod | 2024-05-10T00:00:00.000Z
615.5 | staging | 2024-05-10T00:00:00.000Z
396.5 | prod | 2024-05-10T00:01:00.000Z
440.0 | qa | 2024-05-10T00:01:00.000Z
632.5 | prod | 2024-05-10T00:02:00.000Z
565.0 | qa | 2024-05-10T00:02:00.000Z
205.0 | staging | 2024-05-10T00:02:00.000Z
742.0 | prod | 2024-05-10T00:03:00.000Z
454.0 | qa | 2024-05-10T00:03:00.000Z
357.0 | staging | 2024-05-10T00:03:00.000Z
;

last_over_time_of_long
required_capability: metrics_command
required_capability: last_over_time
@@ -35,6 +53,24 @@ bytes:double | cluster:keyword | time_bucket:datetime
612.5 | staging | 2024-05-10T00:03:00.000Z
;

implicit_last_over_time_of_long
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | STATS bytes = avg(network.bytes_in) BY cluster, time_bucket = bucket(@timestamp,1minute) | SORT time_bucket, cluster | LIMIT 10;

bytes:double | cluster:keyword | time_bucket:datetime
677.0 | prod | 2024-05-10T00:00:00.000Z
586.0 | staging | 2024-05-10T00:00:00.000Z
628.5 | prod | 2024-05-10T00:01:00.000Z
538.5 | qa | 2024-05-10T00:01:00.000Z
612.0 | prod | 2024-05-10T00:02:00.000Z
749.0 | qa | 2024-05-10T00:02:00.000Z
382.5 | staging | 2024-05-10T00:02:00.000Z
970.0 | prod | 2024-05-10T00:03:00.000Z
373.0 | qa | 2024-05-10T00:03:00.000Z
612.5 | staging | 2024-05-10T00:03:00.000Z
;

last_over_time_with_filtering
required_capability: metrics_command
required_capability: last_over_time
@@ -52,6 +88,25 @@ tx:long | cluster:keyword | time_bucket:datetime
238 | staging | 2024-05-10T00:20:00.000Z
;


implicit_last_over_time_with_filtering
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | WHERE pod == "one" | STATS tx = sum(network.bytes_in) BY cluster, time_bucket = bucket(@timestamp, 10minute) | SORT time_bucket, cluster | LIMIT 10;

tx:long | cluster:keyword | time_bucket:datetime
3 | prod | 2024-05-10T00:00:00.000Z
830 | qa | 2024-05-10T00:00:00.000Z
753 | staging | 2024-05-10T00:00:00.000Z
542 | prod | 2024-05-10T00:10:00.000Z
187 | qa | 2024-05-10T00:10:00.000Z
4 | staging | 2024-05-10T00:10:00.000Z
931 | prod | 2024-05-10T00:20:00.000Z
206 | qa | 2024-05-10T00:20:00.000Z
238 | staging | 2024-05-10T00:20:00.000Z
;


last_over_time_older_than_10d
required_capability: metrics_command
required_capability: last_over_time
@@ -65,6 +120,19 @@ cost:double | pod:keyword | time_bucket:datetime
1038.0 | three | 2024-05-10T00:10:00.000Z
;

implicit_last_over_time_older_than_10d
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | WHERE cluster == "qa" AND @timestamp < now() - 10 day | STATS cost = avg(network.eth0.rx) BY pod, time_bucket = bucket(@timestamp, 10minute) | SORT time_bucket, pod | LIMIT 5;

cost:double | pod:keyword | time_bucket:datetime
818.0 | one | 2024-05-10T00:00:00.000Z
529.0 | three | 2024-05-10T00:00:00.000Z
620.0 | two | 2024-05-10T00:00:00.000Z
1262.0 | one | 2024-05-10T00:10:00.000Z
1038.0 | three | 2024-05-10T00:10:00.000Z
;

eval_on_last_over_time
required_capability: metrics_command
required_capability: last_over_time
@@ -82,6 +150,23 @@ max_bytes:double | cluster:keyword | time_bucket:datetime | kb_minus_offset
81.33333333333333 | staging | 2024-05-10T00:20:00.000Z | -0.01866666666666667
;

implicit_eval_on_last_over_time
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | STATS max_bytes = avg(network.bytes_in) BY cluster, time_bucket = bucket(@timestamp, 10minute) | EVAL kb_minus_offset = (max_bytes - 100) / 1000.0 | LIMIT 10 | SORT time_bucket, cluster ;

max_bytes:double | cluster:keyword | time_bucket:datetime | kb_minus_offset:double
225.0 | prod | 2024-05-10T00:00:00.000Z | 0.125
485.6666666666667 | qa | 2024-05-10T00:00:00.000Z | 0.3856666666666667
572.6666666666666 | staging | 2024-05-10T00:00:00.000Z | 0.4726666666666666
517.6666666666666 | prod | 2024-05-10T00:10:00.000Z | 0.41766666666666663
426.6666666666667 | qa | 2024-05-10T00:10:00.000Z | 0.32666666666666666
482.3333333333333 | staging | 2024-05-10T00:10:00.000Z | 0.3823333333333333
839.0 | prod | 2024-05-10T00:20:00.000Z | 0.739
697.0 | qa | 2024-05-10T00:20:00.000Z | 0.597
81.33333333333333 | staging | 2024-05-10T00:20:00.000Z | -0.01866666666666667
;

last_over_time_multi_values
required_capability: metrics_command
required_capability: last_over_time
@@ -101,6 +186,26 @@ events:long | pod:keyword | time_bucket:datetime
9 | three | 2024-05-10T00:02:00.000Z
;


implicit_last_over_time_multi_values
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | WHERE @timestamp < "2024-05-10T00:10:00.000Z" | STATS events = sum(events_received) by pod, time_bucket = bucket(@timestamp, 1minute) | SORT events desc, pod, time_bucket | LIMIT 10;

events:long | pod:keyword | time_bucket:datetime
18 | one | 2024-05-10T00:01:00.000Z
16 | one | 2024-05-10T00:08:00.000Z
12 | one | 2024-05-10T00:03:00.000Z
12 | three | 2024-05-10T00:00:00.000Z
12 | two | 2024-05-10T00:09:00.000Z
10 | three | 2024-05-10T00:06:00.000Z
10 | two | 2024-05-10T00:02:00.000Z
10 | two | 2024-05-10T00:04:00.000Z
9 | one | 2024-05-10T00:09:00.000Z
9 | three | 2024-05-10T00:02:00.000Z
;


last_over_time_null_values
required_capability: metrics_command
required_capability: last_over_time
@@ -120,6 +225,24 @@ null | two | 2024-05-10T00:13:00.000Z
7 | three | 2024-05-10T00:12:00.000Z
;

implicit_last_over_time_null_values
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | WHERE @timestamp > "2024-05-10T00:10:00.000Z" and @timestamp < "2024-05-10T00:15:00.000Z" | STATS events = sum(events_received) by pod, time_bucket = bucket(@timestamp, 1minute) | SORT events desc, pod, time_bucket | LIMIT 10;

events:long | pod:keyword | time_bucket:datetime
null | one | 2024-05-10T00:12:00.000Z
null | two | 2024-05-10T00:13:00.000Z
20 | two | 2024-05-10T00:14:00.000Z
18 | two | 2024-05-10T00:12:00.000Z
16 | one | 2024-05-10T00:13:00.000Z
16 | one | 2024-05-10T00:14:00.000Z
11 | one | 2024-05-10T00:10:00.000Z
9 | one | 2024-05-10T00:11:00.000Z
9 | three | 2024-05-10T00:13:00.000Z
7 | three | 2024-05-10T00:12:00.000Z
;

last_over_time_all_value_types
required_capability: metrics_command
required_capability: last_over_time
@@ -138,3 +261,21 @@ events:long | pod:keyword | time_bucket:datetime
5 | two | 2024-05-10T00:20:00.000Z
;


implicit_last_over_time_all_value_types
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | STATS events = sum(events_received) by pod, time_bucket = bucket(@timestamp, 10minute) | SORT events desc, pod, time_bucket | LIMIT 10 ;

events:long | pod:keyword | time_bucket:datetime
21 | three | 2024-05-10T00:10:00.000Z
20 | one | 2024-05-10T00:10:00.000Z
15 | one | 2024-05-10T00:20:00.000Z
15 | three | 2024-05-10T00:20:00.000Z
13 | two | 2024-05-10T00:10:00.000Z
12 | two | 2024-05-10T00:00:00.000Z
9 | one | 2024-05-10T00:00:00.000Z
9 | three | 2024-05-10T00:00:00.000Z
5 | two | 2024-05-10T00:20:00.000Z
;

@@ -49,26 +49,51 @@ max_cost: double

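// With implicit_last_over_time, a bare metric field inside an outer aggregate is treated as
// last_over_time(field), so max(network.bytes_in) reports the maximum of the latest sample per
// series (972) instead of the raw maximum (1021); the *Explicit variants below pin the same
// results with the wrapper spelled out.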
maxRateAndBytes
required_capability: metrics_command
required_capability: implicit_last_over_time
TS k8s | STATS max(60 * rate(network.total_bytes_in)), max(network.bytes_in);

max(60 * rate(network.total_bytes_in)): double | max(network.bytes_in): long
790.4235090751944 | 1021
790.4235090751944 | 972
;

maxRateAndBytesExplicit
required_capability: metrics_command
TS k8s | STATS max(60 * rate(network.total_bytes_in)), max(last_over_time(network.bytes_in));

max(60 * rate(network.total_bytes_in)): double | max(last_over_time(network.bytes_in)): long
790.4235090751944 | 972
;

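// With the bare field rewritten to last_over_time, the markup result becomes 972 * 1.05 = 1020.6
// (previously 1021 * 1.05 = 1072.05).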
maxRateAndMarkupBytes
required_capability: metrics_command
TS k8s | STATS max(rate(network.total_bytes_in)), max(network.bytes_in * 1.05);

max(rate(network.total_bytes_in)): double | max(network.bytes_in * 1.05): double
13.17372515125324 | 1072.05
13.17372515125324 | 1020.6
;

maxRateAndMarkupBytesExplicit
required_capability: metrics_command
TS k8s | STATS max(rate(network.total_bytes_in)), max_bytes_in = max(last_over_time(network.bytes_in) * 1.05);

max(rate(network.total_bytes_in)): double | max_bytes_in: double
13.17372515125324 | 1020.6
;

maxRateAndLastBytesIn
required_capability: metrics_command
TS k8s | STATS max(rate(network.total_bytes_in)), max_bytes_in = max(last_over_time(network.bytes_in * 1.05));

max(rate(network.total_bytes_in)): double | max_bytes_in: double
13.17372515125324 | 1020.6
;

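// This test switches to an explicit max_over_time so the raw per-series maximum (1021) is still
// exercised now that bare fields default to last_over_time.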
maxRateAndBytesAndCost
required_capability: metrics_command
TS k8s | STATS max(rate(network.total_bytes_in)), max(network.bytes_in), max(rate(network.total_cost));
TS k8s | STATS max(rate(network.total_bytes_in)), max(max_over_time(network.bytes_in)), max(rate(network.total_cost));

max(rate(network.total_bytes_in)): double| max(network.bytes_in): long| max(rate(network.total_cost)): double
13.17372515125324 | 1021 | 0.16151685393258428
max(rate(network.total_bytes_in)): double| max(max_over_time(network.bytes_in)): long| max(rate(network.total_cost)): double
13.17372515125324 | 1021 | 0.16151685393258428
;

sumRate
@@ -144,15 +169,15 @@ max(rate(network.total_bytes_in)):double | time_bucket:datetime | cluster:ke

BytesAndCostByBucketAndCluster
required_capability: metrics_command
TS k8s | STATS max(rate(network.total_bytes_in)), max(network.cost) BY time_bucket = bucket(@timestamp,5minute), cluster | SORT time_bucket DESC, cluster | LIMIT 6;

max(rate(network.total_bytes_in)): double | max(network.cost): double | time_bucket:date | cluster: keyword
6.980660660660663 | 10.75 | 2024-05-10T00:20:00.000Z | prod
4.05 | 11.875 | 2024-05-10T00:20:00.000Z | qa
3.19 | 9.5 | 2024-05-10T00:20:00.000Z | staging
11.860805860805861 | 12.375 | 2024-05-10T00:15:00.000Z | prod
23.702205882352942 | 12.125 | 2024-05-10T00:15:00.000Z | qa
7.784911616161616 | 11.5 | 2024-05-10T00:15:00.000Z | staging
TS k8s | STATS max(rate(network.total_bytes_in)), max(max_over_time(network.cost)) BY time_bucket = bucket(@timestamp,5minute), cluster | SORT time_bucket DESC, cluster | LIMIT 6;

max(rate(network.total_bytes_in)): double | max(max_over_time(network.cost)): double | time_bucket:date | cluster: keyword
6.980660660660663 | 10.75 | 2024-05-10T00:20:00.000Z | prod
4.05 | 11.875 | 2024-05-10T00:20:00.000Z | qa
3.19 | 9.5 | 2024-05-10T00:20:00.000Z | staging
11.860805860805861 | 12.375 | 2024-05-10T00:15:00.000Z | prod
23.702205882352942 | 12.125 | 2024-05-10T00:15:00.000Z | qa
7.784911616161616 | 11.5 | 2024-05-10T00:15:00.000Z | staging
;

oneRateWithBucketAndClusterThenFilter
@@ -48,7 +48,6 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
@@ -528,7 +527,6 @@ public void testGroupBySubset() {
try (EsqlQueryResponse resp = run(String.format(Locale.ROOT, """
TS %s
| STATS
values(metrics.gaugel_hdd.bytes.used),
max(max_over_time(metrics.gaugel_hdd.bytes.used)),
min(min_over_time(metrics.gaugel_hdd.bytes.used)),
sum(count_over_time(metrics.gaugel_hdd.bytes.used)),
@@ -541,29 +539,20 @@ public void testGroupBySubset() {
var groups = groupedRows(documents, dimensions, 60);
List<List<Object>> rows = consumeRows(resp);
for (List<Object> row : rows) {
var rowKey = getRowKey(row, dimensions, 7);
var rowKey = getRowKey(row, dimensions, 6);
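// values(metrics.gaugel_hdd.bytes.used) is no longer part of the STATS output, so the group key
// starts one column earlier and the assertions below shift down by one index.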
var tsGroups = groupByTimeseries(groups.get(rowKey), "gaugel_hdd.bytes.used");
var docValues = valuesInWindow(groups.get(rowKey), "gaugel_hdd.bytes.used");
if (row.get(0) instanceof List) {
assertThat(
(Collection<Long>) row.getFirst(),
containsInAnyOrder(docValues.stream().mapToLong(Integer::longValue).boxed().toArray(Long[]::new))
);
} else {
assertThat(row.getFirst(), equalTo(docValues.isEmpty() ? null : docValues.getFirst().longValue()));
}
Function<Object, Double> toDouble = cell -> switch (cell) {
case Long l -> l.doubleValue();
case Double d -> d;
case null -> null;
default -> throw new IllegalStateException("Unexpected value type: " + cell + " of class " + cell.getClass());
};
assertThat(toDouble.apply(row.get(1)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MAX, Agg.MAX)));
assertThat(toDouble.apply(row.get(2)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MIN, Agg.MIN)));
assertThat(toDouble.apply(row.get(3)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.COUNT)));
assertThat(toDouble.apply(row.get(4)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.SUM)));
assertThat(toDouble.apply(row.get(0)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MAX, Agg.MAX)));
assertThat(toDouble.apply(row.get(1)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MIN, Agg.MIN)));
assertThat(toDouble.apply(row.get(2)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.COUNT)));
assertThat(toDouble.apply(row.get(3)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.SUM)));
var avg = (Double) aggregatePerTimeseries(tsGroups, Agg.AVG, Agg.AVG);
assertThat((Double) row.get(5), row.get(5) == null ? equalTo(null) : closeTo(avg, avg * 0.01));
assertThat((Double) row.get(4), row.get(4) == null ? equalTo(null) : closeTo(avg, avg * 0.01));
// assertThat(row.get(6), equalTo(aggregatePerTimeseries(tsGroups, Agg.COUNT, Agg.COUNT).longValue()));
}
}
@@ -579,7 +568,6 @@ public void testGroupByNothing() {
try (EsqlQueryResponse resp = run(String.format(Locale.ROOT, """
TS %s
| STATS
values(metrics.gaugel_hdd.bytes.used),
max(max_over_time(metrics.gaugel_hdd.bytes.used)),
min(min_over_time(metrics.gaugel_hdd.bytes.used)),
sum(count_over_time(metrics.gaugel_hdd.bytes.used)),
@@ -592,29 +580,20 @@
List<List<Object>> rows = consumeRows(resp);
var groups = groupedRows(documents, List.of(), 60);
for (List<Object> row : rows) {
var windowStart = windowStart(row.get(7), 60);
List<Integer> docValues = valuesInWindow(groups.get(List.of(Long.toString(windowStart))), "gaugel_hdd.bytes.used");
var windowStart = windowStart(row.get(6), 60);
var tsGroups = groupByTimeseries(groups.get(List.of(Long.toString(windowStart))), "gaugel_hdd.bytes.used");
if (row.get(0) instanceof List) {
assertThat(
(Collection<Long>) row.get(0),
containsInAnyOrder(docValues.stream().mapToLong(Integer::longValue).boxed().toArray(Long[]::new))
);
} else {
assertThat(row.getFirst(), equalTo(docValues.isEmpty() ? null : docValues.getFirst().longValue()));
}
Function<Object, Double> toDouble = cell -> switch (cell) {
case Long l -> l.doubleValue();
case Double d -> d;
case null -> null;
default -> throw new IllegalStateException("Unexpected value type: " + cell + " of class " + cell.getClass());
};
assertThat(toDouble.apply(row.get(1)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MAX, Agg.MAX)));
assertThat(toDouble.apply(row.get(2)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MIN, Agg.MIN)));
assertThat(toDouble.apply(row.get(3)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.COUNT)));
assertThat(toDouble.apply(row.get(4)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.SUM)));
assertThat(toDouble.apply(row.get(0)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MAX, Agg.MAX)));
assertThat(toDouble.apply(row.get(1)), equalTo(aggregatePerTimeseries(tsGroups, Agg.MIN, Agg.MIN)));
assertThat(toDouble.apply(row.get(2)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.COUNT)));
assertThat(toDouble.apply(row.get(3)), equalTo(aggregatePerTimeseries(tsGroups, Agg.SUM, Agg.SUM)));
var avg = (Double) aggregatePerTimeseries(tsGroups, Agg.AVG, Agg.AVG);
assertThat((Double) row.get(5), row.get(5) == null ? equalTo(null) : closeTo(avg, avg * 0.01));
assertThat((Double) row.get(4), row.get(4) == null ? equalTo(null) : closeTo(avg, avg * 0.01));
// assertThat(row.get(6), equalTo(aggregatePerTimeseries(tsGroups, Agg.COUNT, Agg.COUNT).longValue()));
}
}