
Commit e98a2db

some more test cases and comments
1 parent b6b041c commit e98a2db

File tree

2 files changed: +30 -25 lines
  • x-pack/plugin
    • downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample
    • esql/src/main/java/org/elasticsearch/xpack/esql/analysis


x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleIT.java

Lines changed: 25 additions & 25 deletions
@@ -109,7 +109,7 @@ public void testDownsamplingPassthroughDimensions() throws Exception {
         assertDownsampleIndexFieldsAndDimensions(sourceIndex, targetIndex, downsampleConfig);
     }
 
-    public void testEsqlTSAfterDownsampling() throws Exception {
+    public void testAggMetricInEsqlTSAfterDownsampling() throws Exception {
         String dataStreamName = "metrics-foo";
         Settings settings = Settings.builder().put("mode", "time_series").putList("routing_path", List.of("host", "cluster")).build();
         putTSDBIndexTemplate("my-template", List.of("metrics-foo"), settings, """
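
The template body passed to putTSDBIndexTemplate is truncated above. For orientation only, a hedged sketch of what such a TSDB mapping typically looks like; the exact fields are assumptions inferred from the routing_path and the queries below, not the commit's actual template:

    // Hypothetical mapping sketch, NOT the commit's actual template body.
    // "host" and "cluster" are keyword dimensions (they appear in routing_path),
    // and "cpu" is a gauge metric, which downsampling rolls up into an
    // aggregate_metric_double with min/max/sum/value_count subfields.
    public class TsdbTemplateSketch {
        static final String MAPPING = """
            {
              "properties": {
                "@timestamp": { "type": "date" },
                "host":       { "type": "keyword", "time_series_dimension": true },
                "cluster":    { "type": "keyword", "time_series_dimension": true },
                "cpu":        { "type": "double",  "time_series_metric": "gauge" }
              }
            }
            """;
    }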
@@ -198,7 +198,8 @@ public void testEsqlTSAfterDownsampling() throws Exception {
         );
         assertAcked(client().execute(TransportDeleteIndexAction.TYPE, new DeleteIndexRequest(sourceIndex)).actionGet());
 
-        // index to the next backing index
+        // index to the next backing index; pick a random time between 31 and 59 minutes in the future, because the
+        // default look_ahead_time is 30m and we don't want to conflict with the previous backing index
         Supplier<XContentBuilder> nextSourceSupplier = () -> {
             String ts = randomDateForRange(now.plusSeconds(60 * 31).toEpochMilli(), now.plusSeconds(60 * 59).toEpochMilli());
             try {
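
Why 31 to 59 minutes: with the default look_ahead_time of 30m, timestamps up to now + 30m may still be accepted by the previous backing index, so the test indexes strictly past that boundary. A minimal standalone sketch of the same window arithmetic; the test computes these bounds inline, and the class framing here is mine:

    import java.time.Instant;

    // Sketch: any timestamp in [now + 31m, now + 59m] is past the previous
    // backing index's look_ahead_time window, so the document is routed to
    // the next backing index.
    public class LookAheadWindow {
        public static void main(String[] args) {
            Instant now = Instant.now();
            long windowStart = now.plusSeconds(60 * 31).toEpochMilli(); // just past the 30m look_ahead_time
            long windowEnd = now.plusSeconds(60 * 59).toEpochMilli();   // upper bound used by the test
            System.out.println("index timestamps between " + windowStart + " and " + windowEnd);
        }
    }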
@@ -231,30 +232,29 @@ public void testEsqlTSAfterDownsampling() throws Exception {
                     )
                 )
             );
-
         }
-        // test that implicit casting within time aggregation query works
-        try (
-            var resp = esqlCommand(
-                "TS "
-                    + dataStreamName
-                    + " | STATS min = sum(min_over_time(cpu)), max = sum(max_over_time(cpu)) by cluster, bucket(@timestamp, 1 hour)"
-            )
-        ) {
-            var columns = resp.columns();
-            assertThat(columns, hasSize(4));
-            assertThat(
-                resp.columns(),
-                equalTo(
-                    List.of(
-                        new ColumnInfoImpl("min", "double", null),
-                        new ColumnInfoImpl("max", "double", null),
-                        new ColumnInfoImpl("cluster", "keyword", null),
-                        new ColumnInfoImpl("bucket(@timestamp, 1 hour)", "date", null)
-                    )
-                )
-            );
-            // TODO: verify the numbers are accurate
+
+        // test _over_time commands with implicit casting of aggregate_metric_double
+        for (String innerCommand : List.of("min_over_time", "max_over_time", "avg_over_time", "count_over_time")) {
+            for (String outerCommand : List.of("min", "max", "sum", "count")) {
+                String command = outerCommand + " (" + innerCommand + "(cpu))";
+                String expectedType = innerCommand.equals("count_over_time") || outerCommand.equals("count") ? "long" : "double";
+                try (var resp = esqlCommand("TS " + dataStreamName + " | STATS " + command + " by cluster, bucket(@timestamp, 1 hour)")) {
+                    var columns = resp.columns();
+                    assertThat(columns, hasSize(3));
+                    assertThat(
+                        resp.columns(),
+                        equalTo(
+                            List.of(
+                                new ColumnInfoImpl(command, expectedType, null),
+                                new ColumnInfoImpl("cluster", "keyword", null),
+                                new ColumnInfoImpl("bucket(@timestamp, 1 hour)", "date", null)
+                            )
+                        )
+                    );
+                    // TODO: verify the numbers are accurate
+                }
+            }
         }
     }
 
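The expectedType line encodes the type-resolution rule the new test exercises: if either the inner or the outer function is a count, the result column is long; every other min/max/sum/avg combination over the aggregate_metric_double resolves to double. One iteration spelled out, assembled the same way the loop builds it:

    // One loop iteration expanded for readability.
    public class OverTimeTypeRule {
        public static void main(String[] args) {
            String inner = "min_over_time", outer = "sum";
            String command = outer + " (" + inner + "(cpu))"; // "sum (min_over_time(cpu))"
            String expectedType = inner.equals("count_over_time") || outer.equals("count") ? "long" : "double";
            String query = "TS metrics-foo | STATS " + command + " by cluster, bucket(@timestamp, 1 hour)";
            // Neither side is a count here, so the result column is "double";
            // swapping in count or count_over_time flips it to "long", since
            // counts are integral regardless of the metric's numeric type.
            System.out.println(query + " -> " + expectedType);
        }
    }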
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java

Lines changed: 5 additions & 0 deletions
@@ -2068,6 +2068,11 @@ private void typeResolutions(
         );
 
         Expression convertExpression;
+        // Counting on aggregate metric double has unique behavior in that we cannot just provide the number of
+        // documents; instead we have to look inside the aggregate metric double's count field and sum those together.
+        // Grabbing the count value with FromAggregateMetricDouble the same way we do with min/max/sum would result in
+        // a single Int field, which would incorrectly be treated as 1 document (instead of however many originally
+        // went into the aggregate metric double).
         if (metric == AggregateMetricDoubleBlockBuilder.Metric.COUNT) {
            convertExpression = new ToAggregateMetricDouble(fa.source(), resolved);
         } else if (type == AGGREGATE_METRIC_DOUBLE) {
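
A worked example of the distinction the comment draws; the values are illustrative, not from the commit:

    // Two downsampled documents, each an aggregate_metric_double rollup:
    //   bucket A: { min: 0.1, max: 0.9, sum: 4.2, value_count: 10 }
    //   bucket B: { min: 0.2, max: 0.7, sum: 1.8, value_count: 5 }
    public class AggMetricCountExample {
        public static void main(String[] args) {
            long bucketACount = 10, bucketBCount = 5;
            long correct = bucketACount + bucketBCount; // 15: sum of the value_count subfields
            long naive = 2;                             // the number of downsampled documents
            // count(cpu) must report 15, not 2, which is why COUNT converts with
            // ToAggregateMetricDouble (keeping the count subfield available to sum)
            // instead of extracting a single numeric via FromAggregateMetricDouble.
            System.out.println("count(cpu) must be " + correct + ", not " + naive);
        }
    }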
