x-pack/plugin/build.gradle (1 change: 1 addition & 0 deletions)
@@ -125,6 +125,7 @@ tasks.named("yamlRestCompatTestTransform").configure({ task ->
   task.skipTest("ml/job_cat_apis/Test cat anomaly detector jobs", "Flush API is deprecated")
   task.skipTest("ml/jobs_get_stats/Test get job stats after uploading data prompting the creation of some stats", "Flush API is deprecated")
   task.skipTest("ml/jobs_get_stats/Test get job stats for closed job", "Flush API is deprecated")
+  task.skipTest("ml/jobs_get_stats/Test reading v54 data counts and model size stats", "Version 5.4 support removed")
   task.skipTest("ml/inference_crud/Test deprecation of include model definition param", "Query parameter removed")
   task.skipTest("ml/post_data/Test flush and close job WITHOUT sending any data", "Flush API is deprecated")
   task.skipTest("ml/post_data/Test flush with skip_time", "Flush API is deprecated")
@@ -138,10 +138,6 @@ public static String documentId(String jobId) {
         return jobId + DOCUMENT_SUFFIX;
     }
 
-    public static String v54DocumentId(String jobId) {
-        return jobId + "-data-counts";
-    }
-
     private final String jobId;
     private long processedRecordCount;
     private long processedFieldCount;
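For orientation, a minimal sketch (not code from this PR) contrasting the removed v5.4 document ID format with the current one. The "-data-counts" suffix is confirmed by the removed v54DocumentId above; the current "_data_counts" suffix is an assumption, since DataCounts.DOCUMENT_SUFFIX is not visible in this diff, and the class and method names below are hypothetical.

    // Hedged sketch: the two ID schemes this change is about.
    class DataCountsIdSketch {
        static String currentId(String jobId) {
            return jobId + "_data_counts";   // assumed equivalent of DataCounts.documentId(jobId)
        }

        static String v54Id(String jobId) {
            return jobId + "-data-counts";   // the removed DataCounts.v54DocumentId(jobId)
        }
    }

The removed YAML test further down indexes a data counts document under the v5.4-style ID job-stats-v54-bwc-test-data-counts, which is why that test is dropped together with this method.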
@@ -526,10 +526,7 @@ public void getDataCountsModelSizeAndTimingStats(
             .addAggregation(
                 AggregationBuilders.filters(
                     results,
-                    new FiltersAggregator.KeyedFilter(
-                        dataCounts,
-                        QueryBuilders.idsQuery().addIds(DataCounts.documentId(jobId), DataCounts.v54DocumentId(jobId))
-                    ),
+                    new FiltersAggregator.KeyedFilter(dataCounts, QueryBuilders.idsQuery().addIds(DataCounts.documentId(jobId))),
                     new FiltersAggregator.KeyedFilter(timingStats, QueryBuilders.idsQuery().addIds(TimingStats.documentId(jobId))),
                     new FiltersAggregator.KeyedFilter(
                         modelSizeStats,
@@ -588,7 +585,7 @@ private SearchRequestBuilder createLatestDataCountsSearch(String indexName, Stri
             .setSize(1)
             .setIndicesOptions(IndicesOptions.lenientExpandOpen())
             // look for both old and new formats
-            .setQuery(QueryBuilders.idsQuery().addIds(DataCounts.documentId(jobId), DataCounts.v54DocumentId(jobId)))
+            .setQuery(QueryBuilders.idsQuery().addIds(DataCounts.documentId(jobId)))
             // We want to sort on log_time. However, this was added a long time later and before that we used to
             // sort on latest_record_time. Thus we handle older data counts where no log_time exists and we fall back
             // to the prior behaviour.
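The comment above describes sorting newest-first on log_time while falling back for older data counts documents that predate that field (the comment calls the older field latest_record_time; the document in the removed test below carries latest_record_timestamp). As a hedged sketch of how such a fallback sort can be expressed with the server's sort builders, not the file's actual code (which the diff cuts off here); the helper and class names are hypothetical and the field names are taken from this diff:

    import org.elasticsearch.action.search.SearchRequestBuilder;
    import org.elasticsearch.search.sort.SortBuilders;
    import org.elasticsearch.search.sort.SortOrder;

    // Hypothetical helper: primary sort on log_time, secondary sort on the older
    // latest_record_timestamp field for documents written before log_time existed.
    class LatestDataCountsSortSketch {
        static SearchRequestBuilder addLatestDataCountsSort(SearchRequestBuilder search) {
            return search
                // unmappedType avoids failures on indices where the field was never mapped;
                // missing(0L) sorts documents without the field last under DESC order
                .addSort(SortBuilders.fieldSort("log_time").order(SortOrder.DESC).unmappedType("date").missing(0L))
                .addSort(SortBuilders.fieldSort("latest_record_timestamp").order(SortOrder.DESC).unmappedType("date").missing(0L));
        }
    }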
@@ -245,99 +245,6 @@ setup:
         job_id: "missing-*"
         allow_no_match: false
 
----
-"Test reading v54 data counts and model size stats":
-
-  - do:
-      ml.put_job:
-        job_id: job-stats-v54-bwc-test
-        body: >
-          {
-            "analysis_config" : {
-              "bucket_span": "1h",
-              "detectors" :[{"function":"metric","field_name":"responsetime","by_field_name":"airline"}]
-            },
-            "analysis_limits" : {
-              "model_memory_limit": "10mb"
-            },
-            "data_description" : {
-            }
-          }
-
-  - do:
-      headers:
-        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
-      indices.refresh:
-        index: ".ml-anomalies*"
-
-  # This is testing that the documents with v5.4 IDs are fetched.
-  # Ideally we would use the v5.4 type but we can't put a mapping
-  # for another type into the single type indices. Type isn't used
-  # in the query so the test is valid
-  - do:
-      headers:
-        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
-        Content-Type: application/json
-      index:
-        index: .ml-anomalies-shared
-        id: job-stats-v54-bwc-test-data-counts
-        body:
-          {
-            job_id : job-stats-v54-bwc-test,
-            processed_record_count : 10,
-            processed_field_count : 0,
-            input_bytes : 0,
-            input_field_count : 0,
-            invalid_date_count : 0,
-            missing_field_count : 0,
-            out_of_order_timestamp_count : 0,
-            empty_bucket_count : 0,
-            sparse_bucket_count : 0,
-            bucket_count : 0,
-            input_record_count : 0,
-            latest_record_timestamp: 2000000000000
-          }
-
-  - do:
-      headers:
-        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
-        Content-Type: application/json
-      index:
-        index: .ml-anomalies-shared
-        id: job-stats-v54-bwc-test-model_size_stats
-        body:
-          {
-            job_id : job-stats-v54-bwc-test,
-            result_type : model_size_stats,
-            model_bytes : 0,
-            total_by_field_count : 101,
-            total_over_field_count : 0,
-            total_partition_field_count : 0,
-            bucket_allocation_failures_count : 0,
-            memory_status : ok,
-            categorized_doc_count : 0,
-            total_category_count : 0,
-            frequent_category_count : 0,
-            rare_category_count : 0,
-            dead_category_count : 0,
-            failed_category_count : 0,
-            categorization_status : ok,
-            log_time : 1495808248662
-          }
-
-  - do:
-      headers:
-        Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser
-      indices.refresh:
-        index: [.ml-anomalies-shared]
-
-  - do:
-      ml.get_job_stats:
-        job_id: job-stats-v54-bwc-test
-  - match: { jobs.0.job_id : job-stats-v54-bwc-test }
-  - match: { jobs.0.data_counts.processed_record_count: 10 }
-  - match: { jobs.0.model_size_stats.total_by_field_count: 101 }
-
 ---
 "Test no exception on get job stats with missing index":
 