diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java
index 1f3db455c9ce7..f21ec8ad15d4f 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java
@@ -66,7 +66,6 @@ public class MlAssignmentPlannerUpgradeIT extends AbstractUpgradeTestCase {
         RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length;
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101926")
     public void testMlAssignmentPlannerUpgrade() throws Exception {
         assumeFalse("This test deploys multiple models which cannot be accommodated on a single processor", IS_SINGLE_PROCESSOR_TEST);
 
@@ -186,12 +185,12 @@ private void setupDeployments() throws Exception {
         createTrainedModel("old_memory_format", 0, 0);
         putModelDefinition("old_memory_format");
         putVocabulary(List.of("these", "are", "my", "words"), "old_memory_format");
-        startDeployment("old_memory_format");
+        startDeployment("old_memory_format", "started", "low");
 
         createTrainedModel("new_memory_format", ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes());
         putModelDefinition("new_memory_format");
         putVocabulary(List.of("these", "are", "my", "words"), "new_memory_format");
-        startDeployment("new_memory_format");
+        startDeployment("new_memory_format", "started", "low");
     }
 
     private void cleanupDeployments() throws IOException {
@@ -247,10 +246,14 @@ private void deleteTrainedModel(String modelId) throws IOException {
     }
 
     private Response startDeployment(String modelId) throws IOException {
-        return startDeployment(modelId, "started");
+        return startDeployment(modelId, "started", "normal");
    }
 
     private Response startDeployment(String modelId, String waitForState) throws IOException {
+        return startDeployment(modelId, waitForState, "normal");
+    }
+
+    private Response startDeployment(String modelId, String waitForState, String priority) throws IOException {
         String inferenceThreadParamName = "threads_per_allocation";
         String modelThreadParamName = "number_of_allocations";
         String compatibleHeader = null;
@@ -270,7 +273,8 @@ private Response startDeployment(String modelId, String waitForState) throws IOE
                 + inferenceThreadParamName
                 + "=1&"
                 + modelThreadParamName
-                + "=1"
+                + "=1&priority="
+                + priority
         );
         if (compatibleHeader != null) {
             request.setOptions(request.getOptions().toBuilder().addHeader("Accept", compatibleHeader).build());