From 14e772ea82b1369a65187c510bfa53a186de1e1b Mon Sep 17 00:00:00 2001
From: Valeriy Khakhutskyy <1292899+valeriy42@users.noreply.github.com>
Date: Fri, 24 Oct 2025 15:26:35 +0200
Subject: [PATCH] [ML] Fix
 MlAssignmentPlannerUpgradeIT.testMlAssignmentPlannerUpgrade() (#137080)

As suggested here, this PR changes deployments to low priority. Low
priority deployments don't require specific processor allocations,
allowing them to run in constrained environments. The test can now
succeed in single-processor test environments where all processors are
already allocated.

Since we don't test BWC for pre-8.6.0 versions, where priority was
introduced, this change fixes the test.
---
 .../upgrades/MlAssignmentPlannerUpgradeIT.java | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java
index 07d9de3dea6ac..e8443bef65436 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java
@@ -67,7 +67,6 @@ public class MlAssignmentPlannerUpgradeIT extends AbstractUpgradeTestCase {
         RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length;
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101926")
     public void testMlAssignmentPlannerUpgrade() throws Exception {
         assumeFalse("This test deploys multiple models which cannot be accommodated on a single processor", IS_SINGLE_PROCESSOR_TEST);
 
@@ -187,12 +186,12 @@ private void setupDeployments() throws Exception {
         createTrainedModel("old_memory_format", 0, 0);
         putModelDefinition("old_memory_format");
         putVocabulary(List.of("these", "are", "my", "words"), "old_memory_format");
-        startDeployment("old_memory_format");
+        startDeployment("old_memory_format", "started", "low");
 
         createTrainedModel("new_memory_format", ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes());
         putModelDefinition("new_memory_format");
         putVocabulary(List.of("these", "are", "my", "words"), "new_memory_format");
-        startDeployment("new_memory_format");
+        startDeployment("new_memory_format", "started", "low");
     }
 
     private void cleanupDeployments() throws IOException {
@@ -248,10 +247,14 @@ private void deleteTrainedModel(String modelId) throws IOException {
     }
 
     private Response startDeployment(String modelId) throws IOException {
-        return startDeployment(modelId, "started");
+        return startDeployment(modelId, "started", "normal");
     }
 
     private Response startDeployment(String modelId, String waitForState) throws IOException {
+        return startDeployment(modelId, waitForState, "normal");
+    }
+
+    private Response startDeployment(String modelId, String waitForState, String priority) throws IOException {
         String inferenceThreadParamName = "threads_per_allocation";
         String modelThreadParamName = "number_of_allocations";
         String compatibleHeader = null;
@@ -271,7 +274,8 @@ private Response startDeployment(String modelId, String waitForState) throws IOE
                 + inferenceThreadParamName
                 + "=1&"
                 + modelThreadParamName
-                + "=1"
+                + "=1&priority="
+                + priority
         );
         if (compatibleHeader != null) {
             request.setOptions(request.getOptions().toBuilder().addHeader("Accept", compatibleHeader).build());