From 0134c5651072fbf1750013a748f5b33a45ae5183 Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Wed, 27 Nov 2024 11:40:54 +0100
Subject: [PATCH 1/2] Update "Download and deploy ELSER" snippet with adaptive
 allocations (#2878)

(cherry picked from commit 57401489d311436254246885568b826f34b5cd08)

# Conflicts:
#	docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
---
 docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
index 7a2d6acd4..7b9ccc88f 100644
--- a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
+++ b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
@@ -124,13 +124,24 @@ PUT _inference/sparse_embedding/my-elser-model
 {
   "service": "elser",
   "service_settings": {
+<<<<<<< HEAD
     "num_allocations": 1,
     "num_threads": 1
+=======
+    "adaptive_allocations": {
+      "enabled": true,
+      "min_number_of_allocations": 1,
+      "max_number_of_allocations": 10
+    },
+    "num_threads": 1,
+    "model_id": ".elser_model_2_linux-x86_64"
+>>>>>>> 57401489 (Update "Download and deploy ELSER" snippet with adaptive allocations (#2878))
   }
 }
 ----------------------------------
 --
 The API request automatically initiates the model download and then deploy the model.
+This example uses <> through adaptive allocation.
 Refer to the {ref}/infer-service-elser.html[ELSER {infer} service documentation] to learn more about the available settings.

From 6b1fc838a534937bde73ddd7488ae2e181c5e7e4 Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Wed, 27 Nov 2024 12:15:23 +0100
Subject: [PATCH 2/2] Update docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc

---
 docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
index 7b9ccc88f..84eee7a40 100644
--- a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
+++ b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
@@ -124,18 +124,12 @@ PUT _inference/sparse_embedding/my-elser-model
 {
   "service": "elser",
   "service_settings": {
-<<<<<<< HEAD
-    "num_allocations": 1,
-    "num_threads": 1
-=======
     "adaptive_allocations": {
       "enabled": true,
       "min_number_of_allocations": 1,
       "max_number_of_allocations": 10
     },
     "num_threads": 1,
-    "model_id": ".elser_model_2_linux-x86_64"
->>>>>>> 57401489 (Update "Download and deploy ELSER" snippet with adaptive allocations (#2878))
   }
 }
 ----------------------------------
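
For reviewers, a rough sketch of how the "Download and deploy ELSER" snippet in docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc reads once both patches in this series apply. Note the second hunk leaves a trailing comma after "num_threads": 1; it is dropped below so the request body is valid JSON, which is my assumption and not something the series itself does.

----------------------------------
// Sketch of the resolved snippet; trailing comma after "num_threads" removed (assumption, the patched file keeps it)
PUT _inference/sparse_embedding/my-elser-model
{
  "service": "elser",
  "service_settings": {
    "adaptive_allocations": {
      "enabled": true,
      "min_number_of_allocations": 1,
      "max_number_of_allocations": 10
    },
    "num_threads": 1
  }
}
----------------------------------

With adaptive allocations enabled, the deployment scales its allocation count between the configured minimum and maximum based on load, instead of the fixed "num_allocations": 1 used by the previous version of the snippet.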