From 184bd6f2a5e06e7d782bb27fffd210320a63d3fe Mon Sep 17 00:00:00 2001
From: Elastic Machine
Date: Mon, 24 Feb 2025 06:03:36 +0000
Subject: [PATCH] Auto-generated API code

---
 docs/reference.asciidoc     | 10 +++++-----
 src/api/types.ts            |  8 ++++++++
 src/api/typesWithBodyKey.ts |  8 ++++++++
 3 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc
index 5c9367a12..73d38c21c 100644
--- a/docs/reference.asciidoc
+++ b/docs/reference.asciidoc
@@ -4047,8 +4047,8 @@ client.ilm.start({ ... })
 ==== Arguments
 
 * *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*
-** *`timeout` (Optional, string | -1 | 0)*
+** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
 
 [discrete]
 ==== stop
@@ -4069,8 +4069,8 @@ client.ilm.stop({ ... })
 ==== Arguments
 
 * *Request (object):*
-** *`master_timeout` (Optional, string | -1 | 0)*
-** *`timeout` (Optional, string | -1 | 0)*
+** *`master_timeout` (Optional, string | -1 | 0)*: Explicit operation timeout for connection to master node
+** *`timeout` (Optional, string | -1 | 0)*: Explicit operation timeout
 
 [discrete]
 === indices
@@ -5765,7 +5765,7 @@ client.inference.put({ inference_id })
 * *Request (object):*
 ** *`inference_id` (string)*: The inference Id
 ** *`task_type` (Optional, Enum("sparse_embedding" | "text_embedding" | "rerank" | "completion"))*: The task type
-** *`inference_config` (Optional, { service, service_settings, task_settings })*
+** *`inference_config` (Optional, { chunking_settings, service, service_settings, task_settings })*
 
 [discrete]
 ==== stream_inference
diff --git a/src/api/types.ts b/src/api/types.ts
index 4908d5662..6a7a3000e 100644
--- a/src/api/types.ts
+++ b/src/api/types.ts
@@ -12501,7 +12501,15 @@ export type InferenceDenseByteVector = byte[]
 
 export type InferenceDenseVector = float[]
 
+export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+  max_chunk_size?: integer
+  overlap?: integer
+  sentence_overlap?: integer
+  strategy?: string
+}
+
 export interface InferenceInferenceEndpoint {
+  chunking_settings?: InferenceInferenceChunkingSettings
   service: string
   service_settings: InferenceServiceSettings
   task_settings?: InferenceTaskSettings
diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts
index 0afe50e7c..041805412 100644
--- a/src/api/typesWithBodyKey.ts
+++ b/src/api/typesWithBodyKey.ts
@@ -12725,7 +12725,15 @@ export type InferenceDenseByteVector = byte[]
 
 export type InferenceDenseVector = float[]
 
+export interface InferenceInferenceChunkingSettings extends InferenceInferenceEndpoint {
+  max_chunk_size?: integer
+  overlap?: integer
+  sentence_overlap?: integer
+  strategy?: string
+}
+
 export interface InferenceInferenceEndpoint {
+  chunking_settings?: InferenceInferenceChunkingSettings
   service: string
   service_settings: InferenceServiceSettings
   task_settings?: InferenceTaskSettings