From 0c950c2ea7520d362e15c1a63950fc63bca76c3e Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 10 Nov 2025 06:03:53 +0000 Subject: [PATCH] Auto-generated API code --- docs/reference/api-reference.md | 116 +++++++------- src/api/api/indices.ts | 116 +++++++++----- src/api/api/inference.ts | 12 +- src/api/types.ts | 267 +++++++++++++++++++++++++------- 4 files changed, 347 insertions(+), 164 deletions(-) diff --git a/docs/reference/api-reference.md b/docs/reference/api-reference.md index ee75a6e1f..f04242f1b 100644 --- a/docs/reference/api-reference.md +++ b/docs/reference/api-reference.md @@ -671,7 +671,7 @@ client.fieldCaps({ ... }) - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports a list of values, such as `open,hidden`. - **`ignore_unavailable` (Optional, boolean)**: If `true`, missing or closed indices are not included in the response. - **`include_unmapped` (Optional, boolean)**: If true, unmapped fields are included in the response. -- **`filters` (Optional, string)**: A list of filters to apply to the response. +- **`filters` (Optional, string \| string[])**: A list of filters to apply to the response. - **`types` (Optional, string[])**: A list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. - **`include_empty_fields` (Optional, boolean)**: If false, empty fields are not included in the response. - **`project_routing` (Optional, string)**: Specifies a subset of projects to target for the field-caps query using project metadata tags in a subset of Lucene query syntax. 
Allowed Lucene queries: the _alias tag and a single value (possibly wildcarded). Examples: _alias:my-project _alias:_origin _alias:*pr* Supported in serverless only. @@ -1769,7 +1769,7 @@ client.termsEnum({ index, field }) #### Request (object) [_request_terms_enum] -- **`index` (string)**: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. +- **`index` (string \| string[])**: A list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. - **`field` (string)**: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. - **`size` (Optional, number)**: The number of matching terms to return. - **`timeout` (Optional, string \| -1 \| 0)**: The maximum length of time to spend collecting results. If the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty. @@ -3718,7 +3718,7 @@ client.cluster.state({ ... }) ### Arguments [_arguments_cluster.state] #### Request (object) [_request_cluster.state] -- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics +- **`metric` (Optional, Enum("_all" \| "version" \| "master_node" \| "blocks" \| "nodes" \| "metadata" \| "routing_table" \| "routing_nodes" \| "customs") \| Enum("_all" \| "version" \| "master_node" \| "blocks" \| "nodes" \| "metadata" \| "routing_table" \| "routing_nodes" \| "customs")[])**: Limit the information returned to the specified metrics - **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - **`allow_no_indices` (Optional, boolean)**: Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified) - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Whether to expand wildcard expression to concrete indices that are open, closed or both. @@ -5199,7 +5199,7 @@ client.indices.addBlock({ index, block }) ### Arguments [_arguments_indices.add_block] #### Request (object) [_request_indices.add_block] -- **`index` (string)**: A list or wildcard expression of index names used to limit the request. +- **`index` (string \| string[])**: A list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. @@ -5521,7 +5521,7 @@ client.indices.dataStreamsStats({ ... }) ### Arguments [_arguments_indices.data_streams_stats] #### Request (object) [_request_indices.data_streams_stats] -- **`name` (Optional, string)**: List of data streams used to limit the request. +- **`name` (Optional, string \| string[])**: List of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. - **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of data stream that wildcard patterns can match. @@ -5654,16 +5654,6 @@ client.indices.deleteIndexTemplate({ name }) - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
- **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. -## client.indices.deleteSampleConfiguration [_indices.delete_sample_configuration] -Delete sampling configuration for an index or data stream - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-sample-configuration) - -```ts -client.indices.deleteSampleConfiguration() -``` - - ## client.indices.deleteTemplate [_indices.delete_template] Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. @@ -6050,16 +6040,6 @@ Supports a list of values, such as `open,hidden`. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. -## client.indices.getAllSampleConfiguration [_indices.get_all_sample_configuration] -Get sampling configurations for all indices and data streams - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-all-sample-configuration) - -```ts -client.indices.getAllSampleConfiguration() -``` - - ## client.indices.getDataLifecycle [_indices.get_data_lifecycle] Get data stream lifecycles. @@ -6216,7 +6196,7 @@ client.indices.getIndexTemplate({ ... }) ### Arguments [_arguments_indices.get_index_template] #### Request (object) [_request_indices.get_index_template] -- **`name` (Optional, string)**: List of index template names used to limit the request. Wildcard (*) expressions are supported. +- **`name` (Optional, string)**: Name of index template to retrieve. Wildcard (*) expressions are supported. 
- **`local` (Optional, boolean)**: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. - **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - **`master_timeout` (Optional, string \| -1 \| 0)**: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -6264,16 +6244,6 @@ client.indices.getMigrateReindexStatus({ index }) #### Request (object) [_request_indices.get_migrate_reindex_status] - **`index` (string \| string[])**: The index or data stream name. -## client.indices.getSampleConfiguration [_indices.get_sample_configuration] -Get sampling configuration for an index or data stream - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample-configuration) - -```ts -client.indices.getSampleConfiguration() -``` - - ## client.indices.getSettings [_indices.get_settings] Get index settings. Get setting information for one or more indices. @@ -6744,16 +6714,6 @@ If no response is received before the timeout expires, the request fails and ret If no response is received before the timeout expires, the request fails and returns an error. - **`write_index_only` (Optional, boolean)**: If `true`, the mappings are applied only to the current write index for the target. -## client.indices.putSampleConfiguration [_indices.put_sample_configuration] -Configure sampling for an index or data stream - -[Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-sample-configuration) - -```ts -client.indices.putSampleConfiguration() -``` - - ## client.indices.putSettings [_indices.put_settings] Update index settings. Changes dynamic index settings in real time. 
@@ -7025,7 +6985,7 @@ client.indices.removeBlock({ index, block }) ### Arguments [_arguments_indices.remove_block] #### Request (object) [_request_indices.remove_block] -- **`index` (string)**: A list or wildcard expression of index names used to limit the request. +- **`index` (string \| string[])**: A list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are removing blocks from. To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. @@ -7490,7 +7450,7 @@ client.indices.stats({ ... }) ### Arguments [_arguments_indices.stats] #### Request (object) [_request_indices.stats] -- **`metric` (Optional, string \| string[])**: Limit the information returned the specific metrics. +- **`metric` (Optional, Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| "dense_vector" \| "sparse_vector") \| Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| "dense_vector" \| "sparse_vector")[])**: Limit the information returned the specific metrics. - **`index` (Optional, string \| string[])**: A list of index names; use `_all` or empty string to perform the operation on all indices - **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. 
- **`expand_wildcards` (Optional, Enum("all" \| "open" \| "closed" \| "hidden" \| "none") \| Enum("all" \| "open" \| "closed" \| "hidden" \| "none")[])**: Type of index that wildcard patterns can match. If the request can target data streams, this argument @@ -7608,6 +7568,7 @@ Either a string or an array of strings. ## client.inference.delete [_inference.delete] Delete an inference endpoint +This API requires the manage_inference cluster privilege (the built-in `inference_admin` role grants this privilege). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete) @@ -7625,6 +7586,7 @@ client.inference.delete({ inference_id }) ## client.inference.get [_inference.get] Get an inference endpoint +This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). [Endpoint documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get) @@ -7759,11 +7721,13 @@ client.inference.putAlibabacloud({ task_type, alibabacloud_inference_id, service ### Arguments [_arguments_inference.put_alibabacloud] #### Request (object) [_request_inference.put_alibabacloud] -- **`task_type` (Enum("completion" \| "rerank" \| "space_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform. +- **`task_type` (Enum("completion" \| "rerank" \| "sparse_embedding" \| "text_embedding"))**: The type of the inference task that the model will perform. - **`alibabacloud_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("alibabacloud-ai-search"))**: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. - **`service_settings` ({ api_key, host, rate_limit, service_id, workspace })**: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. 
- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `sparse_embedding` or `text_embedding` task types. +Not applicable to the `rerank` or `completion` task types. - **`task_settings` (Optional, { input_type, return_token })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7790,6 +7754,8 @@ client.inference.putAmazonbedrock({ task_type, amazonbedrock_inference_id, servi - **`service` (Enum("amazonbedrock"))**: The type of service supported for the specified task type. In this case, `amazonbedrock`. - **`service_settings` ({ access_key, model, provider, region, rate_limit, secret_key })**: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` task type. - **`task_settings` (Optional, { max_new_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7814,6 +7780,8 @@ client.inference.putAmazonsagemaker({ task_type, amazonsagemaker_inference_id, s - **`service_settings` ({ access_key, endpoint_name, api, region, secret_key, target_model, target_container_hostname, inference_component_name, batch_size, dimensions })**: Settings used to install the inference model. 
These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `sparse_embedding` or `text_embedding` task types. +Not applicable to the `rerank`, `completion`, or `chat_completion` task types. - **`task_settings` (Optional, { custom_attributes, enable_explanations, inference_id, session_id, target_variant })**: Settings to configure the inference task. These settings are specific to the task type and `service_settings.api` you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7837,7 +7805,6 @@ The only valid task type for the model to perform is `completion`. - **`anthropic_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("anthropic"))**: The type of service supported for the specified task type. In this case, `anthropic`. - **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `anthropic` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { max_tokens, temperature, top_k, top_p })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7861,6 +7828,8 @@ client.inference.putAzureaistudio({ task_type, azureaistudio_inference_id, servi - **`service` (Enum("azureaistudio"))**: The type of service supported for the specified task type. In this case, `azureaistudio`. 
- **`service_settings` ({ api_key, endpoint_type, target, provider, rate_limit })**: Settings used to install the inference model. These settings are specific to the `azureaistudio` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `rerank` or `completion` task types. - **`task_settings` (Optional, { do_sample, max_new_tokens, temperature, top_p, user, return_documents, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7892,6 +7861,8 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`service` (Enum("azureopenai"))**: The type of service supported for the specified task type. In this case, `azureopenai`. - **`service_settings` ({ api_key, api_version, deployment_id, entra_id, rate_limit, resource_name })**: Settings used to install the inference model. These settings are specific to the `azureopenai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` task type. - **`task_settings` (Optional, { user })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
@@ -7916,6 +7887,8 @@ client.inference.putCohere({ task_type, cohere_inference_id, service, service_se - **`service_settings` ({ api_key, embedding_type, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `cohere` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `rerank` or `completion` task type. - **`task_settings` (Optional, { input_type, return_documents, top_n, truncate })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -7940,7 +7913,6 @@ client.inference.putContextualai({ task_type, contextualai_inference_id, service - **`contextualai_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("contextualai"))**: The type of service supported for the specified task type. In this case, `contextualai`. - **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `contextualai` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`task_settings` (Optional, { instruction, return_documents, top_k })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. 
@@ -8004,6 +7976,8 @@ client.inference.putCustom({ task_type, custom_inference_id, service, service_se - **`service_settings` ({ batch_size, headers, input_type, query_parameters, request, response, secret_parameters, url })**: Settings used to install the inference model. These settings are specific to the `custom` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `sparse_embedding` or `text_embedding` task types. +Not applicable to the `rerank` or `completion` task types. - **`task_settings` (Optional, { parameters })**: Settings to configure the inference task. These settings are specific to the task type you specified. @@ -8026,7 +8000,6 @@ client.inference.putDeepseek({ task_type, deepseek_inference_id, service, servic - **`service` (Enum("deepseek"))**: The type of service supported for the specified task type. In this case, `deepseek`. - **`service_settings` ({ api_key, model_id, url })**: Settings used to install the inference model. These settings are specific to the `deepseek` service. -- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putElasticsearch [_inference.put_elasticsearch] @@ -8063,7 +8036,7 @@ The must not match the `model_id`. - **`service_settings` ({ adaptive_allocations, deployment_id, model_id, num_allocations, num_threads, long_document_strategy, max_chunks_per_doc })**: Settings used to install the inference model. These settings are specific to the `elasticsearch` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. 
Applies only to the `sparse_embedding` and `text_embedding` task types. -Not applicable to the `rerank`, `completion`, or `chat_completion` task types. +Not applicable to the `rerank` task type. - **`task_settings` (Optional, { return_documents })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8123,6 +8096,8 @@ client.inference.putGoogleaistudio({ task_type, googleaistudio_inference_id, ser - **`service` (Enum("googleaistudio"))**: The type of service supported for the specified task type. In this case, `googleaistudio`. - **`service_settings` ({ api_key, model_id, rate_limit })**: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` task type. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putGooglevertexai [_inference.put_googlevertexai] @@ -8144,6 +8119,8 @@ client.inference.putGooglevertexai({ task_type, googlevertexai_inference_id, ser - **`service` (Enum("googlevertexai"))**: The type of service supported for the specified task type. In this case, `googlevertexai`. - **`service_settings` ({ provider, url, streaming_url, location, model_id, project_id, rate_limit, service_account_json, dimensions })**: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. 
+Applies only to the `text_embedding` task type. +Not applicable to the `rerank`, `completion`, or `chat_completion` task types. - **`task_settings` (Optional, { auto_truncate, top_n, thinking_config, max_tokens })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8202,6 +8179,8 @@ client.inference.putHuggingFace({ task_type, huggingface_inference_id, service, - **`service` (Enum("hugging_face"))**: The type of service supported for the specified task type. In this case, `hugging_face`. - **`service_settings` ({ api_key, rate_limit, url, model_id })**: Settings used to install the inference model. These settings are specific to the `hugging_face` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `rerank`, `completion`, or `chat_completion` task types. - **`task_settings` (Optional, { return_documents, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8228,6 +8207,8 @@ client.inference.putJinaai({ task_type, jinaai_inference_id, service, service_se - **`service` (Enum("jinaai"))**: The type of service supported for the specified task type. In this case, `jinaai`. - **`service_settings` ({ api_key, model_id, rate_limit, similarity })**: Settings used to install the inference model. These settings are specific to the `jinaai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. 
+Applies only to the `text_embedding` task type. +Not applicable to the `rerank` task type. - **`task_settings` (Optional, { return_documents, task, top_n })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8251,6 +8232,8 @@ client.inference.putLlama({ task_type, llama_inference_id, service, service_sett - **`service` (Enum("llama"))**: The type of service supported for the specified task type. In this case, `llama`. - **`service_settings` ({ url, model_id, max_input_tokens, similarity, rate_limit })**: Settings used to install the inference model. These settings are specific to the `llama` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` or `chat_completion` task types. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putMistral [_inference.put_mistral] @@ -8272,6 +8255,8 @@ client.inference.putMistral({ task_type, mistral_inference_id, service, service_ - **`service` (Enum("mistral"))**: The type of service supported for the specified task type. In this case, `mistral`. - **`service_settings` ({ api_key, max_input_tokens, model, rate_limit })**: Settings used to install the inference model. These settings are specific to the `mistral` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` or `chat_completion` task types. 
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.putOpenai [_inference.put_openai] @@ -8294,6 +8279,8 @@ NOTE: The `chat_completion` task type only supports streaming and only through t - **`service` (Enum("openai"))**: The type of service supported for the specified task type. In this case, `openai`. - **`service_settings` ({ api_key, dimensions, model_id, organization_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `openai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` or `chat_completion` task types. - **`task_settings` (Optional, { user, headers })**: Settings to configure the inference task. These settings are specific to the task type you specified. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8319,6 +8306,8 @@ client.inference.putVoyageai({ task_type, voyageai_inference_id, service, servic - **`service` (Enum("voyageai"))**: The type of service supported for the specified task type. In this case, `voyageai`. - **`service_settings` ({ dimensions, model_id, rate_limit, embedding_type })**: Settings used to install the inference model. These settings are specific to the `voyageai` service. - **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `rerank` task type. - **`task_settings` (Optional, { input_type, return_documents, top_k, truncation })**: Settings to configure the inference task. These settings are specific to the task type you specified. 
- **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. @@ -8343,6 +8332,9 @@ client.inference.putWatsonx({ task_type, watsonx_inference_id, service, service_ - **`watsonx_inference_id` (string)**: The unique identifier of the inference endpoint. - **`service` (Enum("watsonxai"))**: The type of service supported for the specified task type. In this case, `watsonxai`. - **`service_settings` ({ api_key, api_version, model_id, project_id, rate_limit, url })**: Settings used to install the inference model. These settings are specific to the `watsonxai` service. +- **`chunking_settings` (Optional, { max_chunk_size, overlap, sentence_overlap, separator_group, separators, strategy })**: The chunking configuration object. +Applies only to the `text_embedding` task type. +Not applicable to the `completion` or `chat_completion` task types. - **`timeout` (Optional, string \| -1 \| 0)**: Specifies the amount of time to wait for the inference endpoint to be created. ## client.inference.rerank [_inference.rerank] @@ -10960,7 +10952,7 @@ client.nodes.info({ ... }) #### Request (object) [_request_nodes.info] - **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. -- **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. +- **`metric` (Optional, Enum("_all" \| "_none" \| "settings" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "transport" \| "http" \| "remote_cluster_server" \| "plugins" \| "ingest" \| "aggregations" \| "indices") \| Enum("_all" \| "_none" \| "settings" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "transport" \| "http" \| "remote_cluster_server" \| "plugins" \| "ingest" \| "aggregations" \| "indices")[])**: Limits the information returned to the specific metrics. Supports a list, such as http,ingest. 
- **`flat_settings` (Optional, boolean)**: If true, returns settings in flat format. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. @@ -11004,8 +10996,8 @@ client.nodes.stats({ ... }) #### Request (object) [_request_nodes.stats] - **`node_id` (Optional, string \| string[])**: List of node IDs or names used to limit returned information. -- **`metric` (Optional, string \| string[])**: Limit the information returned to the specified metrics -- **`index_metric` (Optional, string \| string[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. +- **`metric` (Optional, Enum("_all" \| "_none" \| "indices" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "fs" \| "transport" \| "http" \| "breaker" \| "script" \| "discovery" \| "ingest" \| "adaptive_selection" \| "script_cache" \| "indexing_pressure" \| "repositories" \| "allocations") \| Enum("_all" \| "_none" \| "indices" \| "os" \| "process" \| "jvm" \| "thread_pool" \| "fs" \| "transport" \| "http" \| "breaker" \| "script" \| "discovery" \| "ingest" \| "adaptive_selection" \| "script_cache" \| "indexing_pressure" \| "repositories" \| "allocations")[])**: Limit the information returned to the specified metrics +- **`index_metric` (Optional, Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| "dense_vector" \| "sparse_vector") \| Enum("_all" \| "store" \| "indexing" \| "get" \| "search" \| "merge" \| "flush" \| "refresh" \| "query_cache" \| "fielddata" \| "docs" \| "warmer" \| "completion" \| "segments" \| "translog" \| "request_cache" \| "recovery" \| "bulk" \| "shard_stats" \| "mappings" \| 
"dense_vector" \| "sparse_vector")[])**: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. - **`completion_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata and suggest statistics. - **`fielddata_fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in fielddata statistics. - **`fields` (Optional, string \| string[])**: List or wildcard expressions of fields to include in the statistics. @@ -11029,8 +11021,8 @@ client.nodes.usage({ ... }) #### Request (object) [_request_nodes.usage] - **`node_id` (Optional, string \| string[])**: A list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes -- **`metric` (Optional, string \| string[])**: Limits the information returned to the specific metrics. -A list of the following options: `_all`, `rest_actions`. +- **`metric` (Optional, Enum("_all" \| "rest_actions" \| "aggregations") \| Enum("_all" \| "rest_actions" \| "aggregations")[])**: Limits the information returned to the specific metrics. +A list of the following options: `_all`, `rest_actions`, `aggregations`. - **`timeout` (Optional, string \| -1 \| 0)**: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. @@ -11835,7 +11827,7 @@ client.security.clearCachedPrivileges({ application }) ### Arguments [_arguments_security.clear_cached_privileges] #### Request (object) [_request_security.clear_cached_privileges] -- **`application` (string)**: A list of applications. +- **`application` (string \| string[])**: A list of applications. To clear all applications, use an asterism (`*`). It does not support other wildcard patterns. 
@@ -13809,7 +13801,7 @@ client.snapshot.delete({ repository, snapshot }) #### Request (object) [_request_snapshot.delete] - **`repository` (string)**: The name of the repository to delete a snapshot from. -- **`snapshot` (string)**: A list of snapshot names to delete. +- **`snapshot` (string \| string[])**: A list of snapshot names to delete. It also accepts wildcards (`*`). - **`master_timeout` (Optional, string \| -1 \| 0)**: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. @@ -14768,7 +14760,7 @@ client.textStructure.findFieldStructure({ field, index }) #### Request (object) [_request_text_structure.find_field_structure] - **`field` (string)**: The field that should be analyzed. - **`index` (string)**: The name of the index that contains the analyzed field. -- **`column_names` (Optional, string)**: If `format` is set to `delimited`, you can specify the column names in a list. +- **`column_names` (Optional, string \| string[])**: If `format` is set to `delimited`, you can specify the column names in a list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. - **`delimiter` (Optional, string)**: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. @@ -14877,7 +14869,7 @@ client.textStructure.findMessageStructure({ messages }) #### Request (object) [_request_text_structure.find_message_structure] - **`messages` (string[])**: The list of messages you want to analyze. -- **`column_names` (Optional, string)**: If the format is `delimited`, you can specify the column names in a list. +- **`column_names` (Optional, string \| string[])**: If the format is `delimited`, you can specify the column names in a list. 
If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. - **`delimiter` (Optional, string)**: If the format is `delimited`, you can specify the character used to delimit the values in each row. diff --git a/src/api/api/indices.ts b/src/api/api/indices.ts index b1fc1a736..cb8478cdb 100644 --- a/src/api/api/indices.ts +++ b/src/api/api/indices.ts @@ -236,7 +236,10 @@ export default class Indices { 'index' ], body: [], - query: [] + query: [ + 'master_timeout', + 'timeout' + ] }, 'indices.delete_template': { path: [ @@ -402,7 +405,9 @@ export default class Indices { 'indices.get_all_sample_configuration': { path: [], body: [], - query: [] + query: [ + 'master_timeout' + ] }, 'indices.get_data_lifecycle': { path: [ @@ -517,7 +522,9 @@ export default class Indices { 'index' ], body: [], - query: [] + query: [ + 'master_timeout' + ] }, 'indices.get_sample_stats': { path: [ @@ -723,8 +730,17 @@ export default class Indices { path: [ 'index' ], - body: [], - query: [] + body: [ + 'rate', + 'max_samples', + 'max_size', + 'time_to_live', + 'if' + ], + query: [ + 'master_timeout', + 'timeout' + ] }, 'indices.put_settings': { path: [ @@ -1908,13 +1924,13 @@ export default class Indices { } /** - * Delete sampling configuration for an index or data stream - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-delete-sample-configuration | Elasticsearch API documentation} + * Delete sampling configuration. Delete the sampling configuration for the specified index. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} */ - async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async deleteSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async deleteSampleConfiguration (this: That, params: T.IndicesDeleteSampleConfigurationRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this[kAcceptedParams]['indices.delete_sample_configuration'] @@ -1932,11 +1948,11 @@ export default class Indices { } } - params = params ?? 
{} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -1949,7 +1965,9 @@ export default class Indices { index: params.index }, acceptedParams: [ - 'index' + 'index', + 'master_timeout', + 'timeout' ] } return await this.transport.request({ path, method, querystring, body, meta }, options) @@ -2679,13 +2697,13 @@ export default class Indices { } /** - * Get sampling configurations for all indices and data streams - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-all-sample-configuration | Elasticsearch API documentation} + * Get all sampling configurations. Get the sampling configurations for all indices. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} */ - async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getAllSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async getAllSampleConfiguration (this: That, params?: T.IndicesGetAllSampleConfigurationRequest, options?: TransportRequestOptions): Promise { const { path: 
acceptedPath } = this[kAcceptedParams]['indices.get_all_sample_configuration'] @@ -2708,6 +2726,7 @@ export default class Indices { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -2717,6 +2736,7 @@ export default class Indices { const meta: TransportRequestMetadata = { name: 'indices.get_all_sample_configuration', acceptedParams: [ + 'master_timeout' ] } return await this.transport.request({ path, method, querystring, body, meta }, options) @@ -3305,13 +3325,13 @@ export default class Indices { } /** - * Get sampling configuration for an index or data stream - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-get-sample-configuration | Elasticsearch API documentation} + * Get sampling configuration. Get the sampling configuration for the specified index. + * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} */ - async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async getSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async getSampleConfiguration (this: That, params: T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async getSampleConfiguration (this: That, params: T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async getSampleConfiguration (this: That, params: T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async getSampleConfiguration (this: That, params: 
T.IndicesGetSampleConfigurationRequest, options?: TransportRequestOptions): Promise { const { path: acceptedPath } = this[kAcceptedParams]['indices.get_sample_configuration'] @@ -3329,11 +3349,11 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { + // @ts-expect-error querystring[key] = params[key] } } @@ -3346,7 +3366,8 @@ export default class Indices { index: params.index }, acceptedParams: [ - 'index' + 'index', + 'master_timeout' ] } return await this.transport.request({ path, method, querystring, body, meta }, options) @@ -4252,15 +4273,17 @@ export default class Indices { } /** - * Configure sampling for an index or data stream - * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-sample-configuration | Elasticsearch API documentation} + * Create or update sampling configuration. Create or update the sampling configuration for the specified index. 
+ * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/group/ingest-random-sampling | Elasticsearch API documentation} */ - async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithOutMeta): Promise - async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptionsWithMeta): Promise> - async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise - async putSampleConfiguration (this: That, params?: T.TODO, options?: TransportRequestOptions): Promise { + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptions): Promise + async putSampleConfiguration (this: That, params: T.IndicesPutSampleConfigurationRequest, options?: TransportRequestOptions): Promise { const { - path: acceptedPath + path: acceptedPath, + body: acceptedBody, + query: acceptedQuery } = this[kAcceptedParams]['indices.put_sample_configuration'] const userQuery = params?.querystring @@ -4276,12 +4299,22 @@ export default class Indices { } } - params = params ?? {} for (const key in params) { - if (acceptedPath.includes(key)) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { continue } else if (key !== 'body' && key !== 'querystring') { - querystring[key] = params[key] + if (acceptedQuery.includes(key) || commonQueryParams.includes(key)) { + // @ts-expect-error + querystring[key] = params[key] + } else { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } } } @@ -4293,7 +4326,14 @@ export default class Indices { index: params.index }, acceptedParams: [ - 'index' + 'index', + 'rate', + 'max_samples', + 'max_size', + 'time_to_live', + 'if', + 'master_timeout', + 'timeout' ] } return await this.transport.request({ path, method, querystring, body, meta }, options) diff --git a/src/api/api/inference.ts b/src/api/api/inference.ts index d887535f6..045c1aa61 100644 --- a/src/api/api/inference.ts +++ b/src/api/api/inference.ts @@ -169,7 +169,6 @@ export default class Inference { 'anthropic_inference_id' ], body: [ - 'chunking_settings', 'service', 'service_settings', 'task_settings' @@ -229,7 +228,6 @@ export default class Inference { 'contextualai_inference_id' ], body: [ - 'chunking_settings', 'service', 'service_settings', 'task_settings' @@ -257,7 +255,6 @@ export default class Inference { 'deepseek_inference_id' ], body: [ - 'chunking_settings', 'service', 'service_settings' ], @@ -417,6 +414,7 @@ export default class Inference { 'watsonx_inference_id' ], body: [ + 'chunking_settings', 'service', 'service_settings' ], @@ -605,7 +603,7 @@ export default class Inference { } /** - * Delete an inference endpoint + * Delete an inference endpoint This API requires the manage_inference cluster privilege (the built-in `inference_admin` role grants this privilege). * @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-delete | Elasticsearch API documentation} */ async delete (this: That, params: T.InferenceDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -664,7 +662,7 @@ export default class Inference { } /** - * Get an inference endpoint + * Get an inference endpoint This API requires the `monitor_inference` cluster privilege (the built-in `inference_admin` and `inference_user` roles grant this privilege). 
* @see {@link https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-get | Elasticsearch API documentation} */ async get (this: That, params?: T.InferenceGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1182,7 +1180,6 @@ export default class Inference { acceptedParams: [ 'task_type', 'anthropic_inference_id', - 'chunking_settings', 'service', 'service_settings', 'task_settings', @@ -1450,7 +1447,6 @@ export default class Inference { acceptedParams: [ 'task_type', 'contextualai_inference_id', - 'chunking_settings', 'service', 'service_settings', 'task_settings', @@ -1583,7 +1579,6 @@ export default class Inference { acceptedParams: [ 'task_type', 'deepseek_inference_id', - 'chunking_settings', 'service', 'service_settings', 'timeout' @@ -2315,6 +2310,7 @@ export default class Inference { acceptedParams: [ 'task_type', 'watsonx_inference_id', + 'chunking_settings', 'service', 'service_settings', 'timeout' diff --git a/src/api/types.ts b/src/api/types.ts index 427c55aec..a5657a4f8 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -750,7 +750,7 @@ export interface FieldCapsRequest extends RequestBase { /** If true, unmapped fields are included in the response. */ include_unmapped?: boolean /** A comma-separated list of filters to apply to the response. */ - filters?: string + filters?: string | string[] /** A comma-separated list of field types to include. * Any fields that do not match one of these types will be excluded from the results. * It defaults to empty, meaning that all field types are returned. */ @@ -3400,7 +3400,7 @@ export interface TermsEnumRequest extends RequestBase { /** A comma-separated list of data streams, indices, and index aliases to search. * Wildcard (`*`) expressions are supported. * To search all data streams or indices, omit this parameter or use `*` or `_all`. */ - index: IndexName + index: Indices /** The string to match at the start of indexed terms. 
If not provided, all terms in the field are considered. */ field: Field /** The number of matching terms to return. */ @@ -3866,6 +3866,10 @@ export interface ClusterStatistics { details?: Record } +export type CommonStatsFlag = '_all' | 'store' | 'indexing' | 'get' | 'search' | 'merge' | 'flush' | 'refresh' | 'query_cache' | 'fielddata' | 'docs' | 'warmer' | 'completion' | 'segments' | 'translog' | 'request_cache' | 'recovery' | 'bulk' | 'shard_stats' | 'mappings' | 'dense_vector' | 'sparse_vector' + +export type CommonStatsFlags = CommonStatsFlag | CommonStatsFlag[] + export interface CompletionStats { /** Total amount, in bytes, of memory used for completion across all shards assigned to selected nodes. */ size_in_bytes: long @@ -4051,7 +4055,7 @@ export interface GeoHashLocation { geohash: GeoHash } -export type GeoHashPrecision = number | string +export type GeoHashPrecision = integer | string export type GeoHexCell = string @@ -4070,7 +4074,7 @@ export type GeoShapeRelation = 'intersects' | 'disjoint' | 'within' | 'contains' export type GeoTile = string -export type GeoTilePrecision = number +export type GeoTilePrecision = integer export interface GetStats { current: long @@ -4289,8 +4293,6 @@ export interface MergesStats { export type Metadata = Record -export type Metrics = string | string[] - export type MinimumShouldMatch = integer | string export type MultiTermQueryRewrite = string @@ -16577,7 +16579,7 @@ export interface ClusterRerouteRequest extends RequestBase { /** If true, then the response contains an explanation of why the commands can or cannot run. */ explain?: boolean /** Limits the information returned to the specified metrics. */ - metric?: Metrics + metric?: string | string[] /** If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. */ retry_failed?: boolean /** Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. */ @@ -16622,9 +16624,13 @@ export interface ClusterRerouteResponse { state?: any } +export type ClusterStateClusterStateMetric = '_all' | 'version' | 'master_node' | 'blocks' | 'nodes' | 'metadata' | 'routing_table' | 'routing_nodes' | 'customs' + +export type ClusterStateClusterStateMetrics = ClusterStateClusterStateMetric | ClusterStateClusterStateMetric[] + export interface ClusterStateRequest extends RequestBase { /** Limit the information returned to the specified metrics */ - metric?: Metrics + metric?: ClusterStateClusterStateMetrics /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices /** Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) */ @@ -19880,6 +19886,27 @@ export interface IndicesRetentionLease { period: Duration } +export interface IndicesSamplingConfiguration { + /** The fraction of documents to sample between 0 and 1. */ + rate: double + /** The maximum number of documents to sample. */ + max_samples: integer + /** The maximum total size of sampled documents. */ + max_size?: ByteSize + /** The maximum total size of sampled documents in bytes. */ + max_size_in_bytes: long + /** The duration for which the sampled documents should be retained. */ + time_to_live?: Duration + /** The duration for which the sampled documents should be retained, in milliseconds. */ + time_to_live_in_millis: long + /** An optional condition script that sampled documents must satisfy. */ + if?: string + /** The time when the sampling configuration was created. */ + creation_time?: DateTime + /** The time when the sampling configuration was created, in milliseconds since epoch. 
*/ + creation_time_in_millis: long +} + export type IndicesSamplingMethod = 'aggregate' | 'last_value' export interface IndicesSearchIdle { @@ -20052,7 +20079,7 @@ export interface IndicesAddBlockRequest extends RequestBase { * By default, you must explicitly name the indices you are adding blocks to. * To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ - index: IndexName + index: Indices /** The block type to add to the index. */ block: IndicesIndicesBlockOptions /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. @@ -20396,7 +20423,7 @@ export interface IndicesDataStreamsStatsRequest extends RequestBase { /** Comma-separated list of data streams used to limit the request. * Wildcard expressions (`*`) are supported. * To target all data streams in a cluster, omit this parameter or use `*`. */ - name?: IndexName + name?: Indices /** Type of data stream that wildcard patterns can match. * Supports comma-separated values, such as `open,hidden`. */ expand_wildcards?: ExpandWildcards @@ -20540,6 +20567,24 @@ export interface IndicesDeleteIndexTemplateRequest extends RequestBase { export type IndicesDeleteIndexTemplateResponse = AcknowledgedResponseBase +export interface IndicesDeleteSampleConfigurationRequest extends RequestBase { + /** The name of the index. */ + index: IndexName + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. 
*/ + timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never } +} + +export type IndicesDeleteSampleConfigurationResponse = AcknowledgedResponseBase + export interface IndicesDeleteTemplateRequest extends RequestBase { /** The name of the legacy index template to delete. * Wildcard (`*`) expressions are supported. */ @@ -20913,10 +20958,30 @@ export interface IndicesGetAliasIndexAliases { export interface IndicesGetAliasNotFoundAliasesKeys { error: string - status: number + status: integer } export type IndicesGetAliasNotFoundAliases = IndicesGetAliasNotFoundAliasesKeys -& { [property: string]: IndicesGetAliasIndexAliases | string | number } +& { [property: string]: IndicesGetAliasIndexAliases | string | integer } + +export interface IndicesGetAllSampleConfigurationRequest extends RequestBase { + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { master_timeout?: never } +} + +export interface IndicesGetAllSampleConfigurationResponse { + configurations: IndicesGetAllSampleConfigurationIndexSamplingConfiguration[] +} + +export interface IndicesGetAllSampleConfigurationIndexSamplingConfiguration { + index: IndexName + configuration: IndicesSamplingConfiguration +} export interface IndicesGetDataLifecycleDataStreamWithLifecycle { name: DataStreamName @@ -21112,7 +21177,7 @@ export interface IndicesGetIndexTemplateIndexTemplateItem { } export interface IndicesGetIndexTemplateRequest extends RequestBase { - /** Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. */ + /** Name of index template to retrieve. Wildcard (*) expressions are supported. */ name?: Name /** If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. */ local?: boolean @@ -21217,6 +21282,24 @@ export interface IndicesGetSampleRawDocument { source: Record } +export interface IndicesGetSampleConfigurationRequest extends RequestBase { + /** The name of the index. */ + index: IndexName + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never } + /** All values in `querystring` will be added to the request querystring. */ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never } +} + +export interface IndicesGetSampleConfigurationResponse { + index: IndexName + configuration: IndicesSamplingConfiguration | null +} + export interface IndicesGetSampleStatsRequest extends RequestBase { /** Single index or data stream name. Wildcards are not supported. 
*/ index: IndexName @@ -21742,6 +21825,36 @@ export interface IndicesPutMappingRequest extends RequestBase { export type IndicesPutMappingResponse = IndicesResponseBase +export interface IndicesPutSampleConfigurationRequest extends RequestBase { + /** The name of the index or data stream. */ + index: IndexName + /** Period to wait for a connection to the master node. If no response is + * received before the timeout expires, the request fails and returns an + * error. */ + master_timeout?: Duration + /** Period to wait for a response. + * If no response is received before the timeout expires, the request fails and returns an error. */ + timeout?: Duration + /** The fraction of documents to sample. Must be greater than 0 and less than or equal to 1. + * Can be specified as a number or a string. */ + rate: SpecUtilsStringified + /** The maximum number of documents to sample. Must be greater than 0 and less than or equal to 10,000. */ + max_samples?: integer + /** The maximum total size of sampled documents. Must be greater than 0 and less than or equal to 5GB. */ + max_size?: ByteSize + /** The duration for which the sampled documents should be retained. + * Must be greater than 0 and less than or equal to 30 days. */ + time_to_live?: Duration + /** An optional condition script that sampled documents must satisfy. */ + if?: string + /** All values in `body` will be added to the request body. */ + body?: string | { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, rate?: never, max_samples?: never, max_size?: never, time_to_live?: never, if?: never } + /** All values in `querystring` will be added to the request querystring. 
*/ + querystring?: { [key: string]: any } & { index?: never, master_timeout?: never, timeout?: never, rate?: never, max_samples?: never, max_size?: never, time_to_live?: never, if?: never } +} + +export type IndicesPutSampleConfigurationResponse = AcknowledgedResponseBase + export interface IndicesPutSettingsRequest extends RequestBase { /** Comma-separated list of data streams, indices, and aliases used to limit * the request. Supports wildcards (`*`). To target all data streams and @@ -22009,7 +22122,7 @@ export interface IndicesRemoveBlockRequest extends RequestBase { * By default, you must explicitly name the indices you are removing blocks from. * To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. * You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. */ - index: IndexName + index: Indices /** The block type to remove from the index. */ block: IndicesIndicesBlockOptions /** If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. @@ -22549,7 +22662,7 @@ export interface IndicesStatsMappingStats { export interface IndicesStatsRequest extends RequestBase { /** Limit the information returned the specific metrics. */ - metric?: Metrics + metric?: CommonStatsFlags /** A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices */ index?: Indices /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. 
*/ @@ -22908,7 +23021,7 @@ export interface InferenceAlibabaCloudTaskSettings { return_token?: boolean } -export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'space_embedding' | 'text_embedding' +export type InferenceAlibabaCloudTaskType = 'completion' | 'rerank' | 'sparse_embedding' | 'text_embedding' export interface InferenceAmazonBedrockServiceSettings { /** A valid AWS access key that has permissions to use Amazon Bedrock and access to models for inference requests. */ @@ -24568,7 +24681,9 @@ export interface InferencePutAlibabacloudRequest extends RequestBase { alibabacloud_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `sparse_embedding` or `text_embedding` task types. + * Not applicable to the `rerank` or `completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. */ service: InferenceAlibabaCloudServiceType @@ -24592,7 +24707,9 @@ export interface InferencePutAmazonbedrockRequest extends RequestBase { amazonbedrock_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` task type. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `amazonbedrock`. */ service: InferenceAmazonBedrockServiceType @@ -24616,7 +24733,9 @@ export interface InferencePutAmazonsagemakerRequest extends RequestBase { amazonsagemaker_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. 
*/ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `sparse_embedding` or `text_embedding` task types. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `amazon_sagemaker`. */ service: InferenceAmazonSageMakerServiceType @@ -24642,8 +24761,6 @@ export interface InferencePutAnthropicRequest extends RequestBase { anthropic_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ - chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `anthropic`. */ service: InferenceAnthropicServiceType /** Settings used to install the inference model. These settings are specific to the `anthropic` service. */ @@ -24652,9 +24769,9 @@ export interface InferencePutAnthropicRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceAnthropicTaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, anthropic_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutAnthropicResponse = InferenceInferenceEndpointInfoAnthropic @@ -24666,7 +24783,9 @@ export interface InferencePutAzureaistudioRequest extends RequestBase { azureaistudio_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` or `completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `azureaistudio`. */ service: InferenceAzureAiStudioServiceType @@ -24691,7 +24810,9 @@ export interface InferencePutAzureopenaiRequest extends RequestBase { azureopenai_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` task type. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `azureopenai`. */ service: InferenceAzureOpenAIServiceType @@ -24715,7 +24836,9 @@ export interface InferencePutCohereRequest extends RequestBase { cohere_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. 
*/ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` or `completion` task type. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `cohere`. */ service: InferenceCohereServiceType @@ -24740,8 +24863,6 @@ export interface InferencePutContextualaiRequest extends RequestBase { contextualai_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ - chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `contextualai`. */ service: InferenceContextualAIServiceType /** Settings used to install the inference model. These settings are specific to the `contextualai` service. */ @@ -24750,9 +24871,9 @@ export interface InferencePutContextualaiRequest extends RequestBase { * These settings are specific to the task type you specified. */ task_settings?: InferenceContextualAITaskSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never, task_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, contextualai_inference_id?: never, timeout?: never, service?: never, service_settings?: never, task_settings?: never } } export type InferencePutContextualaiResponse = InferenceInferenceEndpointInfoContextualAi @@ -24762,7 +24883,9 @@ export interface InferencePutCustomRequest extends RequestBase { task_type: InferenceCustomTaskType /** The unique identifier of the inference endpoint. */ custom_inference_id: Id - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `sparse_embedding` or `text_embedding` task types. + * Not applicable to the `rerank` or `completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `custom`. */ service: InferenceCustomServiceType @@ -24787,17 +24910,15 @@ export interface InferencePutDeepseekRequest extends RequestBase { deepseek_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ - chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `deepseek`. */ service: InferenceDeepSeekServiceType /** Settings used to install the inference model. * These settings are specific to the `deepseek` service. */ service_settings: InferenceDeepSeekServiceSettings /** All values in `body` will be added to the request body. 
*/ - body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. */ - querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, deepseek_inference_id?: never, timeout?: never, service?: never, service_settings?: never } } export type InferencePutDeepseekResponse = InferenceInferenceEndpointInfoDeepSeek @@ -24812,7 +24933,7 @@ export interface InferencePutElasticsearchRequest extends RequestBase { timeout?: Duration /** The chunking configuration object. * Applies only to the `sparse_embedding` and `text_embedding` task types. - * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ + * Not applicable to the `rerank` task type. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `elasticsearch`. */ service: InferenceElasticsearchServiceType @@ -24858,7 +24979,9 @@ export interface InferencePutGoogleaistudioRequest extends RequestBase { googleaistudio_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` task type. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `googleaistudio`. 
*/ service: InferenceGoogleAiServiceType @@ -24879,7 +25002,9 @@ export interface InferencePutGooglevertexaiRequest extends RequestBase { googlevertexai_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `googlevertexai`. */ service: InferenceGoogleVertexAIServiceType @@ -24903,7 +25028,9 @@ export interface InferencePutHuggingFaceRequest extends RequestBase { huggingface_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank`, `completion`, or `chat_completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `hugging_face`. */ service: InferenceHuggingFaceServiceType @@ -24927,7 +25054,9 @@ export interface InferencePutJinaaiRequest extends RequestBase { jinaai_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` task type. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `jinaai`. 
*/ service: InferenceJinaAIServiceType @@ -24951,7 +25080,9 @@ export interface InferencePutLlamaRequest extends RequestBase { llama_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `llama`. */ service: InferenceLlamaServiceType @@ -24972,7 +25103,9 @@ export interface InferencePutMistralRequest extends RequestBase { mistral_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `mistral`. */ service: InferenceMistralServiceType @@ -24994,7 +25127,9 @@ export interface InferencePutOpenaiRequest extends RequestBase { openai_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `openai`. 
*/ service: InferenceOpenAIServiceType @@ -25018,7 +25153,9 @@ export interface InferencePutVoyageaiRequest extends RequestBase { voyageai_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration - /** The chunking configuration object. */ + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `rerank` task type. */ chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `voyageai`. */ service: InferenceVoyageAIServiceType @@ -25042,14 +25179,18 @@ export interface InferencePutWatsonxRequest extends RequestBase { watsonx_inference_id: Id /** Specifies the amount of time to wait for the inference endpoint to be created. */ timeout?: Duration + /** The chunking configuration object. + * Applies only to the `text_embedding` task type. + * Not applicable to the `completion` or `chat_completion` task types. */ + chunking_settings?: InferenceInferenceChunkingSettings /** The type of service supported for the specified task type. In this case, `watsonxai`. */ service: InferenceWatsonxServiceType /** Settings used to install the inference model. These settings are specific to the `watsonxai` service. */ service_settings: InferenceWatsonxServiceSettings /** All values in `body` will be added to the request body. */ - body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never } + body?: string | { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } /** All values in `querystring` will be added to the request querystring. 
*/ - querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, service?: never, service_settings?: never } + querystring?: { [key: string]: any } & { task_type?: never, watsonx_inference_id?: never, timeout?: never, chunking_settings?: never, service?: never, service_settings?: never } } export type InferencePutWatsonxResponse = InferenceInferenceEndpointInfoWatsonx @@ -29247,7 +29388,9 @@ export interface MlEvaluateDataFrameRequest extends RequestBase { querystring?: { [key: string]: any } & { evaluation?: never, index?: never, query?: never } } -export interface MlEvaluateDataFrameResponse { +export type MlEvaluateDataFrameResponse = MlEvaluateDataFrameResponseBody + +export interface MlEvaluateDataFrameResponseBody { /** Evaluation results for a classification analysis. * It outputs a prediction that identifies to which of the classes each document belongs. */ classification?: MlEvaluateDataFrameDataframeClassificationSummary @@ -32507,6 +32650,10 @@ export interface NodesInfoNodeThreadPoolInfo { type: string } +export type NodesInfoNodesInfoMetric = '_all' | '_none' | 'settings' | 'os' | 'process' | 'jvm' | 'thread_pool' | 'transport' | 'http' | 'remote_cluster_server' | 'plugins' | 'ingest' | 'aggregations' | 'indices' + +export type NodesInfoNodesInfoMetrics = NodesInfoNodesInfoMetric | NodesInfoNodesInfoMetric[] + export interface NodesInfoRemoveClusterServer { bound_address: TransportAddress[] publish_address: TransportAddress @@ -32516,7 +32663,7 @@ export interface NodesInfoRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds /** Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. */ - metric?: Metrics + metric?: NodesInfoNodesInfoMetrics /** If true, returns settings in flat format. */ flat_settings?: boolean /** Period to wait for a response. 
If no response is received before the timeout expires, the request fails and returns an error. */ @@ -32555,13 +32702,17 @@ export interface NodesReloadSecureSettingsResponseBase extends NodesNodesRespons nodes: Record } +export type NodesStatsNodeStatsMetric = '_all' | '_none' | 'indices' | 'os' | 'process' | 'jvm' | 'thread_pool' | 'fs' | 'transport' | 'http' | 'breaker' | 'script' | 'discovery' | 'ingest' | 'adaptive_selection' | 'script_cache' | 'indexing_pressure' | 'repositories' | 'allocations' + +export type NodesStatsNodeStatsMetrics = NodesStatsNodeStatsMetric | NodesStatsNodeStatsMetric[] + export interface NodesStatsRequest extends RequestBase { /** Comma-separated list of node IDs or names used to limit returned information. */ node_id?: NodeIds /** Limit the information returned to the specified metrics */ - metric?: Metrics + metric?: NodesStatsNodeStatsMetrics /** Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. */ - index_metric?: Metrics + index_metric?: CommonStatsFlags /** Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. */ completion_fields?: Fields /** Comma-separated list or wildcard expressions of fields to include in fielddata statistics. */ @@ -32600,12 +32751,16 @@ export interface NodesUsageNodeUsage { aggregations: Record } +export type NodesUsageNodesUsageMetric = '_all' | 'rest_actions' | 'aggregations' + +export type NodesUsageNodesUsageMetrics = NodesUsageNodesUsageMetric | NodesUsageNodesUsageMetric[] + export interface NodesUsageRequest extends RequestBase { /** A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes */ node_id?: NodeIds /** Limits the information returned to the specific metrics. 
- * A comma-separated list of the following options: `_all`, `rest_actions`. */ - metric?: Metrics + * A comma-separated list of the following options: `_all`, `rest_actions`, `aggregations`. */ + metric?: NodesUsageNodesUsageMetrics /** Period to wait for a response. * If no response is received before the timeout expires, the request fails and returns an error. */ timeout?: Duration @@ -33970,7 +34125,7 @@ export interface SecurityClearCachedPrivilegesRequest extends RequestBase { /** A comma-separated list of applications. * To clear all applications, use an asterism (`*`). * It does not support other wildcard patterns. */ - application: Name + application: Names /** All values in `body` will be added to the request body. */ body?: string | { [key: string]: any } & { application?: never } /** All values in `querystring` will be added to the request querystring. */ @@ -36684,7 +36839,7 @@ export interface SnapshotDeleteRequest extends RequestBase { repository: Name /** A comma-separated list of snapshot names to delete. * It also accepts wildcards (`*`). */ - snapshot: Name + snapshot: Names /** The period to wait for the master node. * If the master node is not available before the timeout expires, the request fails and returns an error. * To indicate that the request should never timeout, set it to `-1`. */ @@ -37819,7 +37974,7 @@ export interface TextStructureFindFieldStructureRequest extends RequestBase { /** If `format` is set to `delimited`, you can specify the column names in a comma-separated list. * If this parameter is not specified, the structure finder uses the column names from the header row of the text. * If the text does not have a header row, columns are named "column1", "column2", "column3", for example. */ - column_names?: string + column_names?: string | string[] /** If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. 
* Only a single character is supported; the delimiter cannot have multiple characters. * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). @@ -37937,7 +38092,7 @@ export interface TextStructureFindMessageStructureRequest extends RequestBase { /** If the format is `delimited`, you can specify the column names in a comma-separated list. * If this parameter is not specified, the structure finder uses the column names from the header row of the text. * If the text does not have a header role, columns are named "column1", "column2", "column3", for example. */ - column_names?: string + column_names?: string | string[] /** If you the format is `delimited`, you can specify the character used to delimit the values in each row. * Only a single character is supported; the delimiter cannot have multiple characters. * By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`).