From e4f5df1eeaa54e25e19213cc6e79648b8531ac25 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 3 Jan 2025 18:54:02 -0800 Subject: [PATCH 1/4] Add examples to index APIs --- specification/_doc_ids/table.csv | 12 +++ .../field_caps/FieldCapabilitiesRequest.ts | 2 +- .../ClusterDeleteComponentTemplateRequest.ts | 1 - .../ClusterGetComponentTemplateRequest.ts | 2 +- .../ClusterPutComponentTemplateRequest.ts | 8 +- ...erPutComponentTemplateRequestExample1.yaml | 17 ++++ ...erPutComponentTemplateRequestExample2.yaml | 18 ++++ .../DeleteDanglingIndexRequest.ts | 3 +- .../ImportDanglingIndexRequest.ts | 2 + .../ImportDanglingIndexResponseExample1.yaml | 7 ++ .../ListDanglingIndicesRequest.ts | 2 + .../ListDanglingIndicesResponseExample1.yaml | 9 ++ .../indices/analyze/IndicesAnalyzeRequest.ts | 9 +- .../indicesAnalyzeRequestExample1.yaml | 10 ++- .../indicesAnalyzeRequestExample2.yaml | 9 ++ .../indicesAnalyzeRequestExample3.yaml | 11 +++ .../indicesAnalyzeRequestExample4.yaml | 14 +++ .../indicesAnalyzeRequestExample5.yaml | 7 ++ .../indicesAnalyzeRequestExample6.yaml | 7 ++ .../indicesAnalyzeRequestExample7.yaml | 13 +++ .../indicesAnalyzeResponseExample7.yaml | 36 ++++++++ ...Request.ts => IndicesClearCacheRequest.ts} | 5 ++ .../indices/clone/IndicesCloneRequest.ts | 22 +++++ .../indices/close/CloseIndexRequest.ts | 1 + ...1.yaml => CloseIndexResponseExample1.yaml} | 0 .../indices/create/IndicesCreateRequest.ts | 22 ++++- .../create/indicesCreateRequestExample1.yaml | 2 +- .../create/indicesCreateRequestExample2.yaml | 2 +- .../create/indicesCreateRequestExample3.yaml | 14 +++ .../indices/delete/IndicesDeleteRequest.ts | 9 +- .../delete_alias/IndicesDeleteAliasRequest.ts | 2 + .../IndicesDeleteIndexTemplateRequest.ts | 3 +- .../IndicesDeleteTemplateRequest.ts | 5 +- .../disk_usage/IndicesDiskUsageRequest.ts | 4 + .../indicesDiskUsageResponseExample1.yaml | 89 +++++++++++++++++++ .../indices/exists/IndicesExistsRequest.ts | 3 +- .../IndicesExistsTemplateRequest.ts | 26 
+++++- .../IndicesFieldUsageStatsRequest.ts | 4 + ...ndicesFieldUsageStatsResponseExample1.yaml | 57 ++++++++++++ .../indices/flush/IndicesFlushRequest.ts | 1 + .../forcemerge/IndicesForceMergeRequest.ts | 42 +++++++++ .../indices/get/IndicesGetRequest.ts | 3 +- .../get_alias/IndicesGetAliasRequest.ts | 2 + .../IndicesGetFieldMappingRequest.ts | 5 ++ ...ndicesGetFieldMappingResponseExample1.yaml | 11 +++ ...ndicesGetFieldMappingResponseExample2.yaml | 14 +++ ...ndicesGetFieldMappingResponseExample3.yaml | 16 ++++ .../IndicesGetIndexTemplateRequest.ts | 5 +- .../get_mapping/IndicesGetMappingRequest.ts | 3 +- .../get_settings/IndicesGetSettingsRequest.ts | 7 +- .../get_template/IndicesGetTemplateRequest.ts | 7 +- .../indices/open/IndicesOpenRequest.ts | 25 +++++- .../indicesPutAliasRequestExample1.yaml | 5 ++ .../IndicesPutIndexTemplateRequest.ts | 35 +++++++- ...ndicesPutIndexTemplateRequestExample1.yaml | 2 +- ...ndicesPutIndexTemplateRequestExample2.yaml | 20 +++++ .../put_template/IndicesPutTemplateRequest.ts | 11 +++ .../indicesPutTemplateRequestExample1.yaml | 2 +- .../indicesPutTemplateRequestExample2.yaml | 19 ++++ .../recovery/IndicesRecoveryRequest.ts | 4 + .../indicesRecoveryResponseExample1.yaml | 10 ++- .../indicesRecoveryResponseExample2.yaml | 43 +++++++++ .../ResolveClusterResponseExample1.yaml | 6 +- .../indicesResolveResponseExample1.yaml | 6 +- .../indicesRolloverResponseExample1.yaml | 34 +++++-- .../segments/IndicesSegmentsRequest.ts | 2 + .../indicesSegmentsResponseExample1.yaml | 10 +++ .../shard_stores/IndicesShardStoresRequest.ts | 3 +- .../indicesShardStoresResponseExample1.yaml | 24 +++++ .../IndicesSimulateTemplateRequest.ts | 2 +- .../indices/stats/IndicesStatsRequest.ts | 3 +- .../GetPipelineResponseExample1.yaml | 4 +- .../SimulatePipelineResponseExample1.yaml | 3 +- 73 files changed, 802 insertions(+), 56 deletions(-) create mode 100644 specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample1.yaml create 
mode 100644 specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample2.yaml create mode 100644 specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponseExample1.yaml create mode 100644 specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponseExample1.yaml create mode 100644 specification/indices/analyze/indicesAnalyzeRequestExample2.yaml create mode 100644 specification/indices/analyze/indicesAnalyzeRequestExample3.yaml create mode 100644 specification/indices/analyze/indicesAnalyzeRequestExample4.yaml create mode 100644 specification/indices/analyze/indicesAnalyzeRequestExample5.yaml create mode 100644 specification/indices/analyze/indicesAnalyzeRequestExample6.yaml create mode 100644 specification/indices/analyze/indicesAnalyzeRequestExample7.yaml create mode 100644 specification/indices/analyze/indicesAnalyzeResponseExample7.yaml rename specification/indices/clear_cache/{IndicesIndicesClearCacheRequest.ts => IndicesClearCacheRequest.ts} (91%) rename specification/indices/close/{indicesCloseResponseExample1.yaml => CloseIndexResponseExample1.yaml} (100%) create mode 100644 specification/indices/create/indicesCreateRequestExample3.yaml create mode 100644 specification/indices/disk_usage/indicesDiskUsageResponseExample1.yaml create mode 100644 specification/indices/field_usage_stats/indicesFieldUsageStatsResponseExample1.yaml create mode 100644 specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample1.yaml create mode 100644 specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample2.yaml create mode 100644 specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample3.yaml create mode 100644 specification/indices/put_alias/indicesPutAliasRequestExample1.yaml create mode 100644 specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample2.yaml create mode 100644 
specification/indices/put_template/indicesPutTemplateRequestExample2.yaml create mode 100644 specification/indices/recovery/indicesRecoveryResponseExample2.yaml create mode 100644 specification/indices/segments/indicesSegmentsResponseExample1.yaml create mode 100644 specification/indices/shard_stores/indicesShardStoresResponseExample1.yaml diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index 279300a248..400121a449 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -117,6 +117,9 @@ connector-update-status,https://www.elastic.co/guide/en/elasticsearch/reference/ convert-processor,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/convert-processor.html cron-expressions,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/api-conventions.html#api-cron-expressions csv-processor,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/csv-processor.html +dangling-index-delete,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/dangling-index-delete.html +dangling-index-import,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/dangling-index-import.html +dangling-indices-list,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/dangling-indices-list.html data-processor,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/date-processor.html data-stream-path-param,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-data-stream.html#indices-create-data-stream-api-path-params data-streams,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/data-streams.html @@ -224,6 +227,7 @@ index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index.htm indexing-buffer,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indexing-buffer.html index-modules-merge,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules-merge.html 
index-templates,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-templates.html +index-templates-v1,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-templates-v1.html indices-aliases,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-aliases.html indices-analyze,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-analyze.html indices-clearcache,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-clearcache.html @@ -232,16 +236,22 @@ indices-close,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/i indices-component-template,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html indices-create-data-stream,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-data-stream.html indices-create-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-index.html +indices-delete-alias,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-alias.html indices-delete-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-index.html +indices-delete-template,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-template.html +indices-delete-template-v1,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-template-v1.html indices-disk-usage,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-disk-usage.html indices-downsample-data-stream,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-downsample-data-stream.html indices-exists,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-exists.html indices-flush,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-flush.html
indices-forcemerge,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-forcemerge.html +indices-get-alias,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-alias.html indices-get-field-mapping,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-field-mapping.html indices-get-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-index.html indices-get-mapping,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-mapping.html indices-get-settings,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-settings.html +indices-get-template,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-template.html +indices-get-template-v1,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-template-v1.html indices-open-close,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-open-close.html indices-put-mapping,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-put-mapping.html indices-recovery,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-recovery.html @@ -254,6 +264,7 @@ indices-shards-stores,https://www.elastic.co/guide/en/elasticsearch/reference/{b indices-shrink-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shrink-index.html indices-split-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-split-index.html indices-stats,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-stats.html +indices-template-exists-v1,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-template-exists-v1.html indices-templates,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-templates.html indices-update-settings,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-update-settings.html
infer-trained-model-deployment,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-trained-model-deployment.html diff --git a/specification/_global/field_caps/FieldCapabilitiesRequest.ts b/specification/_global/field_caps/FieldCapabilitiesRequest.ts index 44e17bba60..6715bd5a5b 100644 --- a/specification/_global/field_caps/FieldCapabilitiesRequest.ts +++ b/specification/_global/field_caps/FieldCapabilitiesRequest.ts @@ -33,7 +33,7 @@ import { QueryContainer } from '@_types/query_dsl/abstractions' * @rest_spec_name field_caps * @availability stack since=5.4.0 stability=stable * @availability serverless stability=stable visibility=public - * @index_privileges view_index_metadata,read,manage + * @index_privileges view_index_metadata,read * @doc_tag search */ export interface Request extends RequestBase { diff --git a/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts b/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts index 7828bd2f37..48aa057d1b 100644 --- a/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts +++ b/specification/cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts @@ -23,7 +23,6 @@ import { Duration } from '@_types/Time' /** * Delete component templates. - * Deletes component templates. * Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. 
* @rest_spec_name cluster.delete_component_template * @availability stack since=7.8.0 stability=stable diff --git a/specification/cluster/get_component_template/ClusterGetComponentTemplateRequest.ts b/specification/cluster/get_component_template/ClusterGetComponentTemplateRequest.ts index d70614bae0..1e3324c6bb 100644 --- a/specification/cluster/get_component_template/ClusterGetComponentTemplateRequest.ts +++ b/specification/cluster/get_component_template/ClusterGetComponentTemplateRequest.ts @@ -23,7 +23,7 @@ import { Duration } from '@_types/Time' /** * Get component templates. - * Retrieves information about component templates. + * Get information about component templates. * @rest_spec_name cluster.get_component_template * @availability stack since=7.8.0 stability=stable * @availability serverless stability=stable visibility=public diff --git a/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts b/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts index 0f45d9ee64..c32fca3ff3 100644 --- a/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts +++ b/specification/cluster/put_component_template/ClusterPutComponentTemplateRequest.ts @@ -24,7 +24,6 @@ import { Duration } from '@_types/Time' /** * Create or update a component template. - * Creates or updates a component template. * Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. * * An index template can be composed of multiple component templates. @@ -39,6 +38,11 @@ import { Duration } from '@_types/Time' * * You can use C-style `/* *\/` block comments in component templates. * You can include comments anywhere in the request body except before the opening curly bracket. + * + * **Applying component templates** + * + * You cannot directly apply a component template to a data stream or index. 
+ * To be applied, a component template must be included in an index template's `composed_of` list. * @rest_spec_name cluster.put_component_template * @availability stack since=7.8.0 stability=stable * @availability serverless stability=stable visibility=public @@ -81,7 +85,7 @@ export interface Request extends RequestBase { version?: VersionNumber /** * Optional user metadata about the component template. - * May have any contents. This map is not automatically generated by Elasticsearch. + * It may have any contents. This map is not automatically generated by Elasticsearch. * This information is stored in the cluster state, so keeping it short is preferable. * To unset `_meta`, replace the template without specifying this information. */ diff --git a/specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample1.yaml b/specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample1.yaml new file mode 100644 index 0000000000..6e8be1b936 --- /dev/null +++ b/specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample1.yaml @@ -0,0 +1,17 @@ +summary: Create a template +# method_request: PUT _component_template/template_1 +# description: +# type: request +value: + template: + settings: + number_of_shards: 1 + mappings: + _source: + enabled: false + properties: + host_name: + type: keyword + created_at: + type: date + format: 'EEE MMM dd HH:mm:ss Z yyyy' diff --git a/specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample2.yaml b/specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample2.yaml new file mode 100644 index 0000000000..aebdd3f5ec --- /dev/null +++ b/specification/cluster/put_component_template/ClusterPutComponentTemplateRequestExample2.yaml @@ -0,0 +1,18 @@ +summary: Create a template with aliases +# method_request: PUT _component_template/template_1 +description: > + You can include index aliases in a component template. 
+ During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to. +# type: request +value: + template: + settings: + number_of_shards: 1 + aliases: + alias1: {} + alias2: + filter: + term: + user.id: kimchy + routing: shard-1 + '{index}-alias': {} diff --git a/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts b/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts index ae4135c600..55762bc95e 100644 --- a/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts +++ b/specification/dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts @@ -23,12 +23,13 @@ import { Duration } from '@_types/Time' /** * Delete a dangling index. - * * If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. * For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. 
* @rest_spec_name dangling_indices.delete_dangling_index * @availability stack since=7.9.0 stability=stable * @doc_tag indices + * @doc_id dangling-index-delete + * @cluster_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts b/specification/dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts index 559280bf85..96e59d3bba 100644 --- a/specification/dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts +++ b/specification/dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts @@ -29,6 +29,8 @@ import { Duration } from '@_types/Time' * @rest_spec_name dangling_indices.import_dangling_index * @availability stack since=7.9.0 stability=stable * @doc_tag indices + * @doc_id dangling-index-import + * @cluster_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponseExample1.yaml b/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponseExample1.yaml new file mode 100644 index 0000000000..0ccafb09f0 --- /dev/null +++ b/specification/dangling_indices/import_dangling_index/ImportDanglingIndexResponseExample1.yaml @@ -0,0 +1,7 @@ +# summary: '' +description: > + A successful response from `POST /_dangling/zmM4e0JtBkeUjiHD-MihPQ?accept_data_loss=true`. 
+# type: response +# response_code: 200 +value: + acknowledged: true diff --git a/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts b/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts index 1a99461891..61d663b9fc 100644 --- a/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts +++ b/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts @@ -29,5 +29,7 @@ import { RequestBase } from '@_types/Base' * @rest_spec_name dangling_indices.list_dangling_indices * @availability stack since=7.9.0 stability=stable * @doc_tag indices + * @doc_id dangling-indices-list + * @cluster_privileges manage */ export interface Request extends RequestBase {} diff --git a/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponseExample1.yaml b/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponseExample1.yaml new file mode 100644 index 0000000000..d64013d065 --- /dev/null +++ b/specification/dangling_indices/list_dangling_indices/ListDanglingIndicesResponseExample1.yaml @@ -0,0 +1,9 @@ +# summary: +# description: '' +# type: response +# response_code: 200 +value: + "{\n \"dangling_indices\": [\n {\n \"index_name\": \"my-index-000001\"\ + ,\n \"index_uuid\": \"zmM4e0JtBkeUjiHD-MihPQ\",\n \"creation_date_millis\"\ + : 1589414451372,\n \"node_ids\": [\n \"pL47UN3dAb2d5RCWP6lQ3e\"\n ]\n\ + \ }\n ]\n}" diff --git a/specification/indices/analyze/IndicesAnalyzeRequest.ts b/specification/indices/analyze/IndicesAnalyzeRequest.ts index 934e31ed63..095c9ae5e4 100644 --- a/specification/indices/analyze/IndicesAnalyzeRequest.ts +++ b/specification/indices/analyze/IndicesAnalyzeRequest.ts @@ -26,11 +26,18 @@ import { TextToAnalyze } from './types' /** * Get tokens from text analysis. 
- * The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens. + * The analyze API performs analysis on a text string and returns the resulting tokens. + * + * Generating an excessive amount of tokens may cause a node to run out of memory. + * The `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced. + * If more than this limit of tokens gets generated, an error occurs. + * The `_analyze` endpoint without a specified index will always use `10000` as its limit. * @doc_id indices-analyze + * @ext_doc_id analysis * @rest_spec_name indices.analyze * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @index_privileges index */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/analyze/indicesAnalyzeRequestExample1.yaml b/specification/indices/analyze/indicesAnalyzeRequestExample1.yaml index 4b6f322b7a..0c55363578 100644 --- a/specification/indices/analyze/indicesAnalyzeRequestExample1.yaml +++ b/specification/indices/analyze/indicesAnalyzeRequestExample1.yaml @@ -1,5 +1,7 @@ -summary: Perform analysis on a text string and returns the resulting tokens. -method_request: GET /_analyze -# description: '' +summary: No index specified +# method_request: GET /_analyze +description: You can apply any of the built-in analyzers to the text string without specifying an index.
# type: request -value: "{\n \"analyzer\" : \"standard\",\n \"text\" : \"Quick Brown Foxes!\"\n}" +value: + analyzer: standard + text: this is a test diff --git a/specification/indices/analyze/indicesAnalyzeRequestExample2.yaml b/specification/indices/analyze/indicesAnalyzeRequestExample2.yaml new file mode 100644 index 0000000000..2994ff6804 --- /dev/null +++ b/specification/indices/analyze/indicesAnalyzeRequestExample2.yaml @@ -0,0 +1,9 @@ +summary: An array of text strings +# method_request: GET /_analyze +description: If the text parameter is provided as an array of strings, it is analyzed as a multi-value field. +# type: request +value: + analyzer: standard + text: + - this is a test + - the second text diff --git a/specification/indices/analyze/indicesAnalyzeRequestExample3.yaml b/specification/indices/analyze/indicesAnalyzeRequestExample3.yaml new file mode 100644 index 0000000000..85183cc34e --- /dev/null +++ b/specification/indices/analyze/indicesAnalyzeRequestExample3.yaml @@ -0,0 +1,11 @@ +summary: Custom analyzer example 1 +# method_request: GET /_analyze +description: You can test a custom transient analyzer built from tokenizers, token filters, and char filters. Token filters use the filter parameter. +# type: request +value: + tokenizer: keyword + filter: + - lowercase + char_filter: + - html_strip + text: 'this is a test' diff --git a/specification/indices/analyze/indicesAnalyzeRequestExample4.yaml b/specification/indices/analyze/indicesAnalyzeRequestExample4.yaml new file mode 100644 index 0000000000..1a7a23f4fa --- /dev/null +++ b/specification/indices/analyze/indicesAnalyzeRequestExample4.yaml @@ -0,0 +1,14 @@ +summary: Custom analyzer example 2 +# method_request: GET /_analyze +description: Custom tokenizers, token filters, and character filters can be specified in the request body.
+# type: request +value: + tokenizer: whitespace + filter: + - lowercase + - type: stop + stopwords: + - a + - is + - this + text: this is a test diff --git a/specification/indices/analyze/indicesAnalyzeRequestExample5.yaml b/specification/indices/analyze/indicesAnalyzeRequestExample5.yaml new file mode 100644 index 0000000000..1201479445 --- /dev/null +++ b/specification/indices/analyze/indicesAnalyzeRequestExample5.yaml @@ -0,0 +1,7 @@ +summary: Derive analyzer from field mapping +# method_request: GET /analyze_sample/_analyze +description: Run `GET /analyze_sample/_analyze` to run an analysis on the text using the default index analyzer associated with the `analyze_sample` index. Alternatively, the analyzer can be derived based on a field mapping. +# type: request +value: + field: obj1.field1 + text: this is a test diff --git a/specification/indices/analyze/indicesAnalyzeRequestExample6.yaml b/specification/indices/analyze/indicesAnalyzeRequestExample6.yaml new file mode 100644 index 0000000000..770a880b47 --- /dev/null +++ b/specification/indices/analyze/indicesAnalyzeRequestExample6.yaml @@ -0,0 +1,7 @@ +summary: Normalizer +# method_request: GET /analyze_sample/_analyze +description: Run `GET /analyze_sample/_analyze` and supply a normalizer for a keyword field if there is a normalizer associated with the specified index. +# type: request +value: + normalizer: my_normalizer + text: BaR diff --git a/specification/indices/analyze/indicesAnalyzeRequestExample7.yaml b/specification/indices/analyze/indicesAnalyzeRequestExample7.yaml new file mode 100644 index 0000000000..e08254a7d7 --- /dev/null +++ b/specification/indices/analyze/indicesAnalyzeRequestExample7.yaml @@ -0,0 +1,13 @@ +summary: Explain analysis +# method_request: GET /_analyze +description: > + If you want to get more advanced details, set `explain` to `true`. It will output all token attributes for each token. You can filter token attributes you want to output by setting the `attributes` option. 
NOTE: The format of the additional detail information is labelled as experimental in Lucene and it may change in the future. +# type: request +value: + tokenizer: standard + filter: + - snowball + text: detailed output + explain: true + attributes: + - keyword diff --git a/specification/indices/analyze/indicesAnalyzeResponseExample7.yaml b/specification/indices/analyze/indicesAnalyzeResponseExample7.yaml new file mode 100644 index 0000000000..4783a5f5b2 --- /dev/null +++ b/specification/indices/analyze/indicesAnalyzeResponseExample7.yaml @@ -0,0 +1,36 @@ +# summary: '' +description: A successful response for an analysis with `explain` set to `true`. +# type: response +# response_code: 200 +value: + detail: + custom_analyzer: true + charfilters: [] + tokenizer: + name: standard + tokens: + - token: detailed + start_offset: 0 + end_offset: 8 + type: + position: 0 + - token: output + start_offset: 9 + end_offset: 15 + type: + position: 1 + tokenfilters: + - name: snowball + tokens: + - token: detail + start_offset: 0 + end_offset: 8 + type: + position: 0 + keyword: false + - token: output + start_offset: 9 + end_offset: 15 + type: + position: 1 + keyword: false diff --git a/specification/indices/clear_cache/IndicesIndicesClearCacheRequest.ts b/specification/indices/clear_cache/IndicesClearCacheRequest.ts similarity index 91% rename from specification/indices/clear_cache/IndicesIndicesClearCacheRequest.ts rename to specification/indices/clear_cache/IndicesClearCacheRequest.ts index 395bdebce4..d798c40285 100644 --- a/specification/indices/clear_cache/IndicesIndicesClearCacheRequest.ts +++ b/specification/indices/clear_cache/IndicesClearCacheRequest.ts @@ -24,9 +24,14 @@ import { ExpandWildcards, Fields, Indices } from '@_types/common' * Clear the cache. * Clear the cache of one or more indices. * For data streams, the API clears the caches of the stream's backing indices. + * + * By default, the clear cache API clears all caches. 
+ * To clear only specific caches, use the `fielddata`, `query`, or `request` parameters. + * To clear the cache only of specific fields, use the `fields` parameter. * @rest_spec_name indices.clear_cache * @availability stack stability=stable * @availability serverless stability=stable visibility=private + * @index_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/clone/IndicesCloneRequest.ts b/specification/indices/clone/IndicesCloneRequest.ts index 7623074936..c9eac897f7 100644 --- a/specification/indices/clone/IndicesCloneRequest.ts +++ b/specification/indices/clone/IndicesCloneRequest.ts @@ -45,12 +45,34 @@ import { Duration } from '@_types/Time' * * IMPORTANT: Indices can only be cloned if they meet the following requirements: * + * * The index must be marked as read-only and have a cluster health status of green. * * The target index must not exist. * * The source index must have the same number of primary shards as the target index. * * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index. * + * The current write index on a data stream cannot be cloned. + * In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned. + * + * NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index. + * + * **Monitor the cloning process** + * + * The cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`. + * + * The `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. + * At this point, all shards are in the state unassigned. 
+ * If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node. + * + * Once the primary shard is allocated, it moves to state initializing, and the clone process begins. + * When the clone operation completes, the shard will become active. + * At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node. + * + * **Wait for active shards** + * + * Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. * @rest_spec_name indices.clone * @availability stack since=7.4.0 stability=stable + * @index_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/close/CloseIndexRequest.ts b/specification/indices/close/CloseIndexRequest.ts index 46ee95332d..4ce1f61619 100644 --- a/specification/indices/close/CloseIndexRequest.ts +++ b/specification/indices/close/CloseIndexRequest.ts @@ -44,6 +44,7 @@ import { Duration } from '@_types/Time' * @rest_spec_name indices.close * @availability stack stability=stable * @availability serverless stability=stable visibility=private + * @index_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/close/indicesCloseResponseExample1.yaml b/specification/indices/close/CloseIndexResponseExample1.yaml similarity index 100% rename from specification/indices/close/indicesCloseResponseExample1.yaml rename to specification/indices/close/CloseIndexResponseExample1.yaml diff --git a/specification/indices/create/IndicesCreateRequest.ts b/specification/indices/create/IndicesCreateRequest.ts index b4dd6380bd..cbf5cbc20e 100644 --- a/specification/indices/create/IndicesCreateRequest.ts +++ b/specification/indices/create/IndicesCreateRequest.ts @@ -27,7 +27,27 @@ import { Duration } 
from '@_types/Time' /** * Create an index. - * Creates a new index. + * You can use the create index API to add a new index to an Elasticsearch cluster. + * When creating an index, you can specify the following: + * + * * Settings for the index. + * * Mappings for fields in the index. + * * Index aliases + * + * **Wait for active shards** + * + * By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. + * The index creation response will indicate what happened. + * For example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out. + * Note that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful. + * These values simply indicate whether the operation completed before the timeout. + * If `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. + * If `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`). + * + * You can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`. + * Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.
+ + * @doc_id indices-create-index * @rest_spec_name indices.create * @availability stack stability=stable diff --git a/specification/indices/create/indicesCreateRequestExample1.yaml b/specification/indices/create/indicesCreateRequestExample1.yaml index 21a40348d1..3ec29bbca3 100644 --- a/specification/indices/create/indicesCreateRequestExample1.yaml +++ b/specification/indices/create/indicesCreateRequestExample1.yaml @@ -1,4 +1,4 @@ -summary: Creates an index. +summary: Create an index. # method_request: PUT /my-index-000001 description: This request specifies the `number_of_shards` and `number_of_replicas`. # type: request diff --git a/specification/indices/create/indicesCreateRequestExample2.yaml b/specification/indices/create/indicesCreateRequestExample2.yaml index a64d16b63f..b015c2b6b5 100644 --- a/specification/indices/create/indicesCreateRequestExample2.yaml +++ b/specification/indices/create/indicesCreateRequestExample2.yaml @@ -1,4 +1,4 @@ -summary: Creates an index with mapping. +summary: Create an index with mappings. # method_request: PUT /test description: You can provide mapping definitions in the create index API requests. # type: request diff --git a/specification/indices/create/indicesCreateRequestExample3.yaml b/specification/indices/create/indicesCreateRequestExample3.yaml new file mode 100644 index 0000000000..9d77b0cc19 --- /dev/null +++ b/specification/indices/create/indicesCreateRequestExample3.yaml @@ -0,0 +1,14 @@ +summary: Create an index with aliases. +# method_request: PUT /test +description: > + You can provide mapping definitions in the create index API requests. + Index alias names also support date math. 
+# type: request +value: + aliases: + alias_1: {} + alias_2: + filter: + term: + 'user.id': 'kimchy' + routing: shard-1 diff --git a/specification/indices/delete/IndicesDeleteRequest.ts b/specification/indices/delete/IndicesDeleteRequest.ts index dde2510600..7cc615d056 100644 --- a/specification/indices/delete/IndicesDeleteRequest.ts +++ b/specification/indices/delete/IndicesDeleteRequest.ts @@ -23,10 +23,17 @@ import { Duration } from '@_types/Time' /** * Delete indices. - * Deletes one or more indices. + * Deleting an index deletes its documents, shards, and metadata. + * It does not delete related Kibana components, such as data views, visualizations, or dashboards. + * + * You cannot delete the current write index of a data stream. + * To delete the index, you must roll over the data stream so a new write index is created. + * You can then use the delete index API to delete the previous write index. * @rest_spec_name indices.delete * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-delete-index + * @index_privileges delete_index */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/delete_alias/IndicesDeleteAliasRequest.ts b/specification/indices/delete_alias/IndicesDeleteAliasRequest.ts index fca8072253..93eaeafaad 100644 --- a/specification/indices/delete_alias/IndicesDeleteAliasRequest.ts +++ b/specification/indices/delete_alias/IndicesDeleteAliasRequest.ts @@ -27,6 +27,8 @@ import { Duration } from '@_types/Time' * @rest_spec_name indices.delete_alias * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @index_privileges manage + * @doc_id indices-delete-alias */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts 
b/specification/indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts index d2ef9dda1f..1ba63bfd6c 100644 --- a/specification/indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts +++ b/specification/indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts @@ -29,7 +29,8 @@ import { Duration } from '@_types/Time' * @rest_spec_name indices.delete_index_template * @availability stack since=7.8.0 stability=stable * @availability serverless stability=stable visibility=public - * @cluster_privileges manage_index_templates,manage + * @cluster_privileges manage_index_templates + * @doc_id indices-delete-template */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/delete_template/IndicesDeleteTemplateRequest.ts b/specification/indices/delete_template/IndicesDeleteTemplateRequest.ts index 19689773c4..58d4d3a06b 100644 --- a/specification/indices/delete_template/IndicesDeleteTemplateRequest.ts +++ b/specification/indices/delete_template/IndicesDeleteTemplateRequest.ts @@ -22,10 +22,11 @@ import { Name } from '@_types/common' import { Duration } from '@_types/Time' /** - * Deletes a legacy index template. + * Delete a legacy index template. * @rest_spec_name indices.delete_template * @availability stack stability=stable - * @cluster_privileges manage_index_templates,manage + * @cluster_privileges manage_index_templates + * @doc_id indices-delete-template-v1 */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/disk_usage/IndicesDiskUsageRequest.ts b/specification/indices/disk_usage/IndicesDiskUsageRequest.ts index b8703b5c0d..0cbfdf4ff7 100644 --- a/specification/indices/disk_usage/IndicesDiskUsageRequest.ts +++ b/specification/indices/disk_usage/IndicesDiskUsageRequest.ts @@ -25,6 +25,10 @@ import { ExpandWildcards, Indices } from '@_types/common' * Analyze the disk usage of each field of an index or data stream. 
* This API might not support indices created in previous Elasticsearch versions. * The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API. + * + * NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API. + * Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. + * The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. * @doc_id indices-disk-usage * @rest_spec_name indices.disk_usage * @availability stack since=7.15.0 stability=experimental diff --git a/specification/indices/disk_usage/indicesDiskUsageResponseExample1.yaml b/specification/indices/disk_usage/indicesDiskUsageResponseExample1.yaml new file mode 100644 index 0000000000..f75f08217a --- /dev/null +++ b/specification/indices/disk_usage/indicesDiskUsageResponseExample1.yaml @@ -0,0 +1,89 @@ +# summary: '' +description: An abbreviated response from `POST /my-index-000001/_disk_usage?run_expensive_tasks=true`. 
+# type: response +# response_code: 200 +value: + _shards: + total: 1 + successful: 1 + failed: 0 + my-index-000001: + store_size: 929mb + store_size_in_bytes: 974192723 + all_fields: + total: 928.9mb + total_in_bytes: 973977084 + inverted_index: + total: 107.8mb + total_in_bytes: 113128526 + stored_fields: 623.5mb + stored_fields_in_bytes: 653819143 + doc_values: 125.7mb + doc_values_in_bytes: 131885142 + points: 59.9mb + points_in_bytes: 62885773 + norms: 2.3kb + norms_in_bytes: 2356 + term_vectors: 2.2kb + term_vectors_in_bytes: 2310 + knn_vectors: 0b + knn_vectors_in_bytes: 0 + fields: + _id: + total: 49.3mb + total_in_bytes: 51709993 + inverted_index: + total: 29.7mb + total_in_bytes: 31172745 + stored_fields: 19.5mb + stored_fields_in_bytes: 20537248 + doc_values: 0b + doc_values_in_bytes: 0 + points: 0b + points_in_bytes: 0 + norms: 0b + norms_in_bytes: 0 + term_vectors: 0b + term_vectors_in_bytes: 0 + knn_vectors: 0b + knn_vectors_in_bytes: 0 + _primary_term: {} + _seq_no: {} + _version: {} + _source: + total: 603.9mb + total_in_bytes: 633281895 + inverted_index: {} + stored_fields: 603.9mb + stored_fields_in_bytes: 633281895 + doc_values: 0b + doc_values_in_bytes: 0 + points: 0b + points_in_bytes: 0 + norms: 0b + norms_in_bytes: 0 + term_vectors: 0b + term_vectors_in_bytes: 0 + knn_vectors: 0b + knn_vectors_in_bytes: 0 + context: + total: 28.6mb + total_in_bytes: 30060405 + inverted_index: + total: 22mb + total_in_bytes: 23090908 + stored_fields: 0b + stored_fields_in_bytes: 0 + doc_values: 0b + doc_values_in_bytes: 0 + points: 0b + points_in_bytes: 0 + norms: 2.3kb + norms_in_bytes: 2356 + term_vectors: 2.2kb + term_vectors_in_bytes: 2310 + knn_vectors: 0b + knn_vectors_in_bytes: 0 + context.keyword: {} + message: {} + message.keyword: {} diff --git a/specification/indices/exists/IndicesExistsRequest.ts b/specification/indices/exists/IndicesExistsRequest.ts index f44573ffd4..36c6bd95bc 100644 --- a/specification/indices/exists/IndicesExistsRequest.ts
+++ b/specification/indices/exists/IndicesExistsRequest.ts @@ -22,10 +22,11 @@ import { ExpandWildcards, Indices } from '@_types/common' /** * Check indices. - * Checks if one or more indices, index aliases, or data streams exist. + * Check if one or more indices, index aliases, or data streams exist. * @rest_spec_name indices.exists * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-exists */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/exists_template/IndicesExistsTemplateRequest.ts b/specification/indices/exists_template/IndicesExistsTemplateRequest.ts index 5e1937efe0..4caa764199 100644 --- a/specification/indices/exists_template/IndicesExistsTemplateRequest.ts +++ b/specification/indices/exists_template/IndicesExistsTemplateRequest.ts @@ -23,17 +23,41 @@ import { Duration } from '@_types/Time' /** * Check existence of index templates. - * Returns information about whether a particular index template exists. + * Get information about whether index templates exist. + * Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + * + * IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. * @rest_spec_name indices.exists_template * @availability stack stability=stable + * @doc_id indices-template-exists-v1 + * @ext_doc_id index-templates + * @cluster_privileges manage_index_templates */ export interface Request extends RequestBase { path_parts: { + /** + * A comma-separated list of index template names used to limit the request. + * Wildcard (`*`) expressions are supported. + */ name: Names } query_parameters: { + /** + * Indicates whether to use a flat format for the response. 
+ * @server_default false + */ flat_settings?: boolean + /** + * Indicates whether to get information from the local node only. + * @server_default false + */ local?: boolean + /** + * The period to wait for the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never timeout, set it to `-1`. + * @server_default 30s + */ master_timeout?: Duration } } diff --git a/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts b/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts index 703f8a386b..139723d57f 100644 --- a/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts +++ b/specification/indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts @@ -31,10 +31,14 @@ import { Duration } from '@_types/Time' * Get field usage information for each shard and field of an index. * Field usage statistics are automatically captured when queries are running on a cluster. * A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use. + * + * The response body reports the per-shard usage count of the data structures that back the fields in the index. + * A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. 
* @rest_spec_name indices.field_usage_stats * @availability stack since=7.15.0 stability=experimental * @availability serverless stability=experimental visibility=private * @index_privileges manage + * @doc_id field-usage-stats */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/field_usage_stats/indicesFieldUsageStatsResponseExample1.yaml b/specification/indices/field_usage_stats/indicesFieldUsageStatsResponseExample1.yaml new file mode 100644 index 0000000000..f4f837541f --- /dev/null +++ b/specification/indices/field_usage_stats/indicesFieldUsageStatsResponseExample1.yaml @@ -0,0 +1,57 @@ +# summary: '' +description: > + An abbreviated response from `GET /my-index-000001/_field_usage_stats`. + The `all_fields` object reports the sums of the usage counts for all fields in the index (on the listed shard). +# type: response +# response_code: 200 +value: + _shards: + total: 1 + successful: 1 + failed: 0 + my-index-000001: + shards: + - tracking_id: MpOl0QlTQ4SYYhEe6KgJoQ + tracking_started_at_millis: 1625558985010 + routing: + state: STARTED + primary: true + node: gA6KeeVzQkGURFCUyV-e8Q + relocating_node: null + stats: + all_fields: + any: '6' + inverted_index: + terms: 1 + postings: 1 + proximity: 1 + positions: 0 + term_frequencies: 1 + offsets: 0 + payloads: 0 + stored_fields: 2 + doc_values: 1 + points: 0 + norms: 1 + term_vectors: 0 + knn_vectors: 0 + fields: + _id: + any: 1 + inverted_index: + terms: 1 + postings: 1 + proximity: 1 + positions: 0 + term_frequencies: 1 + offsets: 0 + payloads: 0 + stored_fields: 1 + doc_values: 0 + points: 0 + norms: 0 + term_vectors: 0 + knn_vectors: 0 + _source: {} + context: {} + message.keyword: {} diff --git a/specification/indices/flush/IndicesFlushRequest.ts b/specification/indices/flush/IndicesFlushRequest.ts index d111e574bd..9738e74e31 100644 --- a/specification/indices/flush/IndicesFlushRequest.ts +++ b/specification/indices/flush/IndicesFlushRequest.ts @@ -36,6 +36,7 @@ 
import { ExpandWildcards, Indices } from '@_types/common' * @rest_spec_name indices.flush * @availability stack stability=stable * @availability serverless stability=stable visibility=private + * @index_privileges maintenance */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/forcemerge/IndicesForceMergeRequest.ts b/specification/indices/forcemerge/IndicesForceMergeRequest.ts index 4d3234e2d1..aebcd74aaf 100644 --- a/specification/indices/forcemerge/IndicesForceMergeRequest.ts +++ b/specification/indices/forcemerge/IndicesForceMergeRequest.ts @@ -35,10 +35,52 @@ import { long } from '@_types/Numeric' * But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. * So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. * If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally. + * + * **Blocks during a force merge** + * + * Calls to this API block until the merge is complete (unless request contains `wait_for_completion=false`). + * If the client connection is lost before completion then the force merge process will continue in the background. + * Any new requests to force merge the same indices will also block until the ongoing force merge is complete. + * + * **Running force merge asynchronously** + * + * If the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. + * However, you can not cancel this task as the force merge task is not cancelable. + * Elasticsearch creates a record of this task as a document at `_tasks/`. + * When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. 
+ * + * **Force merging multiple indices** + * + * You can force merge multiple indices with a single request by targeting: + * + * * One or more data streams that contain multiple backing indices + * * Multiple indices + * * One or more aliases + * * All data streams and indices in a cluster + * + * Each targeted shard is force-merged separately using the force_merge threadpool. + * By default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time. + * If you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel. + * + * Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one. + * + * **Data streams and time-based indices** + * + * Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. + * In these cases, each index only receives indexing traffic for a certain period of time. + * Once an index receives no more writes, its shards can be force-merged to a single segment. + * This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.
+ * For example: + * + * ``` + * POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 + * ``` * @rest_spec_name indices.forcemerge * @availability stack since=2.1.0 stability=stable * @availability serverless stability=stable visibility=private + * @doc_id indices-forcemerge * @ext_doc_id index-modules-merge + * @index_privileges maintenance */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/get/IndicesGetRequest.ts b/specification/indices/get/IndicesGetRequest.ts index 2304bbd2df..82a3cc70be 100644 --- a/specification/indices/get/IndicesGetRequest.ts +++ b/specification/indices/get/IndicesGetRequest.ts @@ -23,12 +23,13 @@ import { Duration } from '@_types/Time' /** * Get index information. - * Returns information about one or more indices. For data streams, the API returns information about the + * Get information about one or more indices. For data streams, the API returns information about the * stream’s backing indices. 
* @rest_spec_name indices.get * @availability stack stability=stable * @availability serverless stability=stable visibility=public * @index_privileges view_index_metadata, manage + * @doc_id indices-get-index */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/get_alias/IndicesGetAliasRequest.ts b/specification/indices/get_alias/IndicesGetAliasRequest.ts index 2d7b9d7b21..6a857e139c 100644 --- a/specification/indices/get_alias/IndicesGetAliasRequest.ts +++ b/specification/indices/get_alias/IndicesGetAliasRequest.ts @@ -27,6 +27,8 @@ import { Duration } from '@_types/Time' * @rest_spec_name indices.get_alias * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-get-alias + * @index_privileges view_index_metadata */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/get_field_mapping/IndicesGetFieldMappingRequest.ts b/specification/indices/get_field_mapping/IndicesGetFieldMappingRequest.ts index cb238daf74..42a9bff788 100644 --- a/specification/indices/get_field_mapping/IndicesGetFieldMappingRequest.ts +++ b/specification/indices/get_field_mapping/IndicesGetFieldMappingRequest.ts @@ -24,13 +24,18 @@ import { ExpandWildcards, Fields, Indices } from '@_types/common' * Get mapping definitions. * Retrieves mapping definitions for one or more fields. * For data streams, the API retrieves field mappings for the stream’s backing indices. + * + * This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. * @rest_spec_name indices.get_field_mapping * @availability stack stability=stable + * @index_privileges view_index_metadata + * @doc_id indices-get-field-mapping */ export interface Request extends RequestBase { path_parts: { /** * Comma-separated list or wildcard expression of fields used to limit returned information. + * Supports wildcards (`*`). 
*/ fields: Fields /** diff --git a/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample1.yaml b/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample1.yaml new file mode 100644 index 0000000000..06294947ba --- /dev/null +++ b/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample1.yaml @@ -0,0 +1,11 @@ +summary: A single field mapping +# indices/get-field-mapping.asciidoc:104 +description: > + A successful response from `GET publications/_mapping/field/title`, which returns the mapping of a field called `title`. +# type: response +# response_code: '' +value: + "{\n \"publications\": {\n \"mappings\": {\n \"title\": {\n\ + \ \"full_name\": \"title\",\n \"mapping\": {\n \ + \ \"title\": {\n \"type\": \"text\"\n }\n\ + \ }\n }\n }\n }\n}" diff --git a/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample2.yaml b/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample2.yaml new file mode 100644 index 0000000000..66de147282 --- /dev/null +++ b/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample2.yaml @@ -0,0 +1,14 @@ +summary: Multiple field mappings +# indices/get-field-mapping.asciidoc:137 +description: > + A successful response from `GET publications/_mapping/field/author.id,abstract,name`. + The get field mapping API also supports wildcard notation.
+# type: response +# response_code: '' +value: + "{\n \"publications\": {\n \"mappings\": {\n \"author.id\": {\n\ + \ \"full_name\": \"author.id\",\n \"mapping\": {\n \ + \ \"id\": {\n \"type\": \"text\"\n }\n \ + \ }\n },\n \"abstract\": {\n \"full_name\": \"abstract\"\ + ,\n \"mapping\": {\n \"abstract\": {\n \"\ + type\": \"text\"\n }\n }\n }\n }\n }\n}" diff --git a/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample3.yaml b/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample3.yaml new file mode 100644 index 0000000000..2230e6b4ff --- /dev/null +++ b/specification/indices/get_field_mapping/indicesGetFieldMappingResponseExample3.yaml @@ -0,0 +1,16 @@ +summary: Wildcards +# indices/get-field-mapping.asciidoc:173 +description: > + A successful response from `GET publications/_mapping/field/a*`. +# type: response +# response_code: '' +value: + "{\n \"publications\": {\n \"mappings\": {\n \"author.name\"\ + : {\n \"full_name\": \"author.name\",\n \"mapping\": {\n \ + \ \"name\": {\n \"type\": \"text\"\n \ + \ }\n }\n },\n \"abstract\": {\n \"full_name\"\ + : \"abstract\",\n \"mapping\": {\n \"abstract\": {\n \ + \ \"type\": \"text\"\n }\n }\n },\n\ + \ \"author.id\": {\n \"full_name\": \"author.id\",\n \ + \ \"mapping\": {\n \"id\": {\n \"type\": \"text\"\ + \n }\n }\n }\n }\n }\n}" diff --git a/specification/indices/get_index_template/IndicesGetIndexTemplateRequest.ts b/specification/indices/get_index_template/IndicesGetIndexTemplateRequest.ts index f24412100d..1cabddb396 100644 --- a/specification/indices/get_index_template/IndicesGetIndexTemplateRequest.ts +++ b/specification/indices/get_index_template/IndicesGetIndexTemplateRequest.ts @@ -23,11 +23,12 @@ import { Duration } from '@_types/Time' /** * Get index templates. - * Returns information about one or more index templates. + * Get information about one or more index templates. 
* @rest_spec_name indices.get_index_template * @availability stack since=7.9.0 stability=stable * @availability serverless stability=stable visibility=public - * @cluster_privileges manage_index_templates,manage + * @cluster_privileges manage_index_templates + * @doc_id indices-get-template */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/get_mapping/IndicesGetMappingRequest.ts b/specification/indices/get_mapping/IndicesGetMappingRequest.ts index 0c93162289..6e41be71b3 100644 --- a/specification/indices/get_mapping/IndicesGetMappingRequest.ts +++ b/specification/indices/get_mapping/IndicesGetMappingRequest.ts @@ -23,11 +23,12 @@ import { Duration } from '@_types/Time' /** * Get mapping definitions. - * Retrieves mapping definitions for one or more indices. * For data streams, the API retrieves mappings for the stream’s backing indices. * @rest_spec_name indices.get_mapping * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-get-mapping + * @index_privileges view_index_metadata */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/get_settings/IndicesGetSettingsRequest.ts b/specification/indices/get_settings/IndicesGetSettingsRequest.ts index 711694cb5a..be2fe5d370 100644 --- a/specification/indices/get_settings/IndicesGetSettingsRequest.ts +++ b/specification/indices/get_settings/IndicesGetSettingsRequest.ts @@ -23,12 +23,13 @@ import { Duration } from '@_types/Time' /** * Get index settings. - * Returns setting information for one or more indices. For data streams, - * returns setting information for the stream’s backing indices. + * Get setting information for one or more indices. + * For data streams, it returns setting information for the stream's backing indices. 
* @rest_spec_name indices.get_settings * @availability stack stability=stable * @availability serverless stability=stable visibility=public - * @index_privileges view_index_metadata, monitor, manage + * @index_privileges view_index_metadata + * @doc_id indices-get-settings */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/get_template/IndicesGetTemplateRequest.ts b/specification/indices/get_template/IndicesGetTemplateRequest.ts index 0ed5faad1e..119630747d 100644 --- a/specification/indices/get_template/IndicesGetTemplateRequest.ts +++ b/specification/indices/get_template/IndicesGetTemplateRequest.ts @@ -23,9 +23,14 @@ import { Duration } from '@_types/Time' /** * Get index templates. - * Retrieves information about one or more index templates. + * Get information about one or more index templates. + * + * IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. * @rest_spec_name indices.get_template * @availability stack stability=stable + * @doc_id indices-get-template-v1 + * @ext_doc_id index-templates + * @cluster_privileges manage_index_templates */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/open/IndicesOpenRequest.ts b/specification/indices/open/IndicesOpenRequest.ts index 31d76bf84d..42183bae24 100644 --- a/specification/indices/open/IndicesOpenRequest.ts +++ b/specification/indices/open/IndicesOpenRequest.ts @@ -22,11 +22,34 @@ import { ExpandWildcards, Indices, WaitForActiveShards } from '@_types/common' import { Duration } from '@_types/Time' /** - * Opens a closed index. + * Open a closed index. * For data streams, the API opens any closed backing indices. + * + * A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. 
+ * It is not possible to index documents or to search for documents in a closed index. + * This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster. + * + * When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. + * The shards will then go through the normal recovery process. + * The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times. + * + * You can open and close multiple indices. + * An error is thrown if the request explicitly refers to a missing index. + * This behavior can be turned off by using the `ignore_unavailable=true` parameter. + * + * By default, you must explicitly name the indices you are opening or closing. + * To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. + * This setting can also be changed with the cluster update settings API. + * + * Closed indices consume a significant amount of disk-space which can cause problems in managed environments. + * Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. + * + * Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well. 
* @rest_spec_name indices.open + * @availability stack stability=stable + * @availability serverless stability=stable visibility=private + * @doc_id indices-open-close + * @index_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/put_alias/indicesPutAliasRequestExample1.yaml b/specification/indices/put_alias/indicesPutAliasRequestExample1.yaml new file mode 100644 index 0000000000..15b66f2330 --- /dev/null +++ b/specification/indices/put_alias/indicesPutAliasRequestExample1.yaml @@ -0,0 +1,5 @@ +# summary: indices/aliases.asciidoc:10 +# method_request: POST _aliases +# description: '' +# type: request +value: "{\n \"actions\": [\n {\n \"add\": {\n \"index\": \"my-data-stream\",\n \"alias\": \"my-alias\"\n }\n }\n ]\n}" diff --git a/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts b/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts index c09195cac6..e58fb3e9d4 100644 --- a/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts +++ b/specification/indices/put_index_template/IndicesPutIndexTemplateRequest.ts @@ -37,9 +37,36 @@ import { Duration } from '@_types/Time' /** * Create or update an index template. * Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + * + * Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. + * Index templates are applied during data stream or index creation. + * For data streams, these settings and mappings are applied when the stream's backing indices are created. + * Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. + * Changes to index templates do not affect existing indices, including the existing backing indices of a data stream. + * + * You can use C-style `/* *\/` block comments in index templates.
+ * You can include comments anywhere in the request body, except before the opening curly bracket. + * + * **Multiple matching templates** + * + * If multiple index templates match the name of a new index or data stream, the template with the highest priority is used. + * + * Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities. + * + * **Composing aliases, mappings, and settings** + * + * When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. + * Any mappings, settings, or aliases from the parent index template are merged in next. + * Finally, any configuration on the index request itself is merged. + * Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. + * If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. + * This recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`. + * If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. + * If an entry already exists with the same key, then it is overwritten by the new definition. * @rest_spec_name indices.put_index_template * @availability stack since=7.9.0 stability=stable * @availability serverless stability=stable visibility=public + * @cluster_privileges manage_index_templates */ export interface Request extends RequestBase { path_parts: { @@ -77,12 +104,16 @@ export interface Request extends RequestBase { /** * Version number used to manage index templates externally. 
* This number is not automatically generated by Elasticsearch. + * External systems can use these version numbers to simplify template management. + * To unset a version, replace the template without specifying one. */ version?: VersionNumber /** * Optional user metadata about the index template. - * May have any contents. - * This map is not automatically generated by Elasticsearch. + * It may have any contents. + * It is not automatically generated or used by Elasticsearch. + * This user-defined object is stored in the cluster state, so keeping it short is preferable. + * To unset the metadata, replace the template without specifying it. * @doc_id mapping-meta-field */ _meta?: Metadata /** diff --git a/specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample1.yaml b/specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample1.yaml index b1f6588ffe..841582fc89 100644 --- a/specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample1.yaml +++ b/specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample1.yaml @@ -1,4 +1,4 @@ -summary: Create an index template. +summary: Create a template # method_request: PUT /_index_template/template_1 # description: '' # type: request diff --git a/specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample2.yaml b/specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample2.yaml new file mode 100644 index 0000000000..2bd5336b80 --- /dev/null +++ b/specification/indices/put_index_template/IndicesPutIndexTemplateRequestExample2.yaml @@ -0,0 +1,20 @@ +summary: Create a template with aliases +# method_request: PUT /_index_template/template_1 +description: | + You can include index aliases in an index template. + During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to.
+# type: request +value: + index_patterns: + - template* + template: + settings: + number_of_shards: 1 + aliases: + alias1: {} + alias2: + filter: + term: + user.id: kimchy + routing: shard-1 + '{index}-alias': {} diff --git a/specification/indices/put_template/IndicesPutTemplateRequest.ts b/specification/indices/put_template/IndicesPutTemplateRequest.ts index 8fbb2ae16a..2fa96ad4d6 100644 --- a/specification/indices/put_template/IndicesPutTemplateRequest.ts +++ b/specification/indices/put_template/IndicesPutTemplateRequest.ts @@ -39,9 +39,19 @@ import { Duration } from '@_types/Time' * Index templates are only applied during index creation. * Changes to index templates do not affect existing indices. * Settings and mappings specified in create index API requests override any settings or mappings specified in an index template. + * + * You can use C-style `/* *\/` block comments in index templates. + * You can include comments anywhere in the request body, except before the opening curly bracket. + * + * **Indices matching multiple templates** + * + * Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. + * The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. + * NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. * @rest_spec_name indices.put_template * @availability stack stability=stable * @cluster_privileges manage_index_templates, manage + * @doc_id index-templates-v1 * @ext_doc_id index-templates */ export interface Request extends RequestBase { @@ -100,6 +110,7 @@ export interface Request extends RequestBase { /** * Version number used to manage index templates externally. This number * is not automatically generated by Elasticsearch. + * To unset a version, replace the template without specifying one. 
*/ version?: VersionNumber } diff --git a/specification/indices/put_template/indicesPutTemplateRequestExample1.yaml b/specification/indices/put_template/indicesPutTemplateRequestExample1.yaml index a35702311c..48e9d7f7e0 100644 --- a/specification/indices/put_template/indicesPutTemplateRequestExample1.yaml +++ b/specification/indices/put_template/indicesPutTemplateRequestExample1.yaml @@ -1,4 +1,4 @@ -summary: Create or update an index template. +summary: Create an index template # method_request: PUT _template/template_1 # description: '' # type: request diff --git a/specification/indices/put_template/indicesPutTemplateRequestExample2.yaml b/specification/indices/put_template/indicesPutTemplateRequestExample2.yaml new file mode 100644 index 0000000000..0902682739 --- /dev/null +++ b/specification/indices/put_template/indicesPutTemplateRequestExample2.yaml @@ -0,0 +1,19 @@ +summary: Create an index template with aliases +# method_request: PUT _template/template_1 +description: > + You can include index aliases in an index template. + During index creation, the `{index}` placeholder in the alias name will be replaced with the actual index name that the template gets applied to. +# type: request +value: + index_patterns: + - te* + settings: + number_of_shards: 1 + aliases: + alias1: {} + alias2: + filter: + term: + user.id: kimchy + routing: shard-1 + '{index}-alias': {} diff --git a/specification/indices/recovery/IndicesRecoveryRequest.ts b/specification/indices/recovery/IndicesRecoveryRequest.ts index 0c4212ae46..d8b5494756 100644 --- a/specification/indices/recovery/IndicesRecoveryRequest.ts +++ b/specification/indices/recovery/IndicesRecoveryRequest.ts @@ -25,6 +25,8 @@ import { Indices } from '@_types/common' * Get information about ongoing and completed shard recoveries for one or more indices. * For data streams, the API returns information for the stream's backing indices. 
* + * All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time. + * * Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. * When a shard recovery completes, the recovered shard is available for search and indexing. * @@ -45,6 +47,8 @@ import { Indices } from '@_types/common' * @rest_spec_name indices.recovery * @availability stack stability=stable * @availability serverless stability=stable visibility=private + * @index_privileges monitor + * @doc_id indices-recovery */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/recovery/indicesRecoveryResponseExample1.yaml b/specification/indices/recovery/indicesRecoveryResponseExample1.yaml index 5251a05cc4..5804f8c909 100644 --- a/specification/indices/recovery/indicesRecoveryResponseExample1.yaml +++ b/specification/indices/recovery/indicesRecoveryResponseExample1.yaml @@ -1,7 +1,11 @@ -summary: A successful response for retrieving information about ongoing and completed shard recoveries for an index. -# description: '' +summary: Get recovery information +description: > + A successful response from `GET /_recovery?human`, which gets information about ongoing and completed shard recoveries for all data streams and indices in a cluster. + This example includes information about a single index recovering a single shard. + The source of the recovery is a snapshot repository and the target of the recovery is the `my_es_node` node. + The response also includes the number and percentage of files and bytes recovered.
# type: response -response_code: 200 +# response_code: 200 value: "{\n \"index1\" : {\n \"shards\" : [ {\n \"id\" : 0,\n \"type\"\ \ : \"SNAPSHOT\",\n \"stage\" : \"INDEX\",\n \"primary\" : true,\n \ diff --git a/specification/indices/recovery/indicesRecoveryResponseExample2.yaml b/specification/indices/recovery/indicesRecoveryResponseExample2.yaml new file mode 100644 index 0000000000..c92389d4b4 --- /dev/null +++ b/specification/indices/recovery/indicesRecoveryResponseExample2.yaml @@ -0,0 +1,43 @@ +summary: Get detailed recovery information +# indices/recovery.asciidoc:358 +description: > + A successful response from `GET _recovery?human&detailed=true`. + The response includes a listing of any physical files recovered and their sizes. + The response also includes timings in milliseconds of the various stages of recovery: index retrieval, translog replay, and index start time. + This response indicates the recovery is done. +# type: response +# response_code: 200 +value: + "{\n \"index1\" : {\n \"shards\" : [ {\n \"id\" : 0,\n \"type\"\ + \ : \"EXISTING_STORE\",\n \"stage\" : \"DONE\",\n \"primary\" : true,\n \"\ + start_time\" : \"2014-02-24T12:38:06.349\",\n \"start_time_in_millis\" : \"\ + 1393245486349\",\n \"stop_time\" : \"2014-02-24T12:38:08.464\",\n \"stop_time_in_millis\"\ + \ : \"1393245488464\",\n \"total_time\" : \"2.1s\",\n \"total_time_in_millis\"\ + \ : 2115,\n \"source\" : {\n \"id\" : \"RGMdRc-yQWWKIBM4DGvwqQ\",\n\ + \ \"host\" : \"my.fqdn\",\n \"transport_address\" : \"my.fqdn\",\n\ + \ \"ip\" : \"10.0.1.7\",\n \"name\" : \"my_es_node\"\n },\n \ + \ \"target\" : {\n \"id\" : \"RGMdRc-yQWWKIBM4DGvwqQ\",\n \"host\"\ + \ : \"my.fqdn\",\n \"transport_address\" : \"my.fqdn\",\n \"ip\" :\ + \ \"10.0.1.7\",\n \"name\" : \"my_es_node\"\n },\n \"index\" :\ + \ {\n \"size\" : {\n \"total\" : \"24.7mb\",\n \"total_in_bytes\"\ + \ : 26001617,\n \"reused\" : \"24.7mb\",\n \"reused_in_bytes\"\ + \ : 26001617,\n \"recovered\" : \"0b\",\n 
\"recovered_in_bytes\"\ + \ : 0,\n \"recovered_from_snapshot\" : \"0b\",\n \"recovered_from_snapshot_in_bytes\"\ + \ : 0,\n \"percent\" : \"100.0%\"\n },\n \"files\" : {\n\ + \ \"total\" : 26,\n \"reused\" : 26,\n \"recovered\"\ + \ : 0,\n \"percent\" : \"100.0%\",\n \"details\" : [ {\n \ + \ \"name\" : \"segments.gen\",\n \"length\" : 20,\n \ + \ \"recovered\" : 20\n }, {\n \"name\" : \"_0.cfs\",\n \ + \ \"length\" : 135306,\n \"recovered\" : 135306,\n \"\ + recovered_from_snapshot\": 0\n }, {\n \"name\" : \"segments_2\"\ + ,\n \"length\" : 251,\n \"recovered\" : 251,\n \ + \ \"recovered_from_snapshot\": 0\n }\n ]\n },\n \ + \ \"total_time\" : \"2ms\",\n \"total_time_in_millis\" : 2,\n \"source_throttle_time\"\ + \ : \"0s\",\n \"source_throttle_time_in_millis\" : 0,\n \"target_throttle_time\"\ + \ : \"0s\",\n \"target_throttle_time_in_millis\" : 0\n },\n \"\ + translog\" : {\n \"recovered\" : 71,\n \"total\" : 0,\n \"\ + percent\" : \"100.0%\",\n \"total_on_start\" : 0,\n \"total_time\"\ + \ : \"2.0s\",\n \"total_time_in_millis\" : 2025\n },\n \"verify_index\"\ + \ : {\n \"check_index_time\" : 0,\n \"check_index_time_in_millis\"\ + \ : 0,\n \"total_time\" : \"88ms\",\n \"total_time_in_millis\" : 88\n\ + \ }\n } ]\n }\n}" diff --git a/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml b/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml index 4ecc9dd322..8595a6a39f 100644 --- a/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml +++ b/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml @@ -1,7 +1,7 @@ -summary: A successful response for resolving a specified index expression to return information about each cluster. -# description: '' +# summary: +description: A successful response for resolving a specified index expression to return information about each cluster. 
# type: response -response_code: 200 +# response_code: 200 value: "{\n \"(local)\": {\n \"connected\": true,\n \"skip_unavailable\": false,\n\ \ \"matching_indices\": true,\n \"version\": {\n \"number\": \"8.13.0\"\ diff --git a/specification/indices/resolve_index/indicesResolveResponseExample1.yaml b/specification/indices/resolve_index/indicesResolveResponseExample1.yaml index 0522f59de0..15a73f73d9 100644 --- a/specification/indices/resolve_index/indicesResolveResponseExample1.yaml +++ b/specification/indices/resolve_index/indicesResolveResponseExample1.yaml @@ -1,7 +1,7 @@ -summary: A successful response for resolving the specified name for an index. -# description: '' +# summary: +description: A successful response for resolving the specified name for an index. # type: response -response_code: 200 +# response_code: 200 value: "{\n \"indices\": [\n {\n \"name\": \"foo_closed\",\n \"attributes\"\ : [\n \"closed\"\n ]\n },\n {\n \"name\": \"freeze-index\"\ diff --git a/specification/indices/rollover/indicesRolloverResponseExample1.yaml b/specification/indices/rollover/indicesRolloverResponseExample1.yaml index cd9af2a77a..83e69dea80 100644 --- a/specification/indices/rollover/indicesRolloverResponseExample1.yaml +++ b/specification/indices/rollover/indicesRolloverResponseExample1.yaml @@ -1,10 +1,28 @@ -summary: A successful response for creating a new index for a data stream. -# description: '' +# summary: +description: > + An abbreviated response from `GET /_segments`. 
# type: response -response_code: 200 +# response_code: 200 value: - "{\n \"acknowledged\": true,\n \"shards_acknowledged\": true,\n \"old_index\"\ - : \".ds-my-data-stream-2099.05.06-000001\",\n \"new_index\": \".ds-my-data-stream-2099.05.07-000002\"\ - ,\n \"rolled_over\": true,\n \"dry_run\": false,\n \"lazy\": false,\n \"conditions\"\ - : {\n \"[max_age: 7d]\": false,\n \"[max_docs: 1000]\": true,\n \"[max_primary_shard_size:\ - \ 50gb]\": false,\n \"[max_primary_shard_docs: 2000]\": false\n }\n}" + _shards: {} + indices: + test: + shards: + 0: + - routing: + state: STARTED + primary: true + node: zDC_RorJQCao9xf9pg3Fvw + num_committed_segments: 0 + num_search_segments: 1 + segments: + _0: + generation: 0 + num_docs: 1 + deleted_docs: 0 + size_in_bytes: 3800 + committed: false + search: true + version: '7.0.0' + compound: true + attributes: {} diff --git a/specification/indices/segments/IndicesSegmentsRequest.ts b/specification/indices/segments/IndicesSegmentsRequest.ts index 9eeff54d02..36ff0c3b7b 100644 --- a/specification/indices/segments/IndicesSegmentsRequest.ts +++ b/specification/indices/segments/IndicesSegmentsRequest.ts @@ -27,6 +27,8 @@ import { ExpandWildcards, Indices } from '@_types/common' * @rest_spec_name indices.segments * @availability stack stability=stable * @availability serverless stability=stable visibility=private + * @doc_id indices-segments + * @index_privileges monitor */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/segments/indicesSegmentsResponseExample1.yaml b/specification/indices/segments/indicesSegmentsResponseExample1.yaml new file mode 100644 index 0000000000..85bf7c96b9 --- /dev/null +++ b/specification/indices/segments/indicesSegmentsResponseExample1.yaml @@ -0,0 +1,10 @@ +# summary: +description: A successful response for creating a new index for a data stream. 
+# type: response +# response_code: 200 +value: + "{\n \"acknowledged\": true,\n \"shards_acknowledged\": true,\n \"old_index\"\ + : \".ds-my-data-stream-2099.05.06-000001\",\n \"new_index\": \".ds-my-data-stream-2099.05.07-000002\"\ + ,\n \"rolled_over\": true,\n \"dry_run\": false,\n \"lazy\": false,\n \"conditions\"\ + : {\n \"[max_age: 7d]\": false,\n \"[max_docs: 1000]\": true,\n \"[max_primary_shard_size:\ + \ 50gb]\": false,\n \"[max_primary_shard_docs: 2000]\": false\n }\n}" diff --git a/specification/indices/shard_stores/IndicesShardStoresRequest.ts b/specification/indices/shard_stores/IndicesShardStoresRequest.ts index f54c55af84..711736b2ea 100644 --- a/specification/indices/shard_stores/IndicesShardStoresRequest.ts +++ b/specification/indices/shard_stores/IndicesShardStoresRequest.ts @@ -36,7 +36,8 @@ import { ShardStoreStatus } from './types' * By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards. * @rest_spec_name indices.shard_stores * @availability stack stability=stable - * @index_privileges monitor,manage + * @index_privileges monitor + * @doc_id indices-shards-stores */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/shard_stores/indicesShardStoresResponseExample1.yaml b/specification/indices/shard_stores/indicesShardStoresResponseExample1.yaml new file mode 100644 index 0000000000..f914a2f1a8 --- /dev/null +++ b/specification/indices/shard_stores/indicesShardStoresResponseExample1.yaml @@ -0,0 +1,24 @@ +# summary: +description: > + An abbreviated response from `GET /_shard_stores?status=green`. 
+# type: response +# response_code: 200 +value: + indices: + my-index-000001: + shards: + '0': + stores: + - sPa3OgxLSYGvQ4oPs-Tajw: + name: node_t0 + ephemeral_id: 9NlXRFGCT1m8tkvYCMK-8A + transport_address: local[1] + external_id: node_t0 + attributes: {} + roles: [] + version: 8.10.0 + min_index_version: 7000099 + max_index_version: 8100099 + allocation_id: 2iNySv_OQVePRX-yaRH_lQ + allocation: primary + store_exception: {} diff --git a/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts b/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts index a027d6e3b8..2352c5653a 100644 --- a/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts +++ b/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts @@ -30,7 +30,7 @@ import { Duration } from '@_types/Time' * @rest_spec_name indices.simulate_template * @availability stack stability=stable * @availability serverless stability=stable visibility=public - * @cluster_privileges manage_index_templates,manage + * @cluster_privileges manage_index_templates */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/stats/IndicesStatsRequest.ts b/specification/indices/stats/IndicesStatsRequest.ts index 6e10d8ce5b..94b804dda5 100644 --- a/specification/indices/stats/IndicesStatsRequest.ts +++ b/specification/indices/stats/IndicesStatsRequest.ts @@ -41,7 +41,8 @@ import { * @rest_spec_name indices.stats * @availability stack since=1.3.0 stability=stable * @availability serverless stability=stable visibility=private - * @index_privileges manage, monitor + * @index_privileges monitor + * @doc_id indices-stats */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/ingest/get_pipeline/GetPipelineResponseExample1.yaml b/specification/ingest/get_pipeline/GetPipelineResponseExample1.yaml index 36dad4236d..2cbf23f0df 100644 --- 
a/specification/ingest/get_pipeline/GetPipelineResponseExample1.yaml +++ b/specification/ingest/get_pipeline/GetPipelineResponseExample1.yaml @@ -1,5 +1,5 @@ -summary: A successful response for retrieving information about an ingest pipeline. -# description: '' +# summary: +description: A successful response for retrieving information about an ingest pipeline. # type: response # response_code: 200 value: diff --git a/specification/ingest/simulate/SimulatePipelineResponseExample1.yaml b/specification/ingest/simulate/SimulatePipelineResponseExample1.yaml index 5a8077a5bd..db5a1b5a09 100644 --- a/specification/ingest/simulate/SimulatePipelineResponseExample1.yaml +++ b/specification/ingest/simulate/SimulatePipelineResponseExample1.yaml @@ -1,4 +1,5 @@ -summary: A successful response for running an ingest pipeline against a set of provided documents. +# summary: +description: A successful response for running an ingest pipeline against a set of provided documents. # description: '' # type: response # response_code: 200 From aeeffa7b7b205e6ef1f83f112def8dbac6b89c49 Mon Sep 17 00:00:00 2001 From: lcawl Date: Fri, 3 Jan 2025 19:47:08 -0800 Subject: [PATCH 2/4] Generate output --- output/openapi/elasticsearch-openapi.json | 141 ++--- .../elasticsearch-serverless-openapi.json | 76 +-- output/schema/schema.json | 482 ++++++++++++------ specification/_doc_ids/table.csv | 2 + .../put_mapping/IndicesPutMappingRequest.ts | 4 +- .../indices/refresh/IndicesRefreshRequest.ts | 13 + .../resolve_cluster/ResolveClusterRequest.ts | 13 + .../resolve_cluster/ResolveClusterResponse.ts | 4 +- .../ResolveClusterResponseExample1.yaml | 6 +- .../ResolveClusterResponseExample2.yaml | 25 + .../resolve_index/ResolveIndexRequest.ts | 2 + ...yaml => ResolveIndexResponseExample1.yaml} | 2 +- .../rollover/IndicesRolloverRequest.ts | 39 +- .../IndicesSimulateIndexTemplateRequest.ts | 4 +- .../split/indicesSplitRequestExample1.yaml | 4 +- .../unfreeze/IndicesUnfreezeRequest.ts | 1 + 16 files changed, 
555 insertions(+), 263 deletions(-) create mode 100644 specification/indices/resolve_cluster/ResolveClusterResponseExample2.yaml rename specification/indices/resolve_index/{indicesResolveResponseExample1.yaml => ResolveIndexResponseExample1.yaml} (88%) diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 8821b957fe..4830661752 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -3440,7 +3440,7 @@ "indices" ], "summary": "Get component templates", - "description": "Retrieves information about component templates.", + "description": "Get information about component templates.", "operationId": "cluster-get-component-template-1", "parameters": [ { @@ -3471,7 +3471,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Creates or updates a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.", + "description": "Component templates are building blocks for constructing index templates that specify index 
mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", "operationId": "cluster-put-component-template", "parameters": [ { @@ -3499,7 +3499,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Creates or updates a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of 
a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", "operationId": "cluster-put-component-template-1", "parameters": [ { @@ -3527,7 +3527,7 @@ "indices" ], "summary": "Delete component templates", - "description": "Deletes component templates.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", "operationId": 
"cluster-delete-component-template", "parameters": [ { @@ -3719,7 +3719,7 @@ "indices" ], "summary": "Get component templates", - "description": "Retrieves information about component templates.", + "description": "Get information about component templates.", "operationId": "cluster-get-component-template", "parameters": [ { @@ -10727,7 +10727,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze", "requestBody": { "$ref": "#/components/requestBodies/indices.analyze" @@ -10743,7 +10746,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + 
"externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze-1", "requestBody": { "$ref": "#/components/requestBodies/indices.analyze" @@ -10761,7 +10767,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze-2", "parameters": [ { @@ -10782,7 +10791,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze-3", "parameters": [ { @@ 
-10805,7 +10817,7 @@ "indices" ], "summary": "Clear the cache", - "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", + "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, or `request` parameters.\nTo clear the cache only of specific fields, use the `fields` parameter.", "operationId": "indices-clear-cache", "parameters": [ { @@ -10843,7 +10855,7 @@ "indices" ], "summary": "Clear the cache", - "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", + "description": "Clear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, or `request` parameters.\nTo clear the cache only of specific fields, use the `fields` parameter.", "operationId": "indices-clear-cache-1", "parameters": [ { @@ -10884,7 +10896,7 @@ "indices" ], "summary": "Clone an index", - "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo 
set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. 
If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards 
to, the wait for active shards setting on index creation applies to the clone index action as well.", "operationId": "indices-clone", "parameters": [ { @@ -10918,7 +10930,7 @@ "indices" ], "summary": "Clone an index", - "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. 
If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Clone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. 
If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards 
to, the wait for active shards setting on index creation applies to the clone index action as well.", "operationId": "indices-clone-1", "parameters": [ { @@ -11068,7 +11080,7 @@ "indices" ], "summary": "Get index information", - "description": "Returns information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", + "description": "Get information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", "operationId": "indices-get", "parameters": [ { @@ -11184,7 +11196,7 @@ "indices" ], "summary": "Create an index", - "description": "Creates a new index.", + "description": "You can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases.\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect 
the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.", "operationId": "indices-create", "parameters": [ { @@ -11287,7 +11299,7 @@ "indices" ], "summary": "Delete indices", - "description": "Deletes one or more indices.", + "description": "Deleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.", "operationId": "indices-delete", "parameters": [ { @@ -11370,7 +11382,7 @@ "indices" ], "summary": "Check indices", - "description": "Checks if one or more indices, index aliases, or data streams exist.", + "description": "Check if one or more indices, index aliases, or data streams exist.", "operationId": "indices-exists", "parameters": [ { @@ -12106,7 +12118,7 @@ "indices" ], "summary": "Get index templates", - "description": "Returns information about one or more index templates.", + "description": "Get information about one or more index templates.", "operationId": "indices-get-index-template-1", "parameters": [ { @@ -12137,7 +12149,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.\nIndex templates are 
applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new 
definition.", "operationId": "indices-put-index-template", "parameters": [ { @@ -12168,7 +12180,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on an wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can 
introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", "operationId": "indices-put-index-template-1", "parameters": [ { @@ -12294,7 +12306,10 @@ "indices" ], "summary": "Get index templates", - "description": "Retrieves information about one or more index templates.", + "description": "Get information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html" + }, "operationId": "indices-get-template-1", "parameters": [ { @@ -12321,7 +12336,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings 
specified in create index API requests override any settings or mappings specified in an index template.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html" }, @@ -12357,7 +12372,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: 
This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates with the same order value will result in a 
non-deterministic merging order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html" }, @@ -12392,7 +12407,7 @@ "tags": [ "indices" ], - "summary": "Deletes a legacy index template", + "summary": "Delete a legacy index template", "operationId": "indices-delete-template", "parameters": [ { @@ -12445,13 +12460,16 @@ "indices" ], "summary": "Check existence of index templates", - "description": "Returns information about whether a particular index template exists.", + "description": "Get information about whether index templates exist.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html" + }, "operationId": "indices-exists-template", "parameters": [ { "in": "path", "name": "name", - "description": "The comma separated names of the index templates", + "description": "A comma-separated list of index template names used to limit the request.\nWildcard (`*`) expressions are supported.", "required": true, "deprecated": false, "schema": { @@ -12462,7 +12480,7 @@ { "in": "query", "name": "flat_settings", - "description": "Return settings in flat format (default: false)", + "description": "Indicates whether to use a flat format for the response.", "deprecated": false, "schema": { "type": "boolean" @@ -12472,7 +12490,7 @@ { "in": "query", "name": "local", - "description": "Return local information, do not retrieve the state from master node (default: false)", + "description": "Indicates whether to get information from the local node only.", "deprecated": false, "schema": { "type": "boolean" @@ -12482,7 +12500,7 @@ { "in": "query", "name": "master_timeout", - "description": "Explicit 
operation timeout for connection to master node", + "description": "The period to wait for the master node.\nIf the master node is not available before the timeout expires, the request fails and returns an error.\nTo indicate that the request should never timeout, set it to `-1`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Duration" @@ -12506,7 +12524,7 @@ "indices" ], "summary": "Analyze the index disk usage", - "description": "Analyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.", + "description": "Analyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.\n\nNOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API.\nSince stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.\nThe stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.", "operationId": "indices-disk-usage", "parameters": [ { @@ -12778,7 +12796,7 @@ "indices" ], "summary": "Get field usage stats", - "description": "Get field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.", + "description": "Get field usage information for each shard and field of an index.\nField 
usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.\n\nThe response body reports the per-shard usage count of the data structures that back the fields in the index.\nA given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.", "operationId": "indices-field-usage-stats", "parameters": [ { @@ -13014,7 +13032,7 @@ "indices" ], "summary": "Force a merge", - "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", + "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by 
merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you can not cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple 
indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the force_merge threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receive no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor example:\n\n```\nPOST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html" }, @@ -13056,7 +13074,7 @@ "indices" ], "summary": "Force a merge", - "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is 
not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", + "description": "Perform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue 
in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you can not cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the force_merge threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case `max_num_segments parameter` is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receive no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor example:\n\n```\nPOST 
/.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-merge.html" }, @@ -13192,7 +13210,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.", + "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.", "operationId": "indices-get-field-mapping", "parameters": [ { @@ -13227,7 +13245,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.", + "description": "Retrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.", "operationId": "indices-get-field-mapping-1", "parameters": [ { @@ -13265,7 +13283,7 @@ "indices" ], "summary": "Get index templates", - "description": "Returns information about one or more index templates.", + "description": "Get information about one or more index templates.", "operationId": "indices-get-index-template", "parameters": [ { @@ -13295,7 +13313,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more indices.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", "operationId": "indices-get-mapping", 
"parameters": [ { @@ -13327,7 +13345,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more indices.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", "operationId": "indices-get-mapping-1", "parameters": [ { @@ -13360,7 +13378,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Adds new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", "operationId": "indices-put-mapping", "parameters": [ { @@ -13399,7 +13417,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Adds new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", "operationId": "indices-put-mapping-1", "parameters": [ { @@ -13440,7 +13458,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. 
For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings", "parameters": [ { @@ -13517,7 +13535,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings-1", "parameters": [ { @@ -13600,7 +13618,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings-2", "parameters": [ { @@ -13644,7 +13662,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. 
For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings-3", "parameters": [ { @@ -13685,7 +13703,10 @@ "indices" ], "summary": "Get index templates", - "description": "Retrieves information about one or more index templates.", + "description": "Get information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-templates.html" + }, "operationId": "indices-get-template", "parameters": [ { @@ -13811,8 +13832,8 @@ "tags": [ "indices" ], - "summary": "Opens a closed index", - "description": "For data streams, the API opens any closed backing indices.", + "summary": "Open a closed index", + "description": "For data streams, the API opens any closed backing indices.\n\nA closed index is blocked for read/write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nThis allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behavior can be turned 
off by using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.\nThis setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk-space which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.\n\nBecause opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.", "operationId": "indices-open", "parameters": [ { @@ -13965,7 +13986,7 @@ "indices" ], "summary": "Get index recovery information", - "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt 
only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", + "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original 
recovery will not be shown in the recovery API.", "operationId": "indices-recovery", "parameters": [ { @@ -13988,7 +14009,7 @@ "indices" ], "summary": "Get index recovery information", - "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", + "description": "Get information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at 
any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", "operationId": "indices-recovery-1", "parameters": [ { @@ -14014,7 +14035,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or 
more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "operationId": "indices-refresh-1", "parameters": [ { @@ -14038,7 +14059,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for 
a periodic refresh before running the search.", "operationId": "indices-refresh", "parameters": [ { @@ -14064,7 +14085,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "operationId": "indices-refresh-3", "parameters": [ { @@ -14091,7 +14112,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices 
that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "operationId": "indices-refresh-2", "parameters": [ { @@ -14184,7 +14205,7 @@ "indices" ], "summary": "Resolve the cluster", - "description": "Resolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the 
Elasticsearch server version.", + "description": "Resolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.\n\nFor example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.\nEach cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.\n\n**Advantages of using this endpoint before a cross-cluster search**\n\nYou may want to exclude a cluster or index from a search when:\n\n* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.\n* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). 
For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search.\n* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the \"error\" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.)\n* A remote cluster is an older version that does not support the feature you want to use in your search.", "operationId": "indices-resolve-cluster", "parameters": [ { @@ -14354,7 +14375,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "Creates a new index for a data stream or index alias.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one
index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", "operationId": "indices-rollover", "parameters": [ { @@ -14390,7 +14411,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "Creates a new index for a data stream or index alias.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less
maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", "operationId": "indices-rollover-1", "parameters": [ { @@ -14615,7 +14636,7 @@ "indices" ], "summary": "Simulate an index", - "description": "Returns the index configuration that would be applied to the specified index from an existing index template.", + "description": "Get the index configuration that would be applied to the specified index from an existing index template.", "operationId": "indices-simulate-index-template", "parameters": [ { @@ -69798,7 +69819,7 @@
"type": "boolean" }, "skip_unavailable": { - "description": "The skip_unavailable setting for a remote cluster.", + "description": "The `skip_unavailable` setting for a remote cluster.", "type": "boolean" }, "matching_indices": { @@ -69806,7 +69827,7 @@ "type": "boolean" }, "error": { - "description": "Provides error messages that are likely to occur if you do a search with this index expression\non the specified cluster (e.g., lack of security privileges to query an index).", + "description": "Provides error messages that are likely to occur if you do a search with this index expression\non the specified cluster (for example, lack of security privileges to query an index).", "type": "string" }, "version": { @@ -98465,7 +98486,7 @@ "indices.get_field_mapping#fields": { "in": "path", "name": "fields", - "description": "Comma-separated list or wildcard expression of fields used to limit returned information.", + "description": "Comma-separated list or wildcard expression of fields used to limit returned information.\nSupports wildcards (`*`).", "required": true, "deprecated": false, "schema": { diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 9845b3afc7..633b46864d 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -1484,7 +1484,7 @@ "indices" ], "summary": "Get component templates", - "description": "Retrieves information about component templates.", + "description": "Get information about component templates.", "operationId": "cluster-get-component-template-1", "parameters": [ { @@ -1515,7 +1515,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Creates or updates a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component 
templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's 
`composed_of` list.", "operationId": "cluster-put-component-template", "parameters": [ { @@ -1543,7 +1543,7 @@ "indices" ], "summary": "Create or update a component template", - "description": "Creates or updates a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect 
existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", "operationId": "cluster-put-component-template-1", "parameters": [ { @@ -1571,7 +1571,7 @@ "indices" ], "summary": "Delete component templates", - "description": "Deletes component templates.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", + "description": "Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", "operationId": "cluster-delete-component-template", "parameters": [ { @@ -1677,7 +1677,7 @@ "indices" ], "summary": "Get component templates", - "description": "Retrieves information about component templates.", + "description": "Get information about component templates.", "operationId": "cluster-get-component-template", "parameters": [ { @@ -5961,7 +5961,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating an excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "externalDocs": { + "url":
"https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze", "requestBody": { "$ref": "#/components/requestBodies/indices.analyze" @@ -5977,7 +5980,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating an excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze-1", "requestBody": { "$ref": "#/components/requestBodies/indices.analyze" @@ -5995,7 +6001,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating an excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze-2",
"parameters": [ { @@ -6016,7 +6025,10 @@ "indices" ], "summary": "Get tokens from text analysis", - "description": "The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "The analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating an excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html" + }, "operationId": "indices-analyze-3", "parameters": [ { @@ -6039,7 +6051,7 @@ "indices" ], "summary": "Get index information", - "description": "Returns information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", + "description": "Get information about one or more indices.
For data streams, the API returns information about the\nstream’s backing indices.", "operationId": "indices-get", "parameters": [ { @@ -6155,7 +6167,7 @@ "indices" ], "summary": "Create an index", - "description": "Creates a new index.", + "description": "You can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases.\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is `false`, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is `false`, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.", "operationId": "indices-create", "parameters": [ { @@ -6258,7 +6270,7 @@ "indices" ], "summary": "Delete
indices", - "description": "Deletes one or more indices.", + "description": "Deleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.", "operationId": "indices-delete", "parameters": [ { @@ -6341,7 +6353,7 @@ "indices" ], "summary": "Check indices", - "description": "Checks if one or more indices, index aliases, or data streams exist.", + "description": "Check if one or more indices, index aliases, or data streams exist.", "operationId": "indices-exists", "parameters": [ { @@ -6817,7 +6829,7 @@ "indices" ], "summary": "Get index templates", - "description": "Returns information about one or more index templates.", + "description": "Get information about one or more index templates.", "operationId": "indices-get-index-template-1", "parameters": [ { @@ -6848,7 +6860,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in
index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", "operationId": "indices-put-index-template", "parameters": [ { @@ -6879,7 +6891,7 @@ "indices" ], "summary": "Create or update an index template", - "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Index templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new 
indices based on a wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the
end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", "operationId": "indices-put-index-template-1", "parameters": [ { @@ -7376,7 +7388,7 @@ "indices" ], "summary": "Get index templates", - "description": "Returns information about one or more index templates.", + "description": "Get information about one or more index templates.", "operationId": "indices-get-index-template", "parameters": [ { @@ -7406,7 +7418,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more indices.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", "operationId": "indices-get-mapping", "parameters": [ { @@ -7438,7 +7450,7 @@ "indices" ], "summary": "Get mapping definitions", - "description": "Retrieves mapping definitions for one or more indices.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "For data streams, the API retrieves mappings for the stream’s backing indices.", "operationId": "indices-get-mapping-1", "parameters": [ { @@ -7471,7 +7483,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Adds new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", "operationId": "indices-put-mapping", "parameters": [ { @@ -7510,7 +7522,7 @@ "indices" ], "summary": "Update field mappings", - "description": "Adds new fields to an existing data stream or index.\nYou can also use this API to change the search settings of 
existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", "operationId": "indices-put-mapping-1", "parameters": [ { @@ -7551,7 +7563,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings", "parameters": [ { @@ -7628,7 +7640,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings-1", "parameters": [ { @@ -7711,7 +7723,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings-2", "parameters": [ { @@ -7755,7 +7767,7 @@ "indices" ], "summary": "Get index settings", - "description": "Returns setting information for one or more indices. 
For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "operationId": "indices-get-settings-3", "parameters": [ { @@ -7897,7 +7909,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "operationId": "indices-refresh-1", "parameters": [ { @@ -7921,7 +7933,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available 
for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "operationId": "indices-refresh", "parameters": [ { @@ -7947,7 +7959,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your 
application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "operationId": "indices-refresh-3", "parameters": [ { @@ -7974,7 +7986,7 @@ "indices" ], "summary": "Refresh an index", - "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "A refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "operationId": "indices-refresh-2", "parameters": [ { @@ -8094,7 +8106,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "Creates a new index for a data stream or index alias.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or 
index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also `sets is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named ``.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, 
the new index's name is `my-index-2099.05.07-000002`.", "operationId": "indices-rollover", "parameters": [ { @@ -8130,7 +8142,7 @@ "indices" ], "summary": "Roll over to a new index", - "description": "Creates a new index for a data stream or index alias.", + "description": "TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named 
`my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", "operationId": "indices-rollover-1", "parameters": [ { @@ -8169,7 +8181,7 @@ "indices" ], "summary": "Simulate an index", - "description": "Returns the index configuration that would be applied to the specified index from an existing index template.", + "description": "Get the index configuration that would be applied to the specified index from an existing index template.", "operationId": "indices-simulate-index-template", "parameters": [ { diff --git a/output/schema/schema.json b/output/schema/schema.json index 32be7def8b..24469effe9 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -2306,7 +2306,7 @@ "stability": "stable" } }, - "description": "Delete component templates.\nDeletes component templates.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", + "description": "Delete component templates.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", "docId": "indices-component-template", "docTag": "indices", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html", @@ -2419,7 +2419,7 @@ "stability": "stable" } }, - "description": "Get component templates.\nRetrieves information about component templates.", + "description": "Get component templates.\nGet information about component templates.", "docId": "indices-component-template", "docTag": "indices", 
"docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html", @@ -2666,7 +2666,7 @@ "stability": "stable" } }, - "description": "Create or update a component template.\nCreates or updates a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.", + "description": "Create or update a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s 
backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", "docId": "indices-component-template", "docTag": "indices", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-component-template.html", @@ -4227,10 +4227,16 @@ "stability": "stable" } }, - "description": "Delete a dangling index.\n\nIf Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.", + "description": "Delete a dangling index.\nIf Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.", + "docId": "dangling-index-delete", "docTag": "indices", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/dangling-index-delete.html", "name": "dangling_indices.delete_dangling_index", + "privileges": { + "cluster": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "dangling_indices.delete_dangling_index" @@ -4260,9 +4266,15 @@ } }, "description": "Import a dangling index.\n\nIf Elasticsearch encounters index data that is absent from the current cluster state, those indices are 
considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.", + "docId": "dangling-index-import", "docTag": "indices", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/dangling-index-import.html", "name": "dangling_indices.import_dangling_index", + "privileges": { + "cluster": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "dangling_indices.import_dangling_index" @@ -4292,9 +4304,15 @@ } }, "description": "Get the dangling indices.\n\nIf Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.\n\nUse this API to list dangling indices, which you can then import or delete.", + "docId": "dangling-indices-list", "docTag": "indices", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/dangling-indices-list.html", "name": "dangling_indices.list_dangling_indices", + "privileges": { + "cluster": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "dangling_indices.list_dangling_indices" @@ -5081,8 +5099,7 @@ "privileges": { "index": [ "view_index_metadata", - "read", - "manage" + "read" ] }, "request": { @@ -6079,10 +6096,17 @@ "stability": "stable" } }, - "description": "Get tokens from text analysis.\nThe analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "Get tokens from text analysis.\nThe analyze API performs analysis on 
a text string and returns the resulting tokens.\n\nGenerating excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", "docId": "indices-analyze", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-analyze.html", + "extDocId": "analysis", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/analysis.html", "name": "indices.analyze", + "privileges": { + "index": [ + "index" + ] + }, "request": { "name": "Request", "namespace": "indices.analyze" @@ -6125,9 +6149,14 @@ "stability": "stable" } }, - "description": "Clear the cache.\nClear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", + "description": "Clear the cache.\nClear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, or `request` parameters.\nTo clear the cache only of specific fields, use the `fields` parameter.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html", "name": "indices.clear_cache", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.clear_cache" @@ -6162,9 +6191,14 @@ "stability": "stable" } }, - "description": "Clone an index.\nClone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes 
aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Clone an index.\nClone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number 
of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. 
The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html", "name": "indices.clone", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.clone" @@ -6204,6 +6238,11 @@ "docId": "indices-close", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-close.html", "name": "indices.close", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.close" @@ -6235,7 +6274,7 @@ "stability": "stable" } }, - "description": "Create an index.\nCreates a new index.", + "description": "Create an index.\nYou can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in 
the index.\n* Index aliases.\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, `acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.", "docId": "indices-create-index", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-index.html", "name": "indices.create", @@ -6367,9 +6406,15 @@ "stability": "stable" } }, - "description": "Delete indices.\nDeletes one or more indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html", + "description": "Delete indices.\nDeleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, 
visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.", + "docId": "indices-delete-index", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-index.html", "name": "indices.delete", + "privileges": { + "index": [ + "delete_index" + ] + }, "request": { "name": "Request", "namespace": "indices.delete" @@ -6402,8 +6447,14 @@ } }, "description": "Delete an alias.\nRemoves a data stream or index from an alias.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "docId": "indices-delete-alias", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-alias.html", "name": "indices.delete_alias", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.delete_alias" @@ -6519,12 +6570,12 @@ } }, "description": "Delete an index template.\nThe provided `<index-template>` may contain multiple template names separated by a comma. 
If multiple template\nnames are specified then there is no wildcard support and the provided names should match completely with\nexisting templates.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template.html", + "docId": "indices-delete-template", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-template.html", "name": "indices.delete_index_template", "privileges": { "cluster": [ - "manage_index_templates", - "manage" + "manage_index_templates" ] }, "request": { @@ -6554,13 +6605,13 @@ "stability": "stable" } }, - "description": "Deletes a legacy index template.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-template-v1.html", + "description": "Delete a legacy index template.", + "docId": "indices-delete-template-v1", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-template-v1.html", "name": "indices.delete_template", "privileges": { "cluster": [ - "manage_index_templates", - "manage" + "manage_index_templates" ] }, "request": { @@ -6595,7 +6646,7 @@ "stability": "experimental" } }, - "description": "Analyze the index disk usage.\nAnalyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.", + "description": "Analyze the index disk usage.\nAnalyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.\n\nNOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files 
might not be scanned by the API.\nSince stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.\nThe stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.", "docId": "indices-disk-usage", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-disk-usage.html", "name": "indices.disk_usage", @@ -6670,8 +6721,9 @@ "stability": "stable" } }, - "description": "Check indices.\nChecks if one or more indices, index aliases, or data streams exist.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html", + "description": "Check indices.\nCheck if one or more indices, index aliases, or data streams exist.", + "docId": "indices-exists", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-exists.html", "name": "indices.exists", "request": { "name": "Request", @@ -6774,9 +6826,17 @@ "stability": "stable" } }, - "description": "Check existence of index templates.\nReturns information about whether a particular index template exists.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-template-exists-v1.html", + "description": "Check existence of index templates.\nGet information about whether index templates exist.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "docId": "indices-template-exists-v1", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-template-exists-v1.html", + "extDocId": "index-templates", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-templates.html", "name": "indices.exists_template", + "privileges": { + 
"cluster": [ + "manage_index_templates" + ] + }, "request": { "name": "Request", "namespace": "indices.exists_template" @@ -6845,8 +6905,9 @@ "stability": "experimental" } }, - "description": "Get field usage stats.\nGet field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/field-usage-stats.html", + "description": "Get field usage stats.\nGet field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.\n\nThe response body reports the per-shard usage count of the data structures that back the fields in the index.\nA given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.", + "docId": "field-usage-stats", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/field-usage-stats.html", "name": "indices.field_usage_stats", "privileges": { "index": [ @@ -6888,6 +6949,11 @@ "docId": "indices-flush", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-flush.html", "name": "indices.flush", + "privileges": { + "index": [ + "maintenance" + ] + }, "request": { "name": "Request", "namespace": "indices.flush" @@ -6928,11 +6994,17 @@ "stability": "stable" } }, - "description": "Force a merge.\nPerform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them 
together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html", + "description": "Force a merge.\nPerform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting 
in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you cannot cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/<task_id>`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the `force_merge` threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a data stream's older backing 
indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receives no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor example:\n\n```\nPOST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```", + "docId": "indices-forcemerge", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-forcemerge.html", "extDocId": "index-modules-merge", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules-merge.html", "name": "indices.forcemerge", + "privileges": { + "index": [ + "maintenance" + ] + }, "request": { "name": "Request", "namespace": "indices.forcemerge" @@ -6970,8 +7042,9 @@ "stability": "stable" } }, - "description": "Get index information.\nReturns information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html", + "description": "Get index information.\nGet information about one or more indices. 
For data streams, the API returns information about the\nstream’s backing indices.", + "docId": "indices-get-index", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-index.html", "name": "indices.get", "privileges": { "index": [ @@ -7011,8 +7084,14 @@ } }, "description": "Get aliases.\nRetrieves information for one or more data stream or index aliases.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "docId": "indices-get-alias", + "docUrl": "", "name": "indices.get_alias", + "privileges": { + "index": [ + "view_index_metadata" + ] + }, "request": { "name": "Request", "namespace": "indices.get_alias" @@ -7141,9 +7220,15 @@ "stability": "stable" } }, - "description": "Get mapping definitions.\nRetrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html", + "description": "Get mapping definitions.\nRetrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.", + "docId": "indices-get-field-mapping", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-field-mapping.html", "name": "indices.get_field_mapping", + "privileges": { + "index": [ + "view_index_metadata" + ] + }, "request": { "name": "Request", "namespace": "indices.get_field_mapping" @@ -7182,13 +7267,13 @@ "stability": "stable" } }, - "description": "Get index templates.\nReturns information about one or more index templates.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template.html", + "description": "Get index templates.\nGet information about one or 
more index templates.", + "docId": "indices-get-template", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-template.html", "name": "indices.get_index_template", "privileges": { "cluster": [ - "manage_index_templates", - "manage" + "manage_index_templates" ] }, "request": { @@ -7228,9 +7313,15 @@ "stability": "stable" } }, - "description": "Get mapping definitions.\nRetrieves mapping definitions for one or more indices.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html", + "description": "Get mapping definitions.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", + "docId": "indices-get-mapping", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-mapping.html", "name": "indices.get_mapping", + "privileges": { + "index": [ + "view_index_metadata" + ] + }, "request": { "name": "Request", "namespace": "indices.get_mapping" @@ -7268,14 +7359,13 @@ "stability": "stable" } }, - "description": "Get index settings.\nReturns setting information for one or more indices. 
For data streams,\nreturns setting information for the stream’s backing indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html", + "description": "Get index settings.\nGet setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", + "docId": "indices-get-settings", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-settings.html", "name": "indices.get_settings", "privileges": { "index": [ - "view_index_metadata", - "monitor", - "manage" + "view_index_metadata" ] }, "request": { @@ -7323,9 +7413,17 @@ "stability": "stable" } }, - "description": "Get index templates.\nRetrieves information about one or more index templates.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-template-v1.html", + "description": "Get index templates.\nGet information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", + "docId": "indices-get-template-v1", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-template-v1.html", + "extDocId": "index-templates", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-templates.html", "name": "indices.get_template", + "privileges": { + "cluster": [ + "manage_index_templates" + ] + }, "request": { "name": "Request", "namespace": "indices.get_template" @@ -7443,9 +7541,15 @@ "stability": "stable" } }, - "description": "Opens a closed index.\nFor data streams, the API opens any closed backing indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", + "description": "Open a closed index.\nFor data streams, the API opens any closed backing indices.\n\nA closed index is 
blocked for read/write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nThis allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behavior can be turned off by using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.\nThis setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk-space which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.\n\nBecause opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.", + "docId": "indices-open-close", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-open-close.html", "name": "indices.open", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.open" @@ -7594,9 +7698,14 @@ "stability": "stable" } }, - "description": "Create or update an index template.\nIndex templates define 
settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely 
overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-template.html", "name": "indices.put_index_template", + "privileges": { + "cluster": [ + "manage_index_templates" + ] + }, "request": { "name": "Request", "namespace": "indices.put_index_template" @@ -7632,9 +7741,15 @@ "stability": "stable" } }, - "description": "Update field mappings.\nAdds new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html", + "description": "Update field mappings.\nAdd new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "docId": "indices-put-mapping", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-put-mapping.html", "name": "indices.put_mapping", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.put_mapping" @@ -7714,8 +7829,9 @@ "stability": "stable" } }, - "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, 
which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates-v1.html", + "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders 
overriding them.\nNOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.", + "docId": "index-templates-v1", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-templates-v1.html", "extDocId": "index-templates", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-templates.html", "name": "indices.put_template", @@ -7760,9 +7876,15 @@ "stability": "stable" } }, - "description": "Get index recovery information.\nGet information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery 
will not be shown in the recovery API.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-recovery.html", + "description": "Get index recovery information.\nGet information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", + "docId": "indices-recovery", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-recovery.html", "name": "indices.recovery", + "privileges": { + 
"index": [ + "monitor" + ] + }, "request": { "name": "Request", "namespace": "indices.recovery" @@ -7800,9 +7922,15 @@ "stability": "stable" } }, - "description": "Refresh an index.\nA refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-refresh.html", + "description": "Refresh an index.\nA refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", + "docId": "indices-refresh", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-refresh.html", "name": "indices.refresh", + "privileges": { + "index": [ + "maintenance" + ] + }, "request": { "name": "Request", "namespace": "indices.refresh" @@ -7873,9 +8001,15 @@ "stability": "stable" } }, - "description": "Resolve the cluster.\nResolve the specified index expressions to return information about each cluster, including the local cluster, if 
included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html", + "description": "Resolve the cluster.\nResolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data 
streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.\n\nFor example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.\nEach cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.\n\n**Advantages of using this endpoint before a cross-cluster search**\n\nYou may want to exclude a cluster or index from a search when:\n\n* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.\n* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search.\n* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the \"error\" field in the `_resolve/cluster` response will be present. 
(This is also where security/permission errors will be shown.)\n* A remote cluster is an older version that does not support the feature you want to use in your search.", + "docId": "indices-resolve-cluster-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-resolve-cluster-api.html", "name": "indices.resolve_cluster", + "privileges": { + "index": [ + "view_index_metadata" + ] + }, "request": { "name": "Request", "namespace": "indices.resolve_cluster" @@ -7909,8 +8043,14 @@ } }, "description": "Resolve indices.\nResolve the names and/or index patterns for indices, aliases, and data streams.\nMultiple patterns and remote clusters are supported.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-index-api.html", + "docId": "indices-resolve-index-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-resolve-index-api.html", "name": "indices.resolve_index", + "privileges": { + "index": [ + "view_index_metadata" + ] + }, "request": { "name": "Request", "namespace": "indices.resolve_index" @@ -7943,10 +8083,15 @@ "stability": "stable" } }, - "description": "Roll over to a new index.\nCreates a new index for a data stream or index alias.", + "description": "Roll over to a new index.\nTIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less 
maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", "docId": "indices-rollover-index", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-rollover-index.html", "name": "indices.rollover", + "privileges": { + "index": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "indices.rollover" @@ -7988,8 +8133,14 @@ } }, "description": "Get index segments.\nGet low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the 
stream's backing indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-segments.html", + "docId": "indices-segments", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-segments.html", "name": "indices.segments", + "privileges": { + "index": [ + "monitor" + ] + }, "request": { "name": "Request", "namespace": "indices.segments" @@ -8024,12 +8175,12 @@ } }, "description": "Get index shard stores.\nGet store information about replica shards in one or more indices.\nFor data streams, the API retrieves store information for the stream's backing indices.\n\nThe index shard stores API returns the following information:\n\n* The node on which each replica shard exists.\n* The allocation ID for each replica shard.\n* A unique ID for each replica shard.\n* Any errors encountered while opening the shard index or from an earlier failure.\n\nBy default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shards-stores.html", + "docId": "indices-shards-stores", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shards-stores.html", "name": "indices.shard_stores", "privileges": { "index": [ - "monitor", - "manage" + "monitor" ] }, "request": { @@ -8111,9 +8262,15 @@ "stability": "stable" } }, - "description": "Simulate an index.\nReturns the index configuration that would be applied to the specified index from an existing index template.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-index.html", + "description": "Simulate an index.\nGet the index configuration that would be applied to the specified index from an existing index template.", + "docId": "indices-simulate-template", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-simulate-template.html", "name": "indices.simulate_index_template", + "privileges": { + "cluster": [ + "manage_index_templates" + ] + }, "request": { "name": "Request", "namespace": "indices.simulate_index_template" @@ -8153,8 +8310,7 @@ "name": "indices.simulate_template", "privileges": { "cluster": [ - "manage_index_templates", - "manage" + "manage_index_templates" ] }, "request": { @@ -8240,11 +8396,11 @@ } }, "description": "Get index statistics.\nFor data streams, the API retrieves statistics for the stream's backing indices.\n\nBy default, the returned statistics are index-level with `primaries` and `total` aggregations.\n`primaries` are the values for only the primary shards.\n`total` are the accumulated values for both primary and replica shards.\n\nTo get shard-level statistics, set the `level` parameter to `shards`.\n\nNOTE: When moving to another node, the shard-level statistics for a shard are cleared.\nAlthough the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html", + "docId": "indices-stats", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-stats.html", "name": "indices.stats", "privileges": { "index": [ - "manage", "monitor" ] }, @@ -8295,7 +8451,8 @@ } }, "description": "Unfreeze an index.\nWhen a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/unfreeze-index-api.html", + "docId": "unfreeze-index-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/unfreeze-index-api.html", "name": "indices.unfreeze", "privileges": { "index": [ @@ -107563,7 +107720,7 @@ "body": { "kind": "no_body" }, - "description": "Delete component 
templates.\nDeletes component templates.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", + "description": "Delete component templates.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.", "inherits": { "type": { "name": "RequestBase", @@ -107616,7 +107773,7 @@ } } ], - "specLocation": "cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts#L24-L56" + "specLocation": "cluster/delete_component_template/ClusterDeleteComponentTemplateRequest.ts#L24-L55" }, { "kind": "response", @@ -107766,7 +107923,7 @@ "body": { "kind": "no_body" }, - "description": "Get component templates.\nRetrieves information about component templates.", + "description": "Get component templates.\nGet information about component templates.", "inherits": { "type": { "name": "RequestBase", @@ -109099,7 +109256,7 @@ } }, { - "description": "Optional user metadata about the component template.\nMay have any contents. This map is not automatically generated by Elasticsearch.\nThis information is stored in the cluster state, so keeping it short is preferable.\nTo unset `_meta`, replace the template without specifying this information.", + "description": "Optional user metadata about the component template.\nIt may have any contents. 
This map is not automatically generated by Elasticsearch.\nThis information is stored in the cluster state, so keeping it short is preferable.\nTo unset `_meta`, replace the template without specifying this information.", "name": "_meta", "required": false, "type": { @@ -109124,7 +109281,7 @@ } ] }, - "description": "Create or update a component template.\nCreates or updates a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.", + "description": "Create or update a component template.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nAn index template can be composed of multiple component templates.\nTo use a component template, specify it in an index template’s `composed_of` list.\nComponent templates are only applied to new data streams and indices as part of a matching index template.\n\nSettings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component 
template.\n\nComponent templates are only used during index creation.\nFor data streams, this includes data stream creation and the creation of a stream’s backing indices.\nChanges to component templates do not affect existing indices, including a stream’s backing indices.\n\nYou can use C-style `/* *\\/` block comments in component templates.\nYou can include comments anywhere in the request body except before the opening curly bracket.\n\n**Applying component templates**\n\nYou cannot directly apply a component template to a data stream or index.\nTo be applied, a component template must be included in an index template's `composed_of` list.", "inherits": { "type": { "name": "RequestBase", @@ -109177,7 +109334,7 @@ } } ], - "specLocation": "cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L95" + "specLocation": "cluster/put_component_template/ClusterPutComponentTemplateRequest.ts#L25-L99" }, { "kind": "response", @@ -117287,7 +117444,7 @@ "body": { "kind": "no_body" }, - "description": "Delete a dangling index.\n\nIf Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.", + "description": "Delete a dangling index.\nIf Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.\nFor example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline.", "inherits": { "type": { "name": "RequestBase", @@ -117350,7 +117507,7 @@ } } ], - "specLocation": "dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts#L24-L48" + "specLocation": "dangling_indices/delete_dangling_index/DeleteDanglingIndexRequest.ts#L24-L49" }, { "kind": "response", @@ -117441,7 +117598,7 @@ } } ], - "specLocation": 
"dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts#L24-L49" + "specLocation": "dangling_indices/import_dangling_index/ImportDanglingIndexRequest.ts#L24-L51" }, { "kind": "response", @@ -117545,7 +117702,7 @@ }, "path": [], "query": [], - "specLocation": "dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts#L22-L33" + "specLocation": "dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts#L22-L35" }, { "kind": "response", @@ -129485,7 +129642,7 @@ } ] }, - "description": "Get tokens from text analysis.\nThe analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) on a text string and returns the resulting tokens.", + "description": "Get tokens from text analysis.\nThe analyze API performs analysis on a text string and returns the resulting tokens.\n\nGenerating an excessive amount of tokens may cause a node to run out of memory.\nThe `index.analyze.max_token_count` setting enables you to limit the number of tokens that can be produced.\nIf more than this limit of tokens gets generated, an error occurs.\nThe `_analyze` endpoint without a specified index will always use `10000` as its limit.", "inherits": { "type": { "name": "RequestBase", @@ -129513,7 +129670,7 @@ } ], "query": [], - "specLocation": "indices/analyze/IndicesAnalyzeRequest.ts#L27-L93" + "specLocation": "indices/analyze/IndicesAnalyzeRequest.ts#L27-L100" }, { "kind": "response", @@ -129626,7 +129783,7 @@ "body": { "kind": "no_body" }, - "description": "Clear the cache.\nClear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.", + "description": "Clear the cache.\nClear the cache of one or more indices.\nFor data streams, the API clears the caches of the stream's backing indices.\n\nBy default, the clear cache API clears all caches.\nTo clear only specific caches, use the `fielddata`, `query`, or `request` parameters.\nTo clear the cache only of 
specific fields, use the `fields` parameter.", "inherits": { "type": { "name": "RequestBase", @@ -129740,7 +129897,7 @@ } } ], - "specLocation": "indices/clear_cache/IndicesIndicesClearCacheRequest.ts#L23-L78" + "specLocation": "indices/clear_cache/IndicesClearCacheRequest.ts#L23-L83" }, { "kind": "response", @@ -129814,7 +129971,7 @@ } ] }, - "description": "Clone an index.\nClone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. 
If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Clone an index.\nClone an existing index into a new index.\nEach original primary shard is cloned into a new primary shard in the new index.\n\nIMPORTANT: Elasticsearch does not apply index templates to the resulting index.\nThe API also does not copy index metadata from the original index.\nIndex metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information.\nFor example, if you clone a CCR follower index, the resulting clone will not be a follower index.\n\nThe clone API copies most index settings from the source index to the resulting index, with the exception of `index.number_of_replicas` and `index.auto_expand_replicas`.\nTo set the number of replicas in the resulting index, configure these settings in the clone request.\n\nCloning works as follows:\n\n* First, it creates a new target index with the same definition as the source index.\n* Then it hard-links segments from the source index into the target index. 
If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Finally, it recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be cloned if they meet the following requirements:\n\n* The index must be marked as read-only and have a cluster health status of green.\n* The target index must not exist.\n* The source index must have the same number of primary shards as the target index.\n* The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.\n\nThe current write index on a data stream cannot be cloned.\nIn order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.\n\nNOTE: Mappings cannot be specified in the `_clone` request. The mappings of the source index will be used for the target index.\n\n**Monitor the cloning process**\n\nThe cloning process can be monitored with the cat recovery API or the cluster health API can be used to wait until all primary shards have been allocated by setting the `wait_for_status` parameter to `yellow`.\n\nThe `_clone` API returns as soon as the target index has been added to the cluster state, before any shards have been allocated.\nAt this point, all shards are in the state unassigned.\nIf, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.\n\nOnce the primary shard is allocated, it moves to state initializing, and the clone process begins.\nWhen the clone operation completes, the shard will become active.\nAt that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.\n\n**Wait for active shards**\n\nBecause the clone operation creates a new index to clone the shards 
to, the wait for active shards setting on index creation applies to the clone index action as well.", "inherits": { "type": { "name": "RequestBase", @@ -129894,7 +130051,7 @@ } } ], - "specLocation": "indices/clone/IndicesCloneRequest.ts#L27-L98" + "specLocation": "indices/clone/IndicesCloneRequest.ts#L27-L120" }, { "kind": "response", @@ -130122,7 +130279,7 @@ } } ], - "specLocation": "indices/close/CloseIndexRequest.ts#L24-L94" + "specLocation": "indices/close/CloseIndexRequest.ts#L24-L95" }, { "kind": "response", @@ -130240,7 +130397,7 @@ } ] }, - "description": "Create an index.\nCreates a new index.", + "description": "Create an index.\nYou can use the create index API to add a new index to an Elasticsearch cluster.\nWhen creating an index, you can specify the following:\n\n* Settings for the index.\n* Mappings for fields in the index.\n* Index aliases.\n\n**Wait for active shards**\n\nBy default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out.\nThe index creation response will indicate what happened.\nFor example, `acknowledged` indicates whether the index was successfully created in the cluster, while `shards_acknowledged` indicates whether the requisite number of shard copies were started for each shard in the index before timing out.\nNote that it is still possible for either `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation to be successful.\nThese values simply indicate whether the operation completed before the timeout.\nIf `acknowledged` is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon.\nIf `shards_acknowledged` is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, 
`acknowledged` is `true`).\n\nYou can change the default of only waiting for the primary shards to start through the index setting `index.write.wait_for_active_shards`.\nNote that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations.", "inherits": { "type": { "name": "RequestBase", @@ -130306,7 +130463,7 @@ } } ], - "specLocation": "indices/create/IndicesCreateRequest.ts#L28-L82" + "specLocation": "indices/create/IndicesCreateRequest.ts#L28-L102" }, { "kind": "response", @@ -130660,7 +130817,7 @@ "body": { "kind": "no_body" }, - "description": "Delete indices.\nDeletes one or more indices.", + "description": "Delete indices.\nDeleting an index deletes its documents, shards, and metadata.\nIt does not delete related Kibana components, such as data views, visualizations, or dashboards.\n\nYou cannot delete the current write index of a data stream.\nTo delete the index, you must roll over the data stream so a new write index is created.\nYou can then use the delete index API to delete the previous write index.", "inherits": { "type": { "name": "RequestBase", @@ -130752,7 +130909,7 @@ } } ], - "specLocation": "indices/delete/IndicesDeleteRequest.ts#L24-L74" + "specLocation": "indices/delete/IndicesDeleteRequest.ts#L24-L81" }, { "kind": "response", @@ -130845,7 +131002,7 @@ } } ], - "specLocation": "indices/delete_alias/IndicesDeleteAliasRequest.ts#L24-L58" + "specLocation": "indices/delete_alias/IndicesDeleteAliasRequest.ts#L24-L60" }, { "kind": "response", @@ -131098,7 +131255,7 @@ } } ], - "specLocation": "indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts#L24-L53" + "specLocation": "indices/delete_index_template/IndicesDeleteIndexTemplateRequest.ts#L24-L54" }, { "kind": "response", @@ -131126,7 +131283,7 @@ "body": { "kind": "no_body" }, - "description": "Deletes a legacy index template.", + "description": "Delete a legacy index template.", "inherits": { "type": { "name": "RequestBase", @@ 
-131179,7 +131336,7 @@ } } ], - "specLocation": "indices/delete_template/IndicesDeleteTemplateRequest.ts#L24-L52" + "specLocation": "indices/delete_template/IndicesDeleteTemplateRequest.ts#L24-L53" }, { "kind": "response", @@ -131207,7 +131364,7 @@ "body": { "kind": "no_body" }, - "description": "Analyze the index disk usage.\nAnalyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.", + "description": "Analyze the index disk usage.\nAnalyze the disk usage of each field of an index or data stream.\nThis API might not support indices created in previous Elasticsearch versions.\nThe result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.\n\nNOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index `store_size` value because some small metadata files are ignored and some parts of data files might not be scanned by the API.\nSince stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate.\nThe stored size of the `_id` field is likely underestimated while the `_source` field is overestimated.", "inherits": { "type": { "name": "RequestBase", @@ -131299,7 +131456,7 @@ } } ], - "specLocation": "indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L74" + "specLocation": "indices/disk_usage/IndicesDiskUsageRequest.ts#L23-L78" }, { "kind": "response", @@ -131393,7 +131550,7 @@ "body": { "kind": "no_body" }, - "description": "Check indices.\nChecks if one or more indices, index aliases, or data streams exist.", + "description": "Check indices.\nCheck if one or more indices, index aliases, or data streams exist.", "inherits": { "type": { "name": "RequestBase", @@ -131498,7 +131655,7 @@ } } ], - "specLocation": 
"indices/exists/IndicesExistsRequest.ts#L23-L73" + "specLocation": "indices/exists/IndicesExistsRequest.ts#L23-L74" }, { "kind": "response", @@ -131692,7 +131849,7 @@ "body": { "kind": "no_body" }, - "description": "Check existence of index templates.\nReturns information about whether a particular index template exists.", + "description": "Check existence of index templates.\nGet information about whether index templates exist.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", "inherits": { "type": { "name": "RequestBase", @@ -131705,7 +131862,7 @@ }, "path": [ { - "description": "The comma separated names of the index templates", + "description": "A comma-separated list of index template names used to limit the request.\nWildcard (`*`) expressions are supported.", "name": "name", "required": true, "type": { @@ -131719,9 +131876,10 @@ ], "query": [ { - "description": "Return settings in flat format (default: false)", + "description": "Indicates whether to use a flat format for the response.", "name": "flat_settings", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { @@ -131731,9 +131889,10 @@ } }, { - "description": "Return local information, do not retrieve the state from master node (default: false)", + "description": "Indicates whether to get information from the local node only.", "name": "local", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { @@ -131743,9 +131902,10 @@ } }, { - "description": "Explicit operation timeout for connection to master node", + "description": "The period to wait for the master node.\nIf the master node is not available before the timeout expires, the request fails and returns an error.\nTo indicate that the request should never 
timeout, set it to `-1`.", "name": "master_timeout", "required": false, + "serverDefault": "30s", "type": { "kind": "instance_of", "type": { @@ -131755,7 +131915,7 @@ } } ], - "specLocation": "indices/exists_template/IndicesExistsTemplateRequest.ts#L24-L39" + "specLocation": "indices/exists_template/IndicesExistsTemplateRequest.ts#L24-L63" }, { "kind": "response", @@ -132236,7 +132396,7 @@ "body": { "kind": "no_body" }, - "description": "Get field usage stats.\nGet field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.", + "description": "Get field usage stats.\nGet field usage information for each shard and field of an index.\nField usage statistics are automatically captured when queries are running on a cluster.\nA shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.\n\nThe response body reports the per-shard usage count of the data structures that back the fields in the index.\nA given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.", "inherits": { "type": { "name": "RequestBase", @@ -132351,7 +132511,7 @@ } } ], - "specLocation": "indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L87" + "specLocation": "indices/field_usage_stats/IndicesFieldUsageStatsRequest.ts#L29-L91" }, { "kind": "response", @@ -132601,7 +132761,7 @@ } } ], - "specLocation": "indices/flush/IndicesFlushRequest.ts#L23-L81" + "specLocation": "indices/flush/IndicesFlushRequest.ts#L23-L82" }, { "kind": "response", @@ -132629,7 +132789,7 @@ "body": { "kind": "no_body" }, - "description": "Force a merge.\nPerform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge 
on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.", + "description": "Force a merge.\nPerform the force merge operation on the shards of one or more indices.\nFor data streams, the API forces a merge on the shards of the stream's backing indices.\n\nMerging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents.\nMerging normally happens automatically, but sometimes it is useful to trigger a merge manually.\n\nWARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes).\nWhen documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a \"tombstone\".\nThese soft-deleted documents are automatically cleaned up during regular segment merges.\nBut force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges.\nSo the number of soft-deleted documents can 
then grow rapidly, resulting in higher disk usage and worse search performance.\nIf you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.\n\n**Blocks during a force merge**\n\nCalls to this API block until the merge is complete (unless request contains `wait_for_completion=false`).\nIf the client connection is lost before completion then the force merge process will continue in the background.\nAny new requests to force merge the same indices will also block until the ongoing force merge is complete.\n\n**Running force merge asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task.\nHowever, you can not cancel this task as the force merge task is not cancelable.\nElasticsearch creates a record of this task as a document at `_tasks/<task_id>`.\nWhen you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.\n\n**Force merging multiple indices**\n\nYou can force merge multiple indices with a single request by targeting:\n\n* One or more data streams that contain multiple backing indices\n* Multiple indices\n* One or more aliases\n* All data streams and indices in a cluster\n\nEach targeted shard is force-merged separately using the `force_merge` threadpool.\nBy default each node only has a single `force_merge` thread which means that the shards on that node are force-merged one at a time.\nIf you expand the `force_merge` threadpool on a node then it will force merge its shards in parallel.\n\nForce merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case the `max_num_segments` parameter is set to `1`, to rewrite all segments into a new one.\n\n**Data streams and time-based indices**\n\nForce-merging is useful for managing a 
data stream's older backing indices and other time-based indices, particularly after a rollover.\nIn these cases, each index only receives indexing traffic for a certain period of time.\nOnce an index receives no more writes, its shards can be force-merged to a single segment.\nThis can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches.\nFor example:\n\n```\nPOST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1\n```", "inherits": { "type": { "name": "RequestBase", @@ -132740,7 +132900,7 @@ } } ], - "specLocation": "indices/forcemerge/IndicesForceMergeRequest.ts#L24-L56" + "specLocation": "indices/forcemerge/IndicesForceMergeRequest.ts#L24-L98" }, { "kind": "response", @@ -132805,7 +132965,7 @@ "name": "Feature", "namespace": "indices.get" }, - "specLocation": "indices/get/IndicesGetRequest.ts#L91-L95" + "specLocation": "indices/get/IndicesGetRequest.ts#L92-L96" }, { "kind": "type_alias", @@ -132813,7 +132973,7 @@ "name": "Features", "namespace": "indices.get" }, - "specLocation": "indices/get/IndicesGetRequest.ts#L96-L96", + "specLocation": "indices/get/IndicesGetRequest.ts#L97-L97", "type": { "kind": "union_of", "items": [ @@ -132845,7 +133005,7 @@ "body": { "kind": "no_body" }, - "description": "Get index information.\nReturns information about one or more indices. For data streams, the API returns information about the\nstream’s backing indices.", + "description": "Get index information.\nGet information about one or more indices. 
For data streams, the API returns information about the\nstream’s backing indices.", "inherits": { "type": { "name": "RequestBase", @@ -132982,7 +133142,7 @@ } } ], - "specLocation": "indices/get/IndicesGetRequest.ts#L24-L89" + "specLocation": "indices/get/IndicesGetRequest.ts#L24-L90" }, { "kind": "response", @@ -133144,7 +133304,7 @@ } } ], - "specLocation": "indices/get_alias/IndicesGetAliasRequest.ts#L24-L73" + "specLocation": "indices/get_alias/IndicesGetAliasRequest.ts#L24-L75" }, { "kind": "response", @@ -133472,7 +133632,7 @@ "body": { "kind": "no_body" }, - "description": "Get mapping definitions.\nRetrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.", + "description": "Get mapping definitions.\nRetrieves mapping definitions for one or more fields.\nFor data streams, the API retrieves field mappings for the stream’s backing indices.\n\nThis API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.", "inherits": { "type": { "name": "RequestBase", @@ -133485,7 +133645,7 @@ }, "path": [ { - "description": "Comma-separated list or wildcard expression of fields used to limit returned information.", + "description": "Comma-separated list or wildcard expression of fields used to limit returned information.\nSupports wildcards (`*`).", "name": "fields", "required": true, "type": { @@ -133576,7 +133736,7 @@ } } ], - "specLocation": "indices/get_field_mapping/IndicesGetFieldMappingRequest.ts#L23-L74" + "specLocation": "indices/get_field_mapping/IndicesGetFieldMappingRequest.ts#L23-L79" }, { "kind": "response", @@ -133679,7 +133839,7 @@ "body": { "kind": "no_body" }, - "description": "Get index templates.\nReturns information about one or more index templates.", + "description": "Get index templates.\nGet information about one or more index templates.", "inherits": { "type": { "name": "RequestBase", @@ -133767,7 +133927,7 @@ } } ], 
- "specLocation": "indices/get_index_template/IndicesGetIndexTemplateRequest.ts#L24-L61" + "specLocation": "indices/get_index_template/IndicesGetIndexTemplateRequest.ts#L24-L62" }, { "kind": "response", @@ -133836,7 +133996,7 @@ "body": { "kind": "no_body" }, - "description": "Get mapping definitions.\nRetrieves mapping definitions for one or more indices.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", + "description": "Get mapping definitions.\nFor data streams, the API retrieves mappings for the stream’s backing indices.", "inherits": { "type": { "name": "RequestBase", @@ -133928,7 +134088,7 @@ } } ], - "specLocation": "indices/get_mapping/IndicesGetMappingRequest.ts#L24-L73" + "specLocation": "indices/get_mapping/IndicesGetMappingRequest.ts#L24-L74" }, { "kind": "response", @@ -133967,7 +134127,7 @@ "body": { "kind": "no_body" }, - "description": "Get index settings.\nReturns setting information for one or more indices. For data streams,\nreturns setting information for the stream’s backing indices.", + "description": "Get index settings.\nGet setting information for one or more indices.\nFor data streams, it returns setting information for the stream's backing indices.", "inherits": { "type": { "name": "RequestBase", @@ -134097,7 +134257,7 @@ } } ], - "specLocation": "indices/get_settings/IndicesGetSettingsRequest.ts#L24-L92" + "specLocation": "indices/get_settings/IndicesGetSettingsRequest.ts#L24-L93" }, { "kind": "response", @@ -134136,7 +134296,7 @@ "body": { "kind": "no_body" }, - "description": "Get index templates.\nRetrieves information about one or more index templates.", + "description": "Get index templates.\nGet information about one or more index templates.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.", "inherits": { "type": { "name": "RequestBase", @@ -134202,7 +134362,7 @@ } } ], - 
"specLocation": "indices/get_template/IndicesGetTemplateRequest.ts#L24-L57" + "specLocation": "indices/get_template/IndicesGetTemplateRequest.ts#L24-L62" }, { "kind": "response", @@ -134451,7 +134611,7 @@ "body": { "kind": "no_body" }, - "description": "Opens a closed index.\nFor data streams, the API opens any closed backing indices.", + "description": "Open a closed index.\nFor data streams, the API opens any closed backing indices.\n\nA closed index is blocked for read/write operations and does not allow all operations that opened indices allow.\nIt is not possible to index documents or to search for documents in a closed index.\nThis allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.\n\nWhen opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index.\nThe shards will then go through the normal recovery process.\nThe data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.\n\nYou can open and close multiple indices.\nAn error is thrown if the request explicitly refers to a missing index.\nThis behavior can be turned off by using the `ignore_unavailable=true` parameter.\n\nBy default, you must explicitly name the indices you are opening or closing.\nTo open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.\nThis setting can also be changed with the cluster update settings API.\n\nClosed indices consume a significant amount of disk-space which can cause problems in managed environments.\nClosing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`.\n\nBecause opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies 
to the `_open` and `_close` index actions as well.", "inherits": { "type": { "name": "RequestBase", @@ -134558,7 +134718,7 @@ } } ], - "specLocation": "indices/open/IndicesOpenRequest.ts#L24-L82" + "specLocation": "indices/open/IndicesOpenRequest.ts#L24-L105" }, { "kind": "response", @@ -134993,7 +135153,7 @@ } } ], - "specLocation": "indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L121-L143" + "specLocation": "indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L152-L174" }, { "kind": "request", @@ -135067,7 +135227,7 @@ } }, { - "description": "Version number used to manage index templates externally.\nThis number is not automatically generated by Elasticsearch.", + "description": "Version number used to manage index templates externally.\nThis number is not automatically generated by Elasticsearch.\nExternal systems can use these version numbers to simplify template management.\nTo unset a version, replace the template without specifying one.", "name": "version", "required": false, "type": { @@ -135079,7 +135239,7 @@ } }, { - "description": "Optional user metadata about the index template.\nMay have any contents.\nThis map is not automatically generated by Elasticsearch.", + "description": "Optional user metadata about the index template.\nIt may have any contents.\nIt is not automatically generated or used by Elasticsearch.\nThis user-defined object is stored in the cluster state, so keeping it short is preferable\nTo unset the metadata, replace the template without specifying it.", "docId": "mapping-meta-field", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html", "name": "_meta", @@ -135133,7 +135293,7 @@ } ] }, - "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.", + "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can 
be applied automatically to new indices.\n\nElasticsearch applies templates to new indices based on a wildcard pattern that matches the index name.\nIndex templates are applied during data stream or index creation.\nFor data streams, these settings and mappings are applied when the stream's backing indices are created.\nSettings and mappings specified in a create index API request override any settings or mappings specified in an index template.\nChanges to index templates do not affect existing indices, including the existing backing indices of a data stream.\n\nYou can use C-style `/* *\\/` block comments in index templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Multiple matching templates**\n\nIf multiple index templates match the name of a new index or data stream, the template with the highest priority is used.\n\nMultiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.\n\n**Composing aliases, mappings, and settings**\n\nWhen multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates.\nAny mappings, settings, or aliases from the parent index template are merged in next.\nFinally, any configuration on the index request itself is merged.\nMapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration.\nIf a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one.\nThis recursive merging strategy applies not only to field mappings, but also root options like `dynamic_templates` and `meta`.\nIf an earlier component contains a
`dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end.\nIf an entry already exists with the same key, then it is overwritten by the new definition.", "inherits": { "type": { "name": "RequestBase", @@ -135198,7 +135358,7 @@ } } ], - "specLocation": "indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L119" + "specLocation": "indices/put_index_template/IndicesPutIndexTemplateRequest.ts#L37-L150" }, { "kind": "response", @@ -135412,7 +135572,7 @@ } ] }, - "description": "Update field mappings.\nAdds new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Update field mappings.\nAdd new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", "inherits": { "type": { "name": "RequestBase", @@ -135517,7 +135677,7 @@ } } ], - "specLocation": "indices/put_mapping/IndicesPutMappingRequest.ts#L41-L149" + "specLocation": "indices/put_mapping/IndicesPutMappingRequest.ts#L41-L151" }, { "kind": "response", @@ -135786,7 +135946,7 @@ } }, { - "description": "Version number used to manage index templates externally. This number\nis not automatically generated by Elasticsearch.", + "description": "Version number used to manage index templates externally. 
This number\nis not automatically generated by Elasticsearch.\nTo unset a version, replace the template without specifying one.", "name": "version", "required": false, "type": { @@ -135799,7 +135959,7 @@ } ] }, - "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.", + "description": "Create or update an index template.\nIndex templates define settings, mappings, and aliases that can be applied automatically to new indices.\nElasticsearch applies templates to new indices based on an index pattern that matches the index name.\n\nIMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.\n\nComposable templates always take precedence over legacy templates.\nIf no composable template matches a new index, matching legacy templates are applied according to their order.\n\nIndex templates are only applied during index creation.\nChanges to index templates do not affect existing indices.\nSettings and mappings specified in create index API requests override any settings or mappings specified in an index template.\n\nYou can use C-style `/* *\\/` block comments in index 
templates.\nYou can include comments anywhere in the request body, except before the opening curly bracket.\n\n**Indices matching multiple templates**\n\nMultiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index.\nThe order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them.\nNOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.", "inherits": { "type": { "name": "RequestBase", @@ -135875,7 +136035,7 @@ } } ], - "specLocation": "indices/put_template/IndicesPutTemplateRequest.ts#L29-L106" + "specLocation": "indices/put_template/IndicesPutTemplateRequest.ts#L29-L117" }, { "kind": "response", @@ -136497,7 +136657,7 @@ "body": { "kind": "no_body" }, - "description": "Get index recovery information.\nGet information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that 
currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", + "description": "Get index recovery information.\nGet information about ongoing and completed shard recoveries for one or more indices.\nFor data streams, the API returns information for the stream's backing indices.\n\nAll recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.\n\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard.\nWhen a shard recovery completes, the recovered shard is available for search and indexing.\n\nRecovery automatically occurs during the following processes:\n\n* When creating an index for the first time.\n* When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.\n* Creation of new replica shard copies from the primary.\n* Relocation of a shard copy to a different node in the same cluster.\n* A snapshot restore operation.\n* A clone, shrink, or split operation.\n\nYou can determine the cause of a shard recovery using the recovery or cat recovery APIs.\n\nThe index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster.\nIt only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist.\nThis means that if a shard copy completes a recovery and then Elasticsearch 
relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.", "inherits": { "type": { "name": "RequestBase", @@ -136550,7 +136710,7 @@ } } ], - "specLocation": "indices/recovery/IndicesRecoveryRequest.ts#L23-L70" + "specLocation": "indices/recovery/IndicesRecoveryRequest.ts#L23-L74" }, { "kind": "response", @@ -136959,7 +137119,7 @@ "body": { "kind": "no_body" }, - "description": "Refresh an index.\nA refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.", + "description": "Refresh an index.\nA refresh makes recent operations performed on one or more indices available for search.\nFor data streams, the API runs the refresh operation on the stream’s backing indices.\n\nBy default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds.\nYou can change this default interval with the `index.refresh_interval` setting.\n\nRefresh requests are synchronous and do not return a response until the refresh operation completes.\n\nRefreshes are resource-intensive.\nTo ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.\n\nIf your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option.\nThis option ensures the indexing operation waits for a periodic refresh before running the search.", "inherits": { "type": { "name": "RequestBase", @@ -137025,7 +137185,7 @@ } } ], - "specLocation": "indices/refresh/IndicesRefreshRequest.ts#L23-L61" + "specLocation": "indices/refresh/IndicesRefreshRequest.ts#L23-L74" }, { "kind": "response", @@ -137228,7 +137388,7 @@ "body": { "kind": "no_body" }, - 
"description": "Resolve the cluster.\nResolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.", + "description": "Resolve the cluster.\nResolve the specified index expressions to return information about each cluster, including the local cluster, if included.\nMultiple patterns and remote clusters are supported.\n\nThis endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.\n\nYou use the same index expression with this endpoint as you would for cross-cluster search.\nIndex and cluster exclusions are also supported with this endpoint.\n\nFor each cluster in the index expression, information is returned about:\n\n* Whether the querying (\"local\") cluster is currently connected to each remote cluster in the index expression scope.\n* Whether each remote cluster is configured with `skip_unavailable` as `true` or `false`.\n* Whether 
there are any indices, aliases, or data streams on that cluster that match the index expression.\n* Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).\n* Cluster version information, including the Elasticsearch server version.\n\nFor example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`.\nEach cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`.\n\n**Advantages of using this endpoint before a cross-cluster search**\n\nYou may want to exclude a cluster or index from a search when:\n\n* A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail.\n* A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search.\n* The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the \"error\" field in the `_resolve/cluster` response will be present. 
(This is also where security/permission errors will be shown.)\n* A remote cluster is an older version that does not support the feature you want to use in your search.", "inherits": { "type": { "name": "RequestBase", @@ -137304,7 +137464,7 @@ } } ], - "specLocation": "indices/resolve_cluster/ResolveClusterRequest.ts#L23-L76" + "specLocation": "indices/resolve_cluster/ResolveClusterRequest.ts#L23-L89" }, { "kind": "interface", @@ -137327,7 +137487,7 @@ } }, { - "description": "The skip_unavailable setting for a remote cluster.", + "description": "The `skip_unavailable` setting for a remote cluster.", "name": "skip_unavailable", "required": true, "type": { @@ -137351,7 +137511,7 @@ } }, { - "description": "Provides error messages that are likely to occur if you do a search with this index expression\non the specified cluster (e.g., lack of security privileges to query an index).", + "description": "Provides error messages that are likely to occur if you do a search with this index expression\non the specified cluster (for example, lack of security privileges to query an index).", "name": "error", "required": false, "type": { @@ -137480,7 +137640,7 @@ } } ], - "specLocation": "indices/resolve_index/ResolveIndexRequest.ts#L23-L61" + "specLocation": "indices/resolve_index/ResolveIndexRequest.ts#L23-L63" }, { "kind": "interface", @@ -137748,7 +137908,7 @@ } ] }, - "description": "Roll over to a new index.\nCreates a new index for a data stream or index alias.", + "description": "Roll over to a new index.\nTIP: It is recommended to use the index lifecycle rollover action to automate rollovers.\n\nThe rollover API creates a new index for a data stream or index alias.\nThe API behavior depends on the rollover target.\n\n**Roll over a data stream**\n\nIf you roll over a data stream, the API creates a new write index for the stream.\nThe stream's previous write index becomes a regular backing index.\nA rollover also increments the data stream's generation.\n\n**Roll over an 
index alias with a write index**\n\nTIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data.\nData streams replace this functionality, require less maintenance, and automatically integrate with data tiers.\n\nIf an index alias points to multiple indices, one of the indices must be a write index.\nThe rollover API creates a new write index for the alias with `is_write_index` set to `true`.\nThe API also sets `is_write_index` to `false` for the previous write index.\n\n**Roll over an index alias with one index**\n\nIf you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.\n\nNOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting.\n\n**Increment index names for an alias**\n\nWhen you roll over an index alias, you can specify a name for the new index.\nIf you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number.\nFor example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`.\nThis number is always six characters and zero-padded, regardless of the previous index's name.\n\nIf you use an index alias for time series data, you can use date math in the index name to track the rollover date.\nFor example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`.\nIf you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`.\nIf you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`.", "inherits": { "type": { "name": "RequestBase", @@ -137839,7 +137999,7 @@ } } ], - "specLocation": "indices/rollover/IndicesRolloverRequest.ts#L29-L100" + "specLocation": "indices/rollover/IndicesRolloverRequest.ts#L29-L137" }, { "kind": "response", @@ -138247,7
+138407,7 @@ } } ], - "specLocation": "indices/segments/IndicesSegmentsRequest.ts#L23-L61" + "specLocation": "indices/segments/IndicesSegmentsRequest.ts#L23-L63" }, { "kind": "response", @@ -138655,7 +138815,7 @@ } } ], - "specLocation": "indices/shard_stores/IndicesShardStoresRequest.ts#L24-L71" + "specLocation": "indices/shard_stores/IndicesShardStoresRequest.ts#L24-L72" }, { "kind": "response", @@ -139143,7 +139303,7 @@ "body": { "kind": "no_body" }, - "description": "Simulate an index.\nReturns the index configuration that would be applied to the specified index from an existing index template.", + "description": "Simulate an index.\nGet the index configuration that would be applied to the specified index from an existing index template.", "inherits": { "type": { "name": "RequestBase", @@ -139205,7 +139365,7 @@ } } ], - "specLocation": "indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts#L24-L50" + "specLocation": "indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts#L24-L52" }, { "kind": "response", @@ -140343,7 +140503,7 @@ } } ], - "specLocation": "indices/stats/IndicesStatsRequest.ts#L29-L94" + "specLocation": "indices/stats/IndicesStatsRequest.ts#L29-L95" }, { "kind": "response", @@ -141347,7 +141507,7 @@ } } ], - "specLocation": "indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L77" + "specLocation": "indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L78" }, { "kind": "response", diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index 400121a449..e1b74dde92 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -258,11 +258,13 @@ indices-put-mapping,https://www.elastic.co/guide/en/elasticsearch/reference/{bra indices-recovery,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-recovery.html indices-refresh,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-refresh.html 
indices-reload-analyzers,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-reload-analyzers.html +indices-resolve-cluster-api,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-resolve-cluster-api.html indices-resolve-index-api,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-resolve-index-api.html indices-rollover-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-rollover-index.html indices-segments,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-segments.html indices-shards-stores,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shards-stores.html indices-shrink-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shrink-index.html +indices-simulate-template,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-simulate-template.html indices-split-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-split-index.html indices-stats,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-stats.html indices-template-exists-v1,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-template-exists-v1.html diff --git a/specification/indices/put_mapping/IndicesPutMappingRequest.ts b/specification/indices/put_mapping/IndicesPutMappingRequest.ts index ee6ffa292e..1894d454f8 100644 --- a/specification/indices/put_mapping/IndicesPutMappingRequest.ts +++ b/specification/indices/put_mapping/IndicesPutMappingRequest.ts @@ -40,12 +40,14 @@ import { Duration } from '@_types/Time' /** * Update field mappings. - * Adds new fields to an existing data stream or index. + * Add new fields to an existing data stream or index. * You can also use this API to change the search settings of existing fields. * For data streams, these changes are applied to all backing indices by default. 
* @rest_spec_name indices.put_mapping * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-put-mapping + * @index_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/refresh/IndicesRefreshRequest.ts b/specification/indices/refresh/IndicesRefreshRequest.ts index 0b1586293b..dbc86b8950 100644 --- a/specification/indices/refresh/IndicesRefreshRequest.ts +++ b/specification/indices/refresh/IndicesRefreshRequest.ts @@ -24,9 +24,22 @@ import { ExpandWildcards, Indices } from '@_types/common' * Refresh an index. * A refresh makes recent operations performed on one or more indices available for search. * For data streams, the API runs the refresh operation on the stream’s backing indices. + * + * By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. + * You can change this default interval with the `index.refresh_interval` setting. + * + * Refresh requests are synchronous and do not return a response until the refresh operation completes. + * + * Refreshes are resource-intensive. + * To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible. + * + * If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's `refresh=wait_for` query parameter option. + * This option ensures the indexing operation waits for a periodic refresh before running the search. 
* @rest_spec_name indices.refresh * @availability stack stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-refresh + * @index_privileges maintenance */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/resolve_cluster/ResolveClusterRequest.ts b/specification/indices/resolve_cluster/ResolveClusterRequest.ts index 0609869730..f42971f70f 100644 --- a/specification/indices/resolve_cluster/ResolveClusterRequest.ts +++ b/specification/indices/resolve_cluster/ResolveClusterRequest.ts @@ -38,8 +38,21 @@ import { ExpandWildcards, Names } from '@_types/common' * * Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * * Cluster version information, including the Elasticsearch server version. * + * For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information about the local cluster and all remotely configured clusters that start with the alias `cluster*`. + * Each cluster returns information about whether it has any indices, aliases or data streams that match `my-index-*`. + * + * **Advantages of using this endpoint before a cross-cluster search** + * + * You may want to exclude a cluster or index from a search when: + * + * * A remote cluster is not currently connected and is configured with `skip_unavailable=false`. Running a cross-cluster search under those conditions will cause the entire search to fail. + * * A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data streams that match `logs*`. In that case, that cluster will return no results from that cluster if you include it in a cross-cluster search. 
+ * * The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the `_resolve/cluster` response will be present. (This is also where security/permission errors will be shown.) + * * A remote cluster is an older version that does not support the feature you want to use in your search. * @rest_spec_name indices.resolve_cluster * @availability stack since=8.13.0 stability=stable + * @doc_id indices-resolve-cluster-api + * @index_privileges view_index_metadata */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/resolve_cluster/ResolveClusterResponse.ts b/specification/indices/resolve_cluster/ResolveClusterResponse.ts index ce1e8cb1d2..8bba702f04 100644 --- a/specification/indices/resolve_cluster/ResolveClusterResponse.ts +++ b/specification/indices/resolve_cluster/ResolveClusterResponse.ts @@ -35,7 +35,7 @@ export class ResolveClusterInfo { */ connected: boolean /** - * The skip_unavailable setting for a remote cluster. + * The `skip_unavailable` setting for a remote cluster. */ skip_unavailable: boolean /** @@ -45,7 +45,7 @@ export class ResolveClusterInfo { matching_indices?: boolean /** * Provides error messages that are likely to occur if you do a search with this index expression - * on the specified cluster (e.g., lack of security privileges to query an index). + * on the specified cluster (for example, lack of security privileges to query an index). 
*/ error?: string /** diff --git a/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml b/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml index 8595a6a39f..3f4c0fcd62 100644 --- a/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml +++ b/specification/indices/resolve_cluster/ResolveClusterResponseExample1.yaml @@ -1,5 +1,7 @@ -# summary: -description: A successful response for resolving a specified index expression to return information about each cluster. +summary: Resolve with wildcards +description: > + A successful response from `GET /_resolve/cluster/my-index*,clust*:my-index*`. + Each cluster has its own response section. The cluster you sent the request to is labelled as "(local)". # type: response # response_code: 200 value: diff --git a/specification/indices/resolve_cluster/ResolveClusterResponseExample2.yaml b/specification/indices/resolve_cluster/ResolveClusterResponseExample2.yaml new file mode 100644 index 0000000000..2315196289 --- /dev/null +++ b/specification/indices/resolve_cluster/ResolveClusterResponseExample2.yaml @@ -0,0 +1,25 @@ +summary: Identify search problems +# indices/resolve-cluster.asciidoc:213 +description: > + A successful response from `GET /_resolve/cluster/not-present,clust*:my-index*,oldcluster:*?ignore_unavailable=false`. + This type of request can be used to identify potential problems with your cross-cluster search. + The local cluster has no index called `not_present`. Searching with `ignore_unavailable=false` would return a "no such index" error. + The `cluster_one` remote cluster has no indices that match the pattern `my-index*`. + There may be no indices that match the pattern or the index could be closed. + The `cluster_two` remote cluster is not connected (the attempt to connect failed). 
Since this cluster is marked as `skip_unavailable=false`, you should probably exclude this cluster from the search by adding `-cluster_two:*` to the search index expression. + The `oldcluster` remote cluster shows that it has matching indices, but no version information is included. This indicates that the cluster version predates the introduction of the `_resolve/cluster` API, so you may want to exclude it from your cross-cluster search. +# type: response +# response_code: 200 +value: + "{\n \"(local)\": {\n \"connected\": true,\n \"skip_unavailable\": false,\n\ + \ \"error\": \"no such index [not_present]\"\n },\n \"cluster_one\": {\n \ + \ \"connected\": true,\n \"skip_unavailable\": true,\n \"matching_indices\"\ + : false,\n \"version\": {\n \"number\": \"8.13.0\",\n \"build_flavor\"\ + : \"default\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \ + \ \"minimum_index_compatibility_version\": \"7.0.0\"\n }\n },\n \"cluster_two\"\ + : {\n \"connected\": false,\n \"skip_unavailable\": false,\n \"matching_indices\"\ + : true,\n \"version\": {\n \"number\": \"8.13.0\",\n \"build_flavor\"\ + : \"default\",\n \"minimum_wire_compatibility_version\": \"7.17.0\",\n \ + \ \"minimum_index_compatibility_version\": \"7.0.0\"\n }\n },\n \"oldcluster\"\ + : {\n \"connected\": true,\n \"skip_unavailable\": false,\n \"matching_indices\"\ + : true\n }\n}" diff --git a/specification/indices/resolve_index/ResolveIndexRequest.ts b/specification/indices/resolve_index/ResolveIndexRequest.ts index a456eb5e2c..15ab4db3b1 100644 --- a/specification/indices/resolve_index/ResolveIndexRequest.ts +++ b/specification/indices/resolve_index/ResolveIndexRequest.ts @@ -27,6 +27,8 @@ import { ExpandWildcards, Names } from '@_types/common' * @rest_spec_name indices.resolve_index * @availability stack since=7.9.0 stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-resolve-index-api + * @index_privileges view_index_metadata */ export interface 
Request extends RequestBase { path_parts: { diff --git a/specification/indices/resolve_index/indicesResolveResponseExample1.yaml b/specification/indices/resolve_index/ResolveIndexResponseExample1.yaml similarity index 88% rename from specification/indices/resolve_index/indicesResolveResponseExample1.yaml rename to specification/indices/resolve_index/ResolveIndexResponseExample1.yaml index 15a73f73d9..da130d5826 100644 --- a/specification/indices/resolve_index/indicesResolveResponseExample1.yaml +++ b/specification/indices/resolve_index/ResolveIndexResponseExample1.yaml @@ -1,5 +1,5 @@ # summary: -description: A successful response for resolving the specified name for an index. +description: A successful response from `GET /_resolve/index/f*,remoteCluster1:bar*?expand_wildcards=all`. # type: response # response_code: 200 value: diff --git a/specification/indices/rollover/IndicesRolloverRequest.ts b/specification/indices/rollover/IndicesRolloverRequest.ts index afd79b6cb8..a5d5080142 100644 --- a/specification/indices/rollover/IndicesRolloverRequest.ts +++ b/specification/indices/rollover/IndicesRolloverRequest.ts @@ -28,11 +28,48 @@ import { RolloverConditions } from './types' /** * Roll over to a new index. - * Creates a new index for a data stream or index alias. + * TIP: It is recommended to use the index lifecycle rollover action to automate rollovers. + * + * The rollover API creates a new index for a data stream or index alias. + * The API behavior depends on the rollover target. + * + * **Roll over a data stream** + * + * If you roll over a data stream, the API creates a new write index for the stream. + * The stream's previous write index becomes a regular backing index. + * A rollover also increments the data stream's generation. + * + * **Roll over an index alias with a write index** + * + * TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. 
+ * Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers. + * + * If an index alias points to multiple indices, one of the indices must be a write index. + * The rollover API creates a new write index for the alias with `is_write_index` set to `true`. + * The API also sets `is_write_index` to `false` for the previous write index. + * + * **Roll over an index alias with one index** + * + * If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias. + * + * NOTE: A rollover creates a new index and is subject to the `wait_for_active_shards` setting. + * + * **Increment index names for an alias** + * + * When you roll over an index alias, you can specify a name for the new index. + * If you don't specify a name and the current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, the new index name increments that number. + * For example, if you roll over an alias with a current index of `my-index-000001`, the rollover creates a new index named `my-index-000002`. + * This number is always six characters and zero-padded, regardless of the previous index's name. + * + * If you use an index alias for time series data, you can use date math in the index name to track the rollover date. + * For example, you can create an alias that points to an index named `<my-index-{now/d}-000001>`. + * If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. + * If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. 
* @doc_id indices-rollover-index * @rest_spec_name indices.rollover * @availability stack since=5.0.0 stability=stable * @availability serverless stability=stable visibility=public + * @index_privileges manage */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts b/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts index 0cad3bcfc0..ea76f6552b 100644 --- a/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts +++ b/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts @@ -23,10 +23,12 @@ import { Duration } from '@_types/Time' /** * Simulate an index. - * Returns the index configuration that would be applied to the specified index from an existing index template. + * Get the index configuration that would be applied to the specified index from an existing index template. * @rest_spec_name indices.simulate_index_template * @availability stack since=7.9.0 stability=stable * @availability serverless stability=stable visibility=public + * @doc_id indices-simulate-template + * @cluster_privileges manage_index_templates */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/split/indicesSplitRequestExample1.yaml b/specification/indices/split/indicesSplitRequestExample1.yaml index 09c632a783..a1bbb40102 100644 --- a/specification/indices/split/indicesSplitRequestExample1.yaml +++ b/specification/indices/split/indicesSplitRequestExample1.yaml @@ -1,5 +1,5 @@ -summary: Split an existing index into a new index with more primary shards. +# summary: +description: Split an existing index into a new index with more primary shards. 
# method_request: POST /my-index-000001/_split/split-my-index-000001 -# description: '' # type: request value: "{\n \"settings\": {\n \"index.number_of_shards\": 2\n }\n}" diff --git a/specification/indices/unfreeze/IndicesUnfreezeRequest.ts b/specification/indices/unfreeze/IndicesUnfreezeRequest.ts index 05be87ef0a..9105892053 100644 --- a/specification/indices/unfreeze/IndicesUnfreezeRequest.ts +++ b/specification/indices/unfreeze/IndicesUnfreezeRequest.ts @@ -27,6 +27,7 @@ import { Duration } from '@_types/Time' * @rest_spec_name indices.unfreeze * @availability stack since=6.6.0 stability=stable * @index_privileges manage + * @doc_id unfreeze-index-api */ export interface Request extends RequestBase { path_parts: { From daaa778482c152684f4cd73bc8f2fda87c8a9048 Mon Sep 17 00:00:00 2001 From: lcawl Date: Mon, 6 Jan 2025 10:05:37 -0800 Subject: [PATCH 3/4] Add examples for simulate APIs --- output/openapi/elasticsearch-openapi.json | 4 ++-- .../openapi/elasticsearch-serverless-openapi.json | 4 ++-- output/schema/schema.json | 13 +++++++------ specification/_doc_ids/table.csv | 1 + .../IndicesSimulateIndexTemplateRequest.ts | 2 +- .../indicesSimulateIndexResponseExample1.yaml | 13 +++++++++++++ .../IndicesSimulateTemplateRequest.ts | 3 ++- .../indicesSimulateTemplateRequestExample1.yaml | 9 +++++++++ .../indicesSimulateTemplateResponseExample2.yaml | 14 ++++++++++++++ 9 files changed, 51 insertions(+), 12 deletions(-) create mode 100644 specification/indices/simulate_index_template/indicesSimulateIndexResponseExample1.yaml create mode 100644 specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml create mode 100644 specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 1e9c923d3c..58aa56bd91 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -14706,7 
+14706,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Returns the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.", "operationId": "indices-simulate-template", "parameters": [ { @@ -14735,7 +14735,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Returns the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.", "operationId": "indices-simulate-template-1", "parameters": [ { diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 633b46864d..8285cf294a 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -8251,7 +8251,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Returns the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.", "operationId": "indices-simulate-template", "parameters": [ { @@ -8280,7 +8280,7 @@ "indices" ], "summary": "Simulate an index template", - "description": "Returns the index configuration that would be applied by a particular index template.", + "description": "Get the index configuration that would be applied by a particular index template.", "operationId": "indices-simulate-template-1", "parameters": [ { diff --git a/output/schema/schema.json b/output/schema/schema.json index f45e48b507..81f51301c5 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -8263,8 +8263,8 @@ } }, "description": "Simulate an index.\nGet the index configuration that would be applied to the specified index from an existing index template.", - 
"docId": "indices-simulate-template", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-simulate-template.html", + "docId": "indices-simulate", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{master}/indices-simulate-index.html", "name": "indices.simulate_index_template", "privileges": { "cluster": [ @@ -8305,8 +8305,9 @@ "stability": "stable" } }, - "description": "Simulate an index template.\nReturns the index configuration that would be applied by a particular index template.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-simulate-template.html", + "description": "Simulate an index template.\nGet the index configuration that would be applied by a particular index template.", + "docId": "indices-simulate-template", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-simulate-template.html", "name": "indices.simulate_template", "privileges": { "cluster": [ @@ -139591,7 +139592,7 @@ } ] }, - "description": "Simulate an index template.\nReturns the index configuration that would be applied by a particular index template.", + "description": "Simulate an index template.\nGet the index configuration that would be applied by a particular index template.", "inherits": { "type": { "name": "RequestBase", @@ -139666,7 +139667,7 @@ } } ], - "specLocation": "indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L120" + "specLocation": "indices/simulate_template/IndicesSimulateTemplateRequest.ts#L27-L121" }, { "kind": "response", diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index 6f42100d70..0ca0db22ba 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -266,6 +266,7 @@ indices-rollover-index,https://www.elastic.co/guide/en/elasticsearch/reference/{ indices-segments,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-segments.html 
indices-shards-stores,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shards-stores.html indices-shrink-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-shrink-index.html +indices-simulate,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-simulate-index.html indices-simulate-template,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-simulate-template.html indices-split-index,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-split-index.html indices-stats,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-stats.html diff --git a/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts b/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts index ea76f6552b..9a7c77d352 100644 --- a/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts +++ b/specification/indices/simulate_index_template/IndicesSimulateIndexTemplateRequest.ts @@ -27,7 +27,7 @@ import { Duration } from '@_types/Time' * @rest_spec_name indices.simulate_index_template * @availability stack since=7.9.0 stability=stable * @availability serverless stability=stable visibility=public - * @doc_id indices-simulate-template + * @doc_id indices-simulate * @cluster_privileges manage_index_templates */ export interface Request extends RequestBase { diff --git a/specification/indices/simulate_index_template/indicesSimulateIndexResponseExample1.yaml b/specification/indices/simulate_index_template/indicesSimulateIndexResponseExample1.yaml new file mode 100644 index 0000000000..fcf0504ead --- /dev/null +++ b/specification/indices/simulate_index_template/indicesSimulateIndexResponseExample1.yaml @@ -0,0 +1,13 @@ +# summary: indices/simulate-index.asciidoc:188 +description: A successful response from `POST /_index_template/_simulate_index/my-index-000001`. 
+# type: response +# response_code: 200 +value: "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \ + \ \"number_of_shards\" : \"2\",\n \"number_of_replicas\" : \"0\",\n \ + \ \"routing\" : {\n \"allocation\" : {\n \"include\" : {\n\ + \ \"_tier_preference\" : \"data_content\"\n }\n \ + \ }\n }\n }\n },\n \"mappings\" : {\n \"properties\" : {\n\ + \ \"@timestamp\" : {\n \"type\" : \"date\"\n }\n }\n\ + \ },\n \"aliases\" : { }\n },\n \"overlapping\" : [\n {\n \"name\"\ + \ : \"template_1\",\n \"index_patterns\" : [\n \"my-index-*\"\n \ + \ ]\n }\n ]\n}" diff --git a/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts b/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts index 2352c5653a..f55fee9643 100644 --- a/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts +++ b/specification/indices/simulate_template/IndicesSimulateTemplateRequest.ts @@ -26,11 +26,12 @@ import { Duration } from '@_types/Time' /** * Simulate an index template. - * Returns the index configuration that would be applied by a particular index template. + * Get the index configuration that would be applied by a particular index template. 
* @rest_spec_name indices.simulate_template * @availability stack stability=stable * @availability serverless stability=stable visibility=public * @cluster_privileges manage_index_templates + * @doc_id indices-simulate-template */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml b/specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml new file mode 100644 index 0000000000..6deade552d --- /dev/null +++ b/specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml @@ -0,0 +1,9 @@ +# summary: +# method_request: POST /_index_template/_simulate +description: > + To see what settings will be applied by a template before you add it to the cluster, you can pass a template configuration in the request body. + The specified template is used for the simulation if it has a higher priority than existing templates. +# type: request +value: "{\n \"index_patterns\": [\"my-index-*\"],\n \"composed_of\": [\"ct2\"],\n\ + \ \"priority\": 10,\n \"template\": {\n \"settings\": {\n \"index.number_of_replicas\"\ + : 1\n }\n }\n}" diff --git a/specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml b/specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml new file mode 100644 index 0000000000..0c6dfed6f1 --- /dev/null +++ b/specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml @@ -0,0 +1,14 @@ +# summary: +description: > + A successful response from `POST /_index_template/_simulate` with a template configuration in the request body. + The response shows any overlapping templates with a lower priority. 
+# type: response +# response_code: 200 +value: "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \ + \ \"number_of_replicas\" : \"1\",\n \"routing\" : {\n \"allocation\"\ + \ : {\n \"include\" : {\n \"_tier_preference\" : \"data_content\"\ + \n }\n }\n }\n }\n },\n \"mappings\" : {\n\ + \ \"properties\" : {\n \"@timestamp\" : {\n \"type\" : \"date\"\ + \n }\n }\n },\n \"aliases\" : { }\n },\n \"overlapping\" : [\n\ + \ {\n \"name\" : \"final-template\",\n \"index_patterns\" : [\n \ + \ \"my-index-*\"\n ]\n }\n ]\n}" From a62171f4f9d461891fe015e346384a686c0318b8 Mon Sep 17 00:00:00 2001 From: lcawl Date: Mon, 6 Jan 2025 14:03:35 -0800 Subject: [PATCH 4/4] Add mapping, split, unfreeze updates --- output/openapi/elasticsearch-openapi.json | 25 +++++++++---- .../elasticsearch-serverless-openapi.json | 20 ++++++++--- output/schema/schema.json | 35 +++++++++++++------ specification/_doc_ids/table.csv | 1 + .../put_mapping/IndicesPutMappingRequest.ts | 28 ++++++++++++++- .../indicesPutMappingRequestExample1.yaml | 13 ++++--- .../put_settings/IndicesPutSettingsRequest.ts | 19 ++++++++-- .../IndicesPutSettingsRequestExample1.yaml | 2 +- .../indicesPutSettingsRequestExample2.yaml | 5 +++ .../indicesPutSettingsRequestExample3.yaml | 8 +++++ ...imulateIndexTemplateResponseExample1.yaml} | 3 +- ...ndicesSimulateTemplateRequestExample1.yaml | 3 +- ...dicesSimulateTemplateResponseExample2.yaml | 5 +-- .../indices/split/IndicesSplitRequest.ts | 9 +++++ .../unfreeze/IndicesUnfreezeRequest.ts | 1 + 15 files changed, 144 insertions(+), 33 deletions(-) create mode 100644 specification/indices/put_settings/indicesPutSettingsRequestExample2.yaml create mode 100644 specification/indices/put_settings/indicesPutSettingsRequestExample3.yaml rename specification/indices/simulate_index_template/{indicesSimulateIndexResponseExample1.yaml => indicesSimulateIndexTemplateResponseExample1.yaml} (90%) diff --git a/output/openapi/elasticsearch-openapi.json 
b/output/openapi/elasticsearch-openapi.json index 58aa56bd91..87ef96e5e9 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -13378,7 +13378,10 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a 
field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html" + }, "operationId": "indices-put-mapping", "parameters": [ { @@ -13417,7 +13420,10 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you 
need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html" + }, "operationId": "indices-put-mapping-1", "parameters": [ { @@ -13494,7 +13500,10 @@ "indices" ], "summary": "Update index settings", - "description": "Changes dynamic index settings in real time. For data streams, index setting\nchanges are applied to all backing indices by default.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "externalDocs": { + "url": 
"https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html" + }, "operationId": "indices-put-settings", "parameters": [ { @@ -13574,7 +13583,10 @@ "indices" ], "summary": "Update index settings", - "description": "Changes dynamic index settings in real time. For data streams, index setting\nchanges are applied to all backing indices by default.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html" + }, "operationId": "indices-put-settings-1", "parameters": [ { @@ -14767,7 +14779,7 @@ "indices" ], "summary": "Split an index", - "description": "Split an index into a new index with more primary shards.\n* Before you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be 
green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Split an index into a new index with more primary shards.\n* Before you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the 
previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-split", "parameters": [ { @@ -14801,7 +14813,7 @@ "indices" ], "summary": "Split an index", - "description": "Split an index into a new index with more primary shards.\n* Before you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of 
routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Split an index into a new index with more primary shards.\n* Before you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` 
setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "operationId": "indices-split-1", "parameters": [ { @@ -15128,6 +15140,7 @@ } } }, + "deprecated": true, "x-state": "Added in 6.6.0" } }, diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 8285cf294a..d9ca3d3e1c 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -7483,7 +7483,10 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by 
default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html" + }, "operationId": "indices-put-mapping", "parameters": [ { @@ -7522,7 +7525,10 @@ "indices" ], "summary": "Update field mappings", - "description": "Add new fields to an existing data stream or 
index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Add new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-params.html" + }, "operationId": "indices-put-mapping-1", 
"parameters": [ { @@ -7599,7 +7605,10 @@ "indices" ], "summary": "Update index settings", - "description": "Changes dynamic index settings in real time. For data streams, index setting\nchanges are applied to all backing indices by default.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html" + }, "operationId": "indices-put-settings", "parameters": [ { @@ -7679,7 +7688,10 @@ "indices" ], "summary": "Update index settings", - "description": "Changes dynamic index settings in real time. 
For data streams, index setting\nchanges are applied to all backing indices by default.", + "description": "Changes dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html" + }, "operationId": "indices-put-settings-1", "parameters": [ { diff --git a/output/schema/schema.json b/output/schema/schema.json index 81f51301c5..7a5cf2add8 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -7741,9 +7741,11 @@ "stability": "stable" } }, - "description": "Update field mappings.\nAdd new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Update field mappings.\nAdd new fields to an existing 
data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", "docId": "indices-put-mapping", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-put-mapping.html", + "extDocId": "mapping-params", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-params.html", "name": "indices.put_mapping", "privileges": { "index": [ @@ -7785,8 +7787,11 @@ "stability": "stable" } 
}, - "description": "Update index settings.\nChanges dynamic index settings in real time. For data streams, index setting\nchanges are applied to all backing indices by default.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-update-settings.html", + "description": "Update index settings.\nChanges dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", + "docId": "indices-update-settings", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-update-settings.html", + "extDocId": "index-modules", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules.html", "name": "indices.put_settings", "privileges": { "index": [ @@ -8351,7 +8356,7 @@ "stability": "stable" } }, - "description": "Split an index.\nSplit an index into a new index with more primary shards.\n* Before you can 
split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Split an index.\nSplit an index into a new index with more primary shards.\n* Before you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nYou can do make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write 
index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "docId": "indices-split-index", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-split-index.html", "name": "indices.split", @@ -8451,6 +8456,10 @@ "stability": "stable" } }, + "deprecation": { + "description": "", + "version": "7.14.0" + }, "description": "Unfreeze an index.\nWhen a frozen index is unfrozen, the index goes through the normal recovery process and 
becomes writeable again.", "docId": "unfreeze-index-api", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/unfreeze-index-api.html", @@ -135584,7 +135593,7 @@ } ] }, - "description": "Update field mappings.\nAdd new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields.\nFor data streams, these changes are applied to all backing indices by default.", + "description": "Update field mappings.\nAdd new fields to an existing data stream or index.\nYou can also use this API to change the search settings of existing fields and add new properties to existing object fields.\nFor data streams, these changes are applied to all backing indices by default.\n\n**Add multi-fields to an existing field**\n\nMulti-fields let you index the same field in different ways.\nYou can use this API to update the fields mapping parameter and enable multi-fields for an existing field.\nWARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field.\nYou can populate the new multi-field with the update by query API.\n\n**Change supported mapping parameters for an existing field**\n\nThe documentation for each mapping parameter indicates whether you can update it for an existing field using this API.\nFor example, you can use the update mapping API to update the `ignore_above` parameter.\n\n**Change the mapping of an existing field**\n\nExcept for supported mapping parameters, you can't change the mapping or field type of an existing field.\nChanging an existing field could invalidate data that's already indexed.\n\nIf you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams.\nIf you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.\n\n**Rename a 
field**\n\nRenaming a field would invalidate data already indexed under the old field name.\nInstead, add an alias field to create an alternate field name.", "inherits": { "type": { "name": "RequestBase", @@ -135689,7 +135698,7 @@ } } ], - "specLocation": "indices/put_mapping/IndicesPutMappingRequest.ts#L41-L151" + "specLocation": "indices/put_mapping/IndicesPutMappingRequest.ts#L41-L177" }, { "kind": "response", @@ -135725,7 +135734,7 @@ } } }, - "description": "Update index settings.\nChanges dynamic index settings in real time. For data streams, index setting\nchanges are applied to all backing indices by default.", + "description": "Update index settings.\nChanges dynamic index settings in real time.\nFor data streams, index setting changes are applied to all backing indices by default.\n\nTo revert a setting to the default value, use a null value.\nThe list of per-index settings that can be updated dynamically on live indices can be found in index module documentation.\nTo preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.\n\nNOTE: You can only define new analyzers on closed indices.\nTo add an analyzer, you must close the index, define the analyzer, and reopen the index.\nYou cannot close the write index of a data stream.\nTo update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream.\nThen roll over the data stream to apply the new analyzer to the stream's write index and future backing indices.\nThis affects searches and any new data added to the stream after the rollover.\nHowever, it does not affect the data stream's backing indices or their existing data.\nTo change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.", "inherits": { "type": { "name": "RequestBase", @@ -135843,7 +135852,7 @@ } } ], - "specLocation": "indices/put_settings/IndicesPutSettingsRequest.ts#L25-L93" 
+ "specLocation": "indices/put_settings/IndicesPutSettingsRequest.ts#L25-L108" }, { "kind": "response", @@ -139813,7 +139822,7 @@ } ] }, - "description": "Split an index.\nSplit an index into a new index with more primary shards.\n* Before you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be green.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", + "description": "Split an index.\nSplit an index into a new index with more primary shards.\n* Before you can split an index:\n\n* The index must be read-only.\n* The cluster health status must be 
green.\n\nYou can do make an index read-only with the following request using the add index block API:\n\n```\nPUT /my_source_index/_block/write\n```\n\nThe current write index on a data stream cannot be split.\nIn order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.\n\nThe number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting.\nThe number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.\nFor instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.\n\nA split operation:\n\n* Creates a new target index with the same definition as the source index, but with a larger number of primary shards.\n* Hard-links segments from the source index into the target index. 
If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.\n* Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.\n* Recovers the target index as though it were a closed index which had just been re-opened.\n\nIMPORTANT: Indices can only be split if they satisfy the following requirements:\n\n* The target index must not exist.\n* The source index must have fewer primary shards than the target index.\n* The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.\n* The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.", "inherits": { "type": { "name": "RequestBase", @@ -139891,7 +139900,7 @@ } } ], - "specLocation": "indices/split/IndicesSplitRequest.ts#L27-L98" + "specLocation": "indices/split/IndicesSplitRequest.ts#L27-L107" }, { "kind": "response", @@ -141414,6 +141423,10 @@ "body": { "kind": "no_body" }, + "deprecation": { + "description": "", + "version": "7.14.0" + }, "description": "Unfreeze an index.\nWhen a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.", "inherits": { "type": { @@ -141519,7 +141532,7 @@ } } ], - "specLocation": "indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L78" + "specLocation": "indices/unfreeze/IndicesUnfreezeRequest.ts#L24-L79" }, { "kind": "response", diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index 0ca0db22ba..c603b6e420 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -296,6 +296,7 @@ logstash-logstash-settings-file,https://www.elastic.co/guide/en/logstash/{branch lowercase-processor,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/lowercase-processor.html 
mapping-date-format,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-date-format.html mapping-meta-field,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-meta-field.html +mapping-params,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-params.html mapping-metadata,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-fields.html mapping-roles,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-roles.html mapping-settings-limit,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/mapping-settings-limit.html diff --git a/specification/indices/put_mapping/IndicesPutMappingRequest.ts b/specification/indices/put_mapping/IndicesPutMappingRequest.ts index 1894d454f8..d8956b6fcf 100644 --- a/specification/indices/put_mapping/IndicesPutMappingRequest.ts +++ b/specification/indices/put_mapping/IndicesPutMappingRequest.ts @@ -41,12 +41,38 @@ import { Duration } from '@_types/Time' /** * Update field mappings. * Add new fields to an existing data stream or index. - * You can also use this API to change the search settings of existing fields. + * You can also use this API to change the search settings of existing fields and add new properties to existing object fields. * For data streams, these changes are applied to all backing indices by default. + * + * **Add multi-fields to an existing field** + * + * Multi-fields let you index the same field in different ways. + * You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. + * WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. + * You can populate the new multi-field with the update by query API. 
+ * + * **Change supported mapping parameters for an existing field** + * + * The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. + * For example, you can use the update mapping API to update the `ignore_above` parameter. + * + * **Change the mapping of an existing field** + * + * Except for supported mapping parameters, you can't change the mapping or field type of an existing field. + * Changing an existing field could invalidate data that's already indexed. + * + * If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. + * If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index. + * + * **Rename a field** + * + * Renaming a field would invalidate data already indexed under the old field name. + * Instead, add an alias field to create an alternate field name. * @rest_spec_name indices.put_mapping * @availability stack stability=stable * @availability serverless stability=stable visibility=public * @doc_id indices-put-mapping + * @ext_doc_id mapping-params * @index_privileges manage */ export interface Request extends RequestBase { diff --git a/specification/indices/put_mapping/indicesPutMappingRequestExample1.yaml b/specification/indices/put_mapping/indicesPutMappingRequestExample1.yaml index a94f179750..8d9f12348c 100644 --- a/specification/indices/put_mapping/indicesPutMappingRequestExample1.yaml +++ b/specification/indices/put_mapping/indicesPutMappingRequestExample1.yaml @@ -1,7 +1,12 @@ -summary: Create or update the mapping of an index. +summary: Update multiple targets # method_request: PUT /my-index-000001/_mapping -# description: '' +description: > + The update mapping API can be applied to multiple data streams or indices with a single request. 
+ For example, run `PUT /my-index-000001,my-index-000002/_mapping` to update mappings for the `my-index-000001` and `my-index-000002` indices at the same time. # type: request value: - "{\n \"properties\": {\n \"email\": {\n \"type\": \"keyword\"\n \ - \ }\n }\n}" + properties: + user: + properties: + name: + type: keyword diff --git a/specification/indices/put_settings/IndicesPutSettingsRequest.ts b/specification/indices/put_settings/IndicesPutSettingsRequest.ts index 9fda3c46d2..9e7622a2f3 100644 --- a/specification/indices/put_settings/IndicesPutSettingsRequest.ts +++ b/specification/indices/put_settings/IndicesPutSettingsRequest.ts @@ -24,12 +24,27 @@ import { Duration } from '@_types/Time' /** * Update index settings. - * Changes dynamic index settings in real time. For data streams, index setting - * changes are applied to all backing indices by default. + * Changes dynamic index settings in real time. + * For data streams, index setting changes are applied to all backing indices by default. + * + * To revert a setting to the default value, use a null value. + * The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + * To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`. + * + * NOTE: You can only define new analyzers on closed indices. + * To add an analyzer, you must close the index, define the analyzer, and reopen the index. + * You cannot close the write index of a data stream. + * To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. + * Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. + * This affects searches and any new data added to the stream after the rollover. + * However, it does not affect the data stream's backing indices or their existing data. 
+ * To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. * @rest_spec_name indices.put_settings * @availability stack stability=stable * @availability serverless stability=stable visibility=public * @index_privileges manage + * @doc_id indices-update-settings + * @ext_doc_id index-modules */ export interface Request extends RequestBase { path_parts: { diff --git a/specification/indices/put_settings/IndicesPutSettingsRequestExample1.yaml b/specification/indices/put_settings/IndicesPutSettingsRequestExample1.yaml index ac577d387e..bc9b1b75fc 100644 --- a/specification/indices/put_settings/IndicesPutSettingsRequestExample1.yaml +++ b/specification/indices/put_settings/IndicesPutSettingsRequestExample1.yaml @@ -1,4 +1,4 @@ -summary: Change a dynamic index setting in real time. +summary: Change a dynamic index setting # method_request: PUT /my-index-000001/_settings # description: '' # type: request diff --git a/specification/indices/put_settings/indicesPutSettingsRequestExample2.yaml b/specification/indices/put_settings/indicesPutSettingsRequestExample2.yaml new file mode 100644 index 0000000000..81414c6f50 --- /dev/null +++ b/specification/indices/put_settings/indicesPutSettingsRequestExample2.yaml @@ -0,0 +1,5 @@ +summary: Reset an index setting +# method_request: PUT /my-index-000001/_settings +description: To revert a setting to the default value, use `null`. 
+# type: request +value: "{\n \"index\" : {\n \"refresh_interval\" : null\n }\n}" diff --git a/specification/indices/put_settings/indicesPutSettingsRequestExample3.yaml b/specification/indices/put_settings/indicesPutSettingsRequestExample3.yaml new file mode 100644 index 0000000000..2eb2ebf6f5 --- /dev/null +++ b/specification/indices/put_settings/indicesPutSettingsRequestExample3.yaml @@ -0,0 +1,8 @@ +summary: Update index analysis +# method_request: POST /my-index-000001/_close +description: To add an analyzer, you must close the index, define the analyzer, then reopen the index. +# type: request +value: + "{\n \"analysis\" : {\n \"analyzer\":{\n \"content\":{\n \"\ + type\":\"custom\",\n \"tokenizer\":\"whitespace\"\n }\n }\n }\n\ + }\n\nPOST /my-index-000001/_open" diff --git a/specification/indices/simulate_index_template/indicesSimulateIndexResponseExample1.yaml b/specification/indices/simulate_index_template/indicesSimulateIndexTemplateResponseExample1.yaml similarity index 90% rename from specification/indices/simulate_index_template/indicesSimulateIndexResponseExample1.yaml rename to specification/indices/simulate_index_template/indicesSimulateIndexTemplateResponseExample1.yaml index fcf0504ead..4734f60669 100644 --- a/specification/indices/simulate_index_template/indicesSimulateIndexResponseExample1.yaml +++ b/specification/indices/simulate_index_template/indicesSimulateIndexTemplateResponseExample1.yaml @@ -2,7 +2,8 @@ description: A successful response from `POST /_index_template/_simulate_index/my-index-000001`.
# type: response # response_code: 200 -value: "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \ +value: + "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \ \ \"number_of_shards\" : \"2\",\n \"number_of_replicas\" : \"0\",\n \ \ \"routing\" : {\n \"allocation\" : {\n \"include\" : {\n\ \ \"_tier_preference\" : \"data_content\"\n }\n \ diff --git a/specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml b/specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml index 6deade552d..64d6b3d4aa 100644 --- a/specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml +++ b/specification/indices/simulate_template/indicesSimulateTemplateRequestExample1.yaml @@ -4,6 +4,7 @@ description: > To see what settings will be applied by a template before you add it to the cluster, you can pass a template configuration in the request body. The specified template is used for the simulation if it has a higher priority than existing templates. # type: request -value: "{\n \"index_patterns\": [\"my-index-*\"],\n \"composed_of\": [\"ct2\"],\n\ +value: + "{\n \"index_patterns\": [\"my-index-*\"],\n \"composed_of\": [\"ct2\"],\n\ \ \"priority\": 10,\n \"template\": {\n \"settings\": {\n \"index.number_of_replicas\"\ : 1\n }\n }\n}" diff --git a/specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml b/specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml index 0c6dfed6f1..13302c0ba9 100644 --- a/specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml +++ b/specification/indices/simulate_template/indicesSimulateTemplateResponseExample2.yaml @@ -1,10 +1,11 @@ -# summary: +# summary: description: > A successful response from `POST /_index_template/_simulate` with a template configuration in the request body. The response shows any overlapping templates with a lower priority. 
# type: response # response_code: 200 -value: "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \ +value: + "{\n \"template\" : {\n \"settings\" : {\n \"index\" : {\n \ \ \"number_of_replicas\" : \"1\",\n \"routing\" : {\n \"allocation\"\ \ : {\n \"include\" : {\n \"_tier_preference\" : \"data_content\"\ \n }\n }\n }\n }\n },\n \"mappings\" : {\n\ diff --git a/specification/indices/split/IndicesSplitRequest.ts b/specification/indices/split/IndicesSplitRequest.ts index 36ed65a25c..626261da88 100644 --- a/specification/indices/split/IndicesSplitRequest.ts +++ b/specification/indices/split/IndicesSplitRequest.ts @@ -32,6 +32,15 @@ import { Duration } from '@_types/Time' * * The index must be read-only. * * The cluster health status must be green. * + * You can make an index read-only with the following request using the add index block API: + * + * ``` + * PUT /my_source_index/_block/write + * ``` + * + * The current write index on a data stream cannot be split. + * In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split. + * * The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` setting. * The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. * For instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
diff --git a/specification/indices/unfreeze/IndicesUnfreezeRequest.ts b/specification/indices/unfreeze/IndicesUnfreezeRequest.ts index 9105892053..385e3aa1ca 100644 --- a/specification/indices/unfreeze/IndicesUnfreezeRequest.ts +++ b/specification/indices/unfreeze/IndicesUnfreezeRequest.ts @@ -28,6 +28,7 @@ import { Duration } from '@_types/Time' * @availability stack since=6.6.0 stability=stable * @index_privileges manage * @doc_id unfreeze-index-api + * @deprecated 7.14.0 */ export interface Request extends RequestBase { path_parts: {