diff --git a/docs/overlays/elasticsearch-shared-overlays.yaml b/docs/overlays/elasticsearch-shared-overlays.yaml index 80c5266b19..f5d00901d2 100644 --- a/docs/overlays/elasticsearch-shared-overlays.yaml +++ b/docs/overlays/elasticsearch-shared-overlays.yaml @@ -1360,6 +1360,293 @@ actions: examples: getBehavioralAnalyticsCollectionsResponseExample1: $ref: "../../specification/search_application/get_behavioral_analytics/examples/200_response/BehavioralAnalyticsGetResponseExample1.yaml" +## Examples for cat + - target: "$.components['responses']['cat.aliases#200']" + description: "Add example for cat aliases response" + update: + content: + text/plain: + schema: + type: string + examples: + catAliasesResponseExample1: + $ref: "../../specification/cat/aliases/examples/200_response/CatAliasesResponseExample1.yaml" + - target: "$.components['responses']['cat.allocation#200']" + description: "Add example for cat allocation response" + update: + content: + text/plain: + schema: + type: string + examples: + catAllocationResponseExample1: + $ref: "../../specification/cat/allocation/examples/200_response/CatAllocationResponseExample1.yaml" + - target: "$.components['responses']['cat.component_templates#200']" + description: "Add example for cat component templates response" + update: + content: + text/plain: + schema: + type: string + examples: + catComponentTemplatesResponseExample1: + $ref: "../../specification/cat/component_templates/examples/200_response/CatComponentTemplatesResponseExample1.yaml" + - target: "$.components['responses']['cat.count#200']" + description: "Add example for cat count response" + update: + content: + text/plain: + schema: + type: string + examples: + catCountResponseExample1: + $ref: "../../specification/cat/count/examples/200_response/CatCountResponseExample1.yaml" + catCountResponseExample2: + $ref: "../../specification/cat/count/examples/200_response/CatCountResponseExample2.yaml" + - target: "$.components['responses']['cat.fielddata#200']" + description: "Add example for cat fielddata response" + update: + content: + text/plain: + schema: + type: string + examples: + catFieldDataResponseExample1: + $ref: "../../specification/cat/fielddata/examples/200_response/CatFielddataResponseExample1.yaml" + catFieldDataResponseExample2: + $ref: "../../specification/cat/fielddata/examples/200_response/CatFielddataResponseExample2.yaml" + - target: "$.components['responses']['cat.indices#200']" + description: "Add example for cat indices response" + update: + content: + text/plain: + schema: + type: string + examples: + catIndicesResponseExample1: + $ref: "../../specification/cat/indices/examples/200_response/CatIndicesResponseExample1.yaml" + - target: "$.components['responses']['cat.ml_datafeeds#200']" + description: "Add example for cat datafeeds response" + update: + content: + text/plain: + schema: + type: string + examples: + catDatafeedsResponseExample1: + $ref: "../../specification/cat/ml_datafeeds/examples/200_response/CatDatafeedsResponseExample1.yaml" + - target: "$.components['responses']['cat.ml_data_frame_analytics#200']" + description: "Add example for cat data frame analytics response" + update: + content: + text/plain: + schema: + type: string + examples: + catDataFrameAnalyticsResponseExample1: + $ref: "../../specification/cat/ml_data_frame_analytics/examples/200_response/CatDataFrameAnalyticsResponseExample1.yaml" + - target: "$.components['responses']['cat.ml_jobs#200']" + description: "Add example for cat anomaly detectors response" + update: + content: + 
text/plain: + schema: + type: string + examples: + catAnomalyDetectorsResponseExample1: + $ref: "../../specification/cat/ml_jobs/examples/200_response/CatJobsResponseExample1.yaml" + - target: "$.components['responses']['cat.ml_trained_models#200']" + description: "Add example for cat trained models response" + update: + content: + text/plain: + schema: + type: string + examples: + catTrainedModelsResponseExample1: + $ref: "../../specification/cat/ml_trained_models/examples/200_response/CatTrainedModelsResponseExample1.yaml" + - target: "$.components['responses']['cat.recovery#200']" + description: "Add example for cat recovery response" + update: + content: + text/plain: + schema: + type: string + examples: + catRecoveryResponseExample1: + $ref: "../../specification/cat/recovery/examples/200_response/CatRecoveryResponseExample1.yaml" + catRecoveryResponseExample2: + $ref: "../../specification/cat/recovery/examples/200_response/CatRecoveryResponseExample2.yaml" + catRecoveryResponseExample3: + $ref: "../../specification/cat/recovery/examples/200_response/CatRecoveryResponseExample3.yaml" + - target: "$.components['responses']['cat.segments#200']" + description: "Add example for cat segments response" + update: + content: + text/plain: + schema: + type: string + examples: + catSegmentsResponseExample1: + $ref: "../../specification/cat/segments/examples/200_response/CatSegmentsResponseExample1.yaml" + - target: "$.components['responses']['cat.shards#200']" + description: "Add example for cat shards response" + update: + content: + text/plain: + schema: + type: string + examples: + catShardsResponseExample1: + $ref: "../../specification/cat/shards/examples/200_response/CatShardsResponseExample1.yaml" + catShardsResponseExample2: + $ref: "../../specification/cat/shards/examples/200_response/CatShardsResponseExample2.yaml" + catShardsResponseExample3: + $ref: "../../specification/cat/shards/examples/200_response/CatShardsResponseExample3.yaml" + catShardsResponseExample4: + $ref: "../../specification/cat/shards/examples/200_response/CatShardsResponseExample4.yaml" + catShardsResponseExample5: + $ref: "../../specification/cat/shards/examples/200_response/CatShardsResponseExample5.yaml" + - target: "$.components['responses']['cat.snapshots#200']" + description: "Add example for cat snapshot response" + update: + content: + text/plain: + schema: + type: string + examples: + catSnapshotsResponseExample1: + $ref: "../../specification/cat/snapshots/examples/200_response/CatSnapshotsResponseExample1.yaml" + - target: "$.components['responses']['cat.templates#200']" + description: "Add example for cat templates response" + update: + content: + text/plain: + schema: + type: string + examples: + catTemplatesResponseExample1: + $ref: "../../specification/cat/templates/examples/200_response/CatTemplatesResponseExample1.yaml" + - target: "$.components['responses']['cat.thread_pool#200']" + description: "Add example for cat thread pool response" + update: + content: + text/plain: + schema: + type: string + examples: + catThreadPoolResponseExample1: + $ref: "../../specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample1.yaml" + catThreadPoolResponseExample2: + $ref: "../../specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample2.yaml" + - target: "$.components['responses']['cat.transforms#200']" + description: "Add example for cat transforms response" + update: + content: + application/json: + examples: + catTransformsResponseExample1: + $ref: 
"../../specification/cat/transforms/examples/200_response/CatTransformsResponseExample1.yaml" + - target: "$.paths['/_cat/health']['get']" + description: "Add examples for cat health operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catMasterResponseExample1: + $ref: "../../specification/cat/health/examples/200_response/CatHealthResponseExample1.yaml" + - target: "$.paths['/_cat/master']['get']" + description: "Add examples for cat master operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catMasterResponseExample1: + $ref: "../../specification/cat/master/examples/200_response/CatMasterResponseExample1.yaml" + - target: "$.paths['/_cat/nodeattrs']['get']" + description: "Add examples for cat node attributes operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catNodeAttributesResponseExample1: + $ref: "../../specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample1.yaml" + catNodeAttributesResponseExample2: + $ref: "../../specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample2.yaml" + - target: "$.paths['/_cat/nodes']['get']" + description: "Add examples for cat nodes operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catNodesResponseExample1: + $ref: "../../specification/cat/nodes/examples/200_response/CatNodesResponseExample1.yaml" + catNodesResponseExample2: + $ref: "../../specification/cat/nodes/examples/200_response/CatNodesResponseExample2.yaml" + - target: "$.paths['/_cat/pending_tasks']['get']" + description: "Add examples for cat pending tasks operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catPendingTasksResponseExample1: + $ref: "../../specification/cat/pending_tasks/examples/200_response/CatPendingTasksResponseExample1.yaml" + - target: "$.paths['/_cat/plugins']['get']" + description: "Add examples for cat plugins operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catPluginsResponseExample1: + $ref: "../../specification/cat/plugins/examples/200_response/CatPluginsResponseExample1.yaml" + - target: "$.paths['/_cat/repositories']['get']" + description: "Add examples for cat repositories operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catRepositoriesResponseExample1: + $ref: "../../specification/cat/repositories/examples/200_response/CatRepositoriesResponseExample1.yaml" + - target: "$.paths['/_cat/tasks']['get']" + description: "Add examples for cat tasks operation" + update: + responses: + 200: + content: + text/plain: + schema: + type: string + examples: + catTasksResponseExample1: + $ref: "../../specification/cat/tasks/examples/200_response/CatTasksResponseExample1.yaml" ## Examples for licensing - target: "$.paths['/_license']['get']" description: "Add example for get license response" @@ -1408,4 +1695,4 @@ actions: application/json: examples: searchApplicationSearchRequestExample1: - $ref: "../../specification/search_application/search/examples/request/SearchApplicationsSearchRequestExample1.yaml" \ No newline at end of file + $ref: "../../specification/search_application/search/examples/request/SearchApplicationsSearchRequestExample1.yaml" diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 
diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 4238df504c..f3855e957b 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -900,7 +900,7 @@ "cat" ], "summary": "Get aliases", - "description": "Retrieves the cluster’s index aliases, including filter and routing information.\nThe API does not return data stream aliases.\n\nCAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", "operationId": "cat-aliases", "parameters": [ { @@ -926,7 +926,7 @@ "cat" ], "summary": "Get aliases", - "description": "Retrieves the cluster’s index aliases, including filter and routing information.\nThe API does not return data stream aliases.\n\nCAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", "operationId": "cat-aliases-1", "parameters": [ { @@ -954,8 +954,8 @@ "tags": [ "cat" ], - "summary": "Provides a snapshot of the number of shards allocated to each data node and their disk space", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "summary": "Get shard allocation information", + "description": "Get a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "operationId": "cat-allocation", "parameters": [ { @@ -980,8 +980,8 @@ "tags": [ "cat" ], - "summary": "Provides a snapshot of the number of shards allocated to each data node and their disk space", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "summary": "Get shard allocation information", + "description": "Get a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "operationId": "cat-allocation-1", "parameters": [ { @@ -1010,7 +1010,7 @@ "cat" ], "summary": "Get component templates", - "description": "Returns information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications.
For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", "operationId": "cat-component-templates", "parameters": [ { @@ -1034,7 +1034,7 @@ "cat" ], "summary": "Get component templates", - "description": "Returns information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", "operationId": "cat-component-templates-1", "parameters": [ { @@ -1061,7 +1061,7 @@ "cat" ], "summary": "Get a document count", - "description": "Provides quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", "operationId": "cat-count", "responses": { "200": { @@ -1076,7 +1076,7 @@ "cat" ], "summary": "Get a document count", - "description": "Provides quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the count API.", "operationId": "cat-count-1", "parameters": [ { @@ -1095,8 +1095,8 @@ "tags": [ "cat" ], - "summary": "Returns the amount of heap memory currently used by the field data cache on every data node in the cluster", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", + "summary": "Get field data cache information", + "description": "Get the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", "operationId": "cat-fielddata", "parameters": [ { @@ -1118,8 +1118,8 @@ "tags": [ "cat" ], - "summary": "Returns the amount of heap memory currently used by the field data cache on every data node in the cluster", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", + "summary": "Get field data cache information", + "description": "Get the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", "operationId": "cat-fielddata-1", "parameters": [ { @@ -1144,8 +1144,8 @@ "tags": [ "cat" ], - "summary": "Returns the health status of a cluster, similar to the cluster health API", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", + "summary": "Get the cluster health status", + "description": "IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", "operationId": "cat-health", "parameters": [ { @@ -1192,7 +1192,7 @@ "cat" ], "summary": "Get CAT help", - "description": "Returns help for the CAT APIs.", + "description": "Get help for the CAT APIs.", "operationId": "cat-help", "responses": { "200": { @@ -1214,7 +1214,7 @@ "cat" ], "summary": "Get index information", - "description": "Returns high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", "operationId": "cat-indices", "parameters": [ { @@ -1252,7 +1252,7 @@ "cat" ], "summary": "Get index information", - "description": "Returns high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", "operationId": "cat-indices-1", "parameters": [ { @@ -1292,8 +1292,8 @@ "tags": [ "cat" ], - "summary": "Returns information about the master node, including the ID, bound IP address, and name", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "summary": "Get master node information", + "description": "Get information about the master node, including the ID, bound IP address, and name.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "operationId": "cat-master", "parameters": [ { @@ -1340,7 +1340,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Returns configuration and usage information about data frame analytics jobs.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", "operationId": "cat-ml-data-frame-analytics", "parameters": [ { @@ -1373,7 +1373,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Returns configuration and usage information about data frame analytics jobs.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get data frame analytics jobs statistics API.", "operationId": "cat-ml-data-frame-analytics-1", "parameters": [ { @@ -1409,7 +1409,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Returns configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", "operationId": "cat-ml-datafeeds", "parameters": [ { @@ -1439,7 +1439,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Returns configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", "operationId": "cat-ml-datafeeds-1", "parameters": [ { @@ -1472,7 +1472,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Returns configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get anomaly detection job statistics API.", "operationId": "cat-ml-jobs", "parameters": [ { @@ -1505,7 +1505,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Returns configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", "operationId": "cat-ml-jobs-1", "parameters": [ { @@ -1541,7 +1541,7 @@ "cat" ], "summary": "Get trained models", - "description": "Returns configuration and usage information about inference trained models.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", "operationId": "cat-ml-trained-models", "parameters": [ { @@ -1580,7 +1580,7 @@ "cat" ], "summary": "Get trained models", - "description": "Returns configuration and usage information about inference trained models.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", "operationId": "cat-ml-trained-models-1", "parameters": [ { @@ -1621,8 +1621,8 @@ "tags": [ "cat" ], - "summary": "Returns information about custom node attributes", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "summary": "Get node attribute information", + "description": "Get information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API.", "operationId": "cat-nodeattrs", "parameters": [ { @@ -1668,8 +1668,8 @@ "tags": [ "cat" ], - "summary": "Returns information about the nodes in a cluster", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "summary": "Get node information", + "description": "Get information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "operationId": "cat-nodes", "parameters": [ { @@ -1752,8 +1752,8 @@ "tags": [ "cat" ], - "summary": "Returns cluster-level changes that have not yet been executed", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.", + "summary": "Get pending task information", + "description": "Get information about cluster-level changes that have not yet taken effect.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.", "operationId": "cat-pending-tasks", "parameters": [ { @@ -1809,8 +1809,8 @@ "tags": [ "cat" ], - "summary": "Returns a list of plugins running on each node of a cluster", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "summary": "Get plugin information", + "description": "Get a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "operationId": "cat-plugins", "parameters": [ { @@ -1866,8 +1866,8 @@ "tags": [ "cat" ], - "summary": "Returns information about ongoing and completed shard recoveries", - "description": "Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", + "summary": "Get shard recovery information", + "description": "Get information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the index recovery API.", "operationId": "cat-recovery", "parameters": [ { @@ -1895,8 +1895,8 @@ "tags": [ "cat" ], - "summary": "Returns information about ongoing and completed shard recoveries", - "description": "Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", + "summary": "Get shard recovery information", + "description": "Get information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", "operationId": "cat-recovery-1", "parameters": [ { @@ -1927,8 +1927,8 @@ "tags": [ "cat" ], - "summary": "Returns the snapshot repositories for a cluster", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.", + "summary": "Get snapshot repository information", + "description": "Get a list of snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.", "operationId": "cat-repositories", "parameters": [ { @@ -1975,8 +1975,8 @@ "tags": [ "cat" ], - "summary": "Returns low-level information about the Lucene segments in index shards", - "description": "For data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", + "summary": "Get segment information", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", "operationId": "cat-segments", "parameters": [ { @@ -2001,8 +2001,8 @@ "tags": [ "cat" ], - "summary": "Returns low-level information about the Lucene segments in index shards", - "description": "For data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the index segments API.", + "summary": "Get segment information", + "description": "Get low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", "operationId": "cat-segments-1", "parameters": [ { @@ -2030,8 +2030,8 @@ "tags": [ "cat" ], - "summary": "Returns information about the shards in a cluster", - "description": "For data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "summary": "Get shard information", + "description": "Get information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "operationId": "cat-shards", "parameters": [ { @@ -2056,8 +2056,8 @@ "tags": [ "cat" ], - "summary": "Returns information about the shards in a cluster", - "description": "For data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "summary": "Get shard information", + "description": "Get information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "operationId": "cat-shards-1", "parameters": [ { @@ -2085,8 +2085,8 @@ "tags": [ "cat" ], - "summary": "Returns information about the snapshots stored in one or more repositories", - "description": "A snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", + "summary": "Get snapshot information", + "description": "Get information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", "operationId": "cat-snapshots", "parameters": [ { @@ -2112,8 +2112,8 @@ "tags": [ "cat" ], - "summary": "Returns information about the snapshots stored in one or more repositories", - "description": "A snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the get snapshot API.", + "summary": "Get snapshot information", + "description": "Get information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", "operationId": "cat-snapshots-1", "parameters": [ { @@ -2142,8 +2142,8 @@ "tags": [ "cat" ], - "summary": "Returns information about tasks currently executing in the cluster", - "description": "IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", + "summary": "Get task information", + "description": "Get information about tasks currently running in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", "operationId": "cat-tasks", "parameters": [ { @@ -2246,8 +2246,8 @@ "tags": [ "cat" ], - "summary": "Returns information about index templates in a cluster", - "description": "You can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", + "summary": "Get index template information", + "description": "Get information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", "operationId": "cat-templates", "parameters": [ { @@ -2270,8 +2270,8 @@ "tags": [ "cat" ], - "summary": "Returns information about index templates in a cluster", - "description": "You can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", + "summary": "Get index template information", + "description": "Get information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", "operationId": "cat-templates-1", "parameters": [ { @@ -2297,8 +2297,8 @@ "tags": [ "cat" ], - "summary": "Returns thread pool statistics for each node in a cluster", - "description": "Returned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API.", + "summary": "Get thread pool statistics", + "description": "Get thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "operationId": "cat-thread-pool", "parameters": [ { @@ -2323,8 +2323,8 @@ "tags": [ "cat" ], - "summary": "Returns thread pool statistics for each node in a cluster", - "description": "Returned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "summary": "Get thread pool statistics", + "description": "Get thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "operationId": "cat-thread-pool-1", "parameters": [ { @@ -2352,8 +2352,8 @@ "tags": [ "cat" ], - "summary": "Get transforms", - "description": "Returns configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "summary": "Get transform information", + "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", "operationId": "cat-transforms", "parameters": [ { @@ -2388,8 +2388,8 @@ "tags": [ "cat" ], - "summary": "Get transforms", - "description": "Returns configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "summary": "Get transform information", + "description": "Get configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get transform statistics API.", "operationId": "cat-transforms-1", "parameters": [ {
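Before the create-document hunks below, one note on how the two files in this patch relate: an overlay action's update object is deep-merged into the node selected by its target JSONPath, so once the overlay from the first file is applied, a component such as cat.transforms#200 should carry the new examples member alongside whatever schema it already declares. A sketch of that merged shape, under the assumption that the component keeps an application/json array schema (the item ref name here is hypothetical; the merged output is not shown in this diff):

"cat.transforms#200": {
  "content": {
    "application/json": {
      "schema": { "type": "array", "items": { "$ref": "#/components/schemas/cat.transforms:TransformsRecord" } },
      "examples": {
        "catTransformsResponseExample1": { "$ref": "../../specification/cat/transforms/examples/200_response/CatTransformsResponseExample1.yaml" }
      }
    }
  }
}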
@@ -6700,7 +6700,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with the same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index
template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and
you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -6749,7 +6749,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates.
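As a concrete companion to the `_create` semantics described in the updated text above, here is a minimal console sketch; the index name `my-index-000001` and the document body are illustrative only:

```
PUT my-index-000001/_create/1
{
  "user": {
    "id": "elkbee"
  }
}

PUT my-index-000001/_create/1
{
  "user": {
    "id": "elkbee"
  }
}
```

The first request indexes the document; repeating it returns a 409 response because a document with ID `1` already exists.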
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with the same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates.
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -7120,7 +7120,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates.
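To make the `wait_for_active_shards` discussion above concrete, a hedged sketch: assuming an index configured with two replicas (three copies per shard), this request proceeds only once three shard copies are active and otherwise retries until the timeout elapses. The index name, document, and timeout are illustrative:

```
PUT my-index-000001/_doc/1?wait_for_active_shards=3&timeout=30s
{
  "user": {
    "id": "elkbee"
  }
}
```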
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more 
copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates.
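The two `_doc` request formats listed in the privileges above differ in who supplies the document ID; a minimal sketch with a hypothetical index: `PUT` with an explicit ID creates or overwrites that document, while `POST` without an ID asks Elasticsearch to generate one:

```
PUT my-index-000001/_doc/1
{
  "user": {
    "id": "elkbee"
  }
}

POST my-index-000001/_doc
{
  "user": {
    "id": "elkbee"
  }
}
```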
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail.
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -7180,7 +7180,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates.
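The optimistic concurrency control section in the description above pairs naturally with a short example. The sequence number and primary term below (`362` and `2`) are hypothetical values a client would have obtained from a previous read; if the stored document no longer carries them, the write fails with a 409 version conflict:

```
PUT my-index-000001/_doc/1?if_seq_no=362&if_primary_term=2
{
  "user": {
    "id": "elkbee"
  }
}
```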
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more 
copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates.
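To illustrate the per-operation routing described in these index operation docs, a minimal sketch: the `routing` value (`user1`, hypothetical) is hashed in place of the document ID, so every document indexed with the same value lands on the same shard:

```
POST my-index-000001/_doc?routing=user1
{
  "user": {
    "id": "elkbee"
  }
}
```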
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail.
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -11186,7 +11186,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates.
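The noop-update discussion above suggests the `_update` API as the alternative; this sketch (hypothetical index and document) sets `detect_noop` explicitly so that resubmitting an identical `doc` reports `"result": "noop"` instead of creating a new version:

```
POST my-index-000001/_update/1
{
  "doc": {
    "user": {
      "id": "elkbee"
    }
  },
  "detect_noop": true
}
```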
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more 
copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
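To make the conflict case concrete, here is a hedged sketch (it assumes the document from the console example below has already been stored with an external version of `2`); resubmitting the same external version is rejected because it is not strictly greater than the stored one:

```
# Hypothetical follow-up request: the document is assumed to already
# exist with version 2, so supplying version=2 again is not greater
# than the stored version and fails with a 409 version conflict.
PUT my-index-000001/_doc/1?version=2&version_type=external
{
  "user": {
    "id": "elkbee"
  }
}
```

The success case, where the supplied version exceeds the stored one, follows.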
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -26584,7 +26584,7 @@ "document" ], "summary": "Reindex documents", - "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the 
reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n** Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating 
sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", + "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security 
privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n 
\"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
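To make this concrete, here is a minimal hedged sketch of a reindex request with such a script (the index names and the `status` field are hypothetical; `ctx.op` behaves as described above):

```
POST _reindex
{
  "source": {
    "index": "my-source-index"
  },
  "dest": {
    "index": "my-dest-index"
  },
  "script": {
    "lang": "painless",
    "source": "if (ctx._source.status == 'archived') { ctx.op = 'noop' } else if (ctx._source.status == 'purged') { ctx.op = 'delete' }"
  }
}
```

Documents skipped this way show up in the `noop` counter of the response and deletions in the `deleted` counter.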
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication, or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma-delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents, you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", "operationId": "reindex", "parameters": [ { @@ -47486,6 +47486,9 @@ "similarity": { "description": "The minimum similarity for a vector to be considered a match", "type": "number" + }, + "rescore_vector": { + "$ref": "#/components/schemas/_types:RescoreVector" } }, "required": [ @@ -47525,6 +47528,18 @@ "model_text" ] }, + "_types:RescoreVector": { + "type": "object", + "properties": { + "oversample": { + "description": "Applies the specified oversample factor to k on the approximate kNN search", + "type": "number" + } + }, + "required": [ + "oversample" + ] + }, "_types.query_dsl:MatchQuery": { "allOf": [ { @@ -52473,6 +52488,9 @@ }, "inner_hits": { "$ref": "#/components/schemas/_global.search._types:InnerHits" + }, + "rescore_vector": { + 
"$ref": "#/components/schemas/_types:RescoreVector" } }, "required": [ @@ -85780,6 +85798,9 @@ "similarity": { "description": "The minimum similarity required for a document to be considered a match.", "type": "number" + }, + "rescore_vector": { + "$ref": "#/components/schemas/_types:RescoreVector" } }, "required": [ @@ -98237,7 +98258,7 @@ "cat.aliases#expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:ExpandWildcards" @@ -98257,7 +98278,7 @@ "cat.aliases#master_timeout": { "in": "query", "name": "master_timeout", - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.\nIf the master node is not available before the timeout expires, the request fails and returns an error.\nTo indicated that the request should never timeout, you can set it to `-1`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Duration" @@ -98267,7 +98288,7 @@ "cat.allocation#node_id": { "in": "path", "name": "node_id", - "description": "Comma-separated list of node identifiers or names used to limit the returned information.", + "description": "A comma-separated list of node identifiers or names used to limit the returned information.", "required": true, "deprecated": false, "schema": { @@ -98308,7 +98329,7 @@ "cat.component_templates#name": { "in": "path", "name": "name", - "description": "The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned.", + "description": "The name of the component template.\nIt accepts wildcard expressions.\nIf it is omitted, all component templates are returned.", "required": true, "deprecated": false, "schema": { @@ -98329,7 +98350,7 @@ "cat.component_templates#master_timeout": { "in": "query", "name": "master_timeout", - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Duration" @@ -98339,7 +98360,7 @@ "cat.count#index": { "in": "path", "name": "index", - "description": "Comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`). 
To target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nIt supports wildcards (`*`).\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", "required": true, "deprecated": false, "schema": { diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 4fa5f0f6a4..30eee907a4 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -678,7 +678,7 @@ "cat" ], "summary": "Get aliases", - "description": "Retrieves the cluster’s index aliases, including filter and routing information.\nThe API does not return data stream aliases.\n\nCAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", "operationId": "cat-aliases", "parameters": [ { @@ -704,7 +704,7 @@ "cat" ], "summary": "Get aliases", - "description": "Retrieves the cluster’s index aliases, including filter and routing information.\nThe API does not return data stream aliases.\n\nCAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Get the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", "operationId": "cat-aliases-1", "parameters": [ { @@ -733,7 +733,7 @@ "cat" ], "summary": "Get component templates", - "description": "Returns information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the get component template API.", "operationId": "cat-component-templates", "parameters": [ { @@ -757,7 +757,7 @@ "cat" ], "summary": "Get component templates", - "description": "Returns information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "description": "Get information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", "operationId": "cat-component-templates-1", "parameters": [ { @@ -784,7 +784,7 @@ "cat" ], "summary": "Get a document count", - "description": "Provides quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", "operationId": "cat-count", "responses": { "200": { @@ -799,7 +799,7 @@ "cat" ], "summary": "Get a document count", - "description": "Provides quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the count API.", "operationId": "cat-count-1", "parameters": [ { @@ -819,7 +819,7 @@ "cat" ], "summary": "Get CAT help", - "description": "Returns help for the CAT APIs.", + "description": "Get help for the CAT APIs.", "operationId": "cat-help", "responses": { "200": { @@ -841,7 +841,7 @@ "cat" ], "summary": "Get index information", - "description": "Returns high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", "operationId": "cat-indices", "parameters": [ { @@ -879,7 +879,7 @@ "cat" ], "summary": "Get index information", - "description": "Returns high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. 
As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", "operationId": "cat-indices-1", "parameters": [ { @@ -920,7 +920,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Returns configuration and usage information about data frame analytics jobs.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", "operationId": "cat-ml-data-frame-analytics", "parameters": [ { @@ -953,7 +953,7 @@ "cat" ], "summary": "Get data frame analytics jobs", - "description": "Returns configuration and usage information about data frame analytics jobs.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", "operationId": "cat-ml-data-frame-analytics-1", "parameters": [ { @@ -989,7 +989,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Returns configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", "operationId": "cat-ml-datafeeds", "parameters": [ { @@ -1019,7 +1019,7 @@ "cat" ], "summary": "Get datafeeds", - "description": "Returns configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. 
They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", "operationId": "cat-ml-datafeeds-1", "parameters": [ { @@ -1052,7 +1052,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Returns configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", "operationId": "cat-ml-jobs", "parameters": [ { @@ -1085,7 +1085,7 @@ "cat" ], "summary": "Get anomaly detection jobs", - "description": "Returns configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", "operationId": "cat-ml-jobs-1", "parameters": [ { @@ -1121,7 +1121,7 @@ "cat" ], "summary": "Get trained models", - "description": "Returns configuration and usage information about inference trained models.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. 
They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", "operationId": "cat-ml-trained-models", "parameters": [ { @@ -1160,7 +1160,7 @@ "cat" ], "summary": "Get trained models", - "description": "Returns configuration and usage information about inference trained models.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", "operationId": "cat-ml-trained-models-1", "parameters": [ { @@ -1201,8 +1201,8 @@ "tags": [ "cat" ], - "summary": "Get transforms", - "description": "Returns configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "summary": "Get transform information", + "description": "Get configuration and usage information about transforms.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", "operationId": "cat-transforms", "parameters": [ { @@ -1237,8 +1237,8 @@ "tags": [ "cat" ], - "summary": "Get transforms", - "description": "Returns configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "summary": "Get transform information", + "description": "Get configuration and usage information about transforms.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get transform statistics API.", "operationId": "cat-transforms-1", "parameters": [ { @@ -3434,7 +3434,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed 
to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with the same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index 
template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and 
you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -3483,7 +3483,7 @@ "document" ], "summary": "Create a new document in the index", - "description": "You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "You can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with the same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
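To make the `_create` semantics concrete, here is a minimal sketch in the same console style as the versioning example later in these descriptions; the index name and body are hypothetical:

```
# Succeeds only if document 1 does not exist yet; otherwise returns 409 Conflict
PUT my-index-000001/_create/1
{
  "message": "first write wins"
}
```

A plain `PUT my-index-000001/_doc/1` with the same ID would update the existing document instead of failing.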
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -3678,7 +3678,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
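The two `_doc` request formats listed in the privileges above differ in how the document ID is chosen. A minimal sketch with a hypothetical index and field name:

```
# POST to _doc/ without an ID asks Elasticsearch to generate one;
# the response reports the generated value in _id
POST my-index-000001/_doc/
{
  "message": "auto-generated ID"
}
```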
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more 
copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
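The `action.auto_create_index` setting that this description goes on to explain is a dynamic cluster setting, so it can be changed at runtime. A minimal sketch; the pattern list is illustrative:

```
PUT _cluster/settings
{
  "persistent": {
    "action.auto_create_index": "+my-index-*,-forbidden-*"
  }
}
```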
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -3738,7 +3738,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
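The `wait_for_active_shards` behavior covered at length in these descriptions is set per request. A minimal sketch, assuming an index with at least one replica; the values are illustrative:

```
# Wait for the primary plus one replica (two copies) for up to 30s,
# then fail the request if they never become active
PUT my-index-000001/_doc/1?wait_for_active_shards=2&timeout=30s
{
  "message": "waits for two active shard copies"
}
```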
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more 
copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
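The optimistic concurrency control these descriptions mention keys off the sequence number and primary term returned by a previous read or write. A minimal sketch with hypothetical values:

```
# Applied only if the stored document still carries seq_no 362 and primary term 2;
# on a mismatch Elasticsearch answers with a 409 version conflict
PUT my-index-000001/_doc/1?if_seq_no=362&if_primary_term=2
{
  "message": "conditional overwrite"
}
```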
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of many factors, such as how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used; it starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
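If you want to confirm the version that is currently stored, you can retrieve the document and read the `_version` field in the response metadata, for instance:\n\n```\nGET my-index-000001/_doc/1\n```\n\nThe response includes the current `_version`, which you can compare against the external version you plan to send.\n\n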
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -5928,7 +5928,7 @@ "document" ], "summary": "Create or update a document in an index", - "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more 
copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Add a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow, or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index called `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of many factors, such as how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used; it starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
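If you want to confirm the version that is currently stored, you can retrieve the document and read the `_version` field in the response metadata, for instance:\n\n```\nGET my-index-000001/_doc/1\n```\n\nThe response includes the current `_version`, which you can compare against the external version you plan to send.\n\n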
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "externalDocs": { "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html" }, @@ -15657,7 +15657,7 @@ "document" ], "summary": "Reindex documents", - "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the 
reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n** Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating 
sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", + "description": "Copy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as desired before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security 
privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\", but the order isn't usually predictable, so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex, it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n 
\"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn't have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
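For instance, a minimal sketch along these lines (the index names and the `status` field are hypothetical) skips any document that the script marks as a noop:\n\n```\nPOST _reindex\n{\n \"source\": {\n \"index\": \"my-source-index\"\n },\n \"dest\": {\n \"index\": \"my-dest-index\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.status == 'archived') {ctx.op = 'noop'}\"\n }\n}\n```\n\n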
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and, when they are present, the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication, or the password will be sent in plain text.\nThere is a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma-delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents, you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", "operationId": "reindex", "parameters": [ { @@ -27958,6 +27958,9 @@ "similarity": { "description": "The minimum similarity for a vector to be considered a match", "type": "number" + }, + "rescore_vector": { + "$ref": "#/components/schemas/_types:RescoreVector" + } } }, "required": [ @@ -27997,6 +28000,18 @@ "model_text" ] }, + "_types:RescoreVector": { + "type": "object", + "properties": { + "oversample": { + "description": "Applies the specified oversample factor to `k` on the approximate kNN search", + "type": "number" + } + }, + "required": [ + "oversample" + ] + }, "_types.query_dsl:MatchQuery": { "allOf": [ { @@ -32918,6 +32933,9 @@ }, "inner_hits": { "$ref": "#/components/schemas/_global.search._types:InnerHits" + }, + "rescore_vector": { + 
"$ref": "#/components/schemas/_types:RescoreVector" } }, "required": [ @@ -54405,6 +54423,9 @@ "similarity": { "description": "The minimum similarity required for a document to be considered a match.", "type": "number" + }, + "rescore_vector": { + "$ref": "#/components/schemas/_types:RescoreVector" } }, "required": [ @@ -58325,7 +58346,7 @@ "cat.aliases#expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:ExpandWildcards" @@ -58345,7 +58366,7 @@ "cat.aliases#master_timeout": { "in": "query", "name": "master_timeout", - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.\nIf the master node is not available before the timeout expires, the request fails and returns an error.\nTo indicated that the request should never timeout, you can set it to `-1`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Duration" @@ -58355,7 +58376,7 @@ "cat.component_templates#name": { "in": "path", "name": "name", - "description": "The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned.", + "description": "The name of the component template.\nIt accepts wildcard expressions.\nIf it is omitted, all component templates are returned.", "required": true, "deprecated": false, "schema": { @@ -58376,7 +58397,7 @@ "cat.component_templates#master_timeout": { "in": "query", "name": "master_timeout", - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Duration" @@ -58386,7 +58407,7 @@ "cat.count#index": { "in": "path", "name": "index", - "description": "Comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nIt supports wildcards (`*`).\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", "required": true, "deprecated": false, "schema": { diff --git a/output/schema/schema.json b/output/schema/schema.json index e0f97268c9..f3e272f8b6 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -538,7 +538,7 @@ "stability": "stable" } }, - "description": "Get aliases.\nRetrieves the cluster’s index aliases, including filter and routing information.\nThe API does not return data stream aliases.\n\nCAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", + "description": "Get aliases.\n\nGet the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. 
They are not intended for use by applications. For application consumption, use the aliases API.", "docId": "cat-alias", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-alias.html", "name": "cat.aliases", @@ -585,7 +585,7 @@ "stability": "stable" } }, - "description": "Provides a snapshot of the number of shards allocated to each data node and their disk space.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Get shard allocation information.\n\nGet a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "docId": "cat-allocation", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-allocation.html", "name": "cat.allocation", @@ -633,8 +633,9 @@ "stability": "stable" } }, - "description": "Get component templates.\nReturns information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html", + "description": "Get component templates.\n\nGet information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", + "docId": "cat-component-templates", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-component-templates.html", "name": "cat.component_templates", "privileges": { "cluster": [ @@ -679,7 +680,7 @@ "stability": "stable" } }, - "description": "Get a document count.\nProvides quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get a document count.\n\nGet quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the count API.", "docId": "cat-count", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-count.html", "name": "cat.count", @@ -726,7 +727,7 @@ "stability": "stable" } }, - "description": "Returns the amount of heap memory currently used by the field data cache on every data node in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", + "description": "Get field data cache information.\n\nGet the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", "docId": "cat-fielddata", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-fielddata.html", "name": "cat.fielddata", @@ -773,7 +774,7 @@ "stability": "stable" } }, - "description": "Returns the health status of a cluster, similar to the cluster health API.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", + "description": "Get the cluster health status.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou can also use the API to track the recovery of a large cluster over a longer period of time.", "docId": "cat-health", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-health.html", "name": "cat.health", @@ -814,7 +815,7 @@ "stability": "stable" } }, - "description": "Get CAT help.\nReturns help for the CAT APIs.", + "description": "Get CAT help.\n\nGet help for the CAT APIs.", "docId": "cat", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat.html", "name": "cat.help", @@ -849,7 +850,7 @@ "stability": "stable" } }, - "description": "Get index information.\nReturns high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get index information.\n\nGet high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", "docId": "cat-indices", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-indices.html", "name": "cat.indices", @@ -899,7 +900,7 @@ "stability": "stable" } }, - "description": "Returns information about the master node, including the ID, bound IP address, and name.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get master node information.\n\nGet information about the master node, including the ID, bound IP address, and name.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. 
They are not intended for use by applications. For application consumption, use the nodes info API.", "docId": "cat-master", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-master.html", "name": "cat.master", @@ -941,7 +942,7 @@ "stability": "stable" } }, - "description": "Get data frame analytics jobs.\nReturns configuration and usage information about data frame analytics jobs.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get data frame analytics jobs.\n\nGet configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", "docId": "cat-dfanalytics", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-dfanalytics.html", "name": "cat.ml_data_frame_analytics", @@ -989,7 +990,7 @@ "stability": "stable" } }, - "description": "Get datafeeds.\nReturns configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get datafeeds.\n\nGet configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", "docId": "cat-datafeeds", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-datafeeds.html", "name": "cat.ml_datafeeds", @@ -1037,7 +1038,7 @@ "stability": "stable" } }, - "description": "Get anomaly detection jobs.\nReturns configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get anomaly detection jobs.\n\nGet configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
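As an illustration of the ML cat endpoints described above, a minimal request for anomaly detection jobs might look like this sketch; the column selection is just one plausible choice:

```
GET _cat/ml/anomaly_detectors?v=true&h=id,state
```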
For\napplication consumption, use the get anomaly detection job statistics API.", "docId": "cat-anomaly-detectors", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-anomaly-detectors.html", "name": "cat.ml_jobs", @@ -1085,7 +1086,7 @@ "stability": "stable" } }, - "description": "Get trained models.\nReturns configuration and usage information about inference trained models.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", + "description": "Get trained models.\n\nGet configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", "docId": "cat-trained-model", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-trained-model.html", "name": "cat.ml_trained_models", @@ -1132,7 +1133,7 @@ "stability": "stable" } }, - "description": "Returns information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get node attribute information.\n\nGet information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "docId": "cat-nodeattrs", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-nodeattrs.html", "name": "cat.nodeattrs", @@ -1173,7 +1174,7 @@ "stability": "stable" } }, - "description": "Returns information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get node information.\n\nGet information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "docId": "cat-nodes", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-nodes.html", "name": "cat.nodes", @@ -1214,7 +1215,7 @@ "stability": "stable" } }, - "description": "Returns cluster-level changes that have not yet been executed.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.", + "description": "Get pending task information.\n\nGet information about cluster-level changes that have not yet taken effect.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
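A minimal sketch of the kind of command-line check these node-oriented cat APIs are meant for; the column list is illustrative:

```
GET _cat/nodes?v=true&h=name,heap.percent,cpu,master
```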
For application consumption, use the pending cluster tasks API.", "docId": "cat-pending-tasks", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-pending-tasks.html", "name": "cat.pending_tasks", @@ -1255,7 +1256,7 @@ "stability": "stable" } }, - "description": "Returns a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get plugin information.\n\nGet a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "docId": "cat-plugins", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-plugins.html", "name": "cat.plugins", @@ -1296,7 +1297,7 @@ "stability": "stable" } }, - "description": "Returns information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", + "description": "Get shard recovery information.\n\nGet information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", "docId": "cat-recovery", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-recovery.html", "name": "cat.recovery", @@ -1347,7 +1348,7 @@ "stability": "stable" } }, - "description": "Returns the snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.", + "description": "Get snapshot repository information.\n\nGet a list of snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
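For instance, the shard recovery listing described above is often narrowed to in-flight recoveries with the `active_only` flag; a sketch:

```
GET _cat/recovery?v=true&active_only=true
```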
For application consumption, use the get snapshot repository API.", "docId": "cat-repositories", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-repositories.html", "name": "cat.repositories", @@ -1388,7 +1389,7 @@ "stability": "stable" } }, - "description": "Returns low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", + "description": "Get segment information.\n\nGet low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", "docId": "cat-segments", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-segments.html", "name": "cat.segments", @@ -1438,7 +1439,7 @@ "stability": "stable" } }, - "description": "Returns information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Get shard information.\n\nGet information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "docId": "cat-shards", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-shards.html", "name": "cat.shards", @@ -1489,7 +1490,7 @@ "stability": "stable" } }, - "description": "Returns information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", + "description": "Get snapshot information.\n\nGet information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", "docId": "cat-snapshots", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-snapshots.html", "name": "cat.snapshots", @@ -1537,7 +1538,7 @@ "stability": "experimental" } }, - "description": "Returns information about tasks currently executing in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", + "description": "Get task information.\n\nGet information about tasks currently running in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. 
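As a sketch of the snapshot listing described above, assuming a hypothetical repository named `my_repository`:

```
GET _cat/snapshots/my_repository?v=true&s=id
```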
They are not intended for use by applications. For application consumption, use the task management API.", "docId": "tasks", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html", "name": "cat.tasks", @@ -1579,7 +1580,7 @@ "stability": "stable" } }, - "description": "Returns information about index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", + "description": "Get index template information.\n\nGet information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", "docId": "cat-templates", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-templates.html", "name": "cat.templates", @@ -1626,7 +1627,7 @@ "stability": "stable" } }, - "description": "Returns thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get thread pool statistics.\n\nGet thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "docId": "cat-thread-pool", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-thread-pool.html", "name": "cat.thread_pool", @@ -1674,7 +1675,7 @@ "stability": "stable" } }, - "description": "Get transforms.\nReturns configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "description": "Get transform information.\n\nGet configuration and usage information about transforms.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications.
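For example, the thread pool statistics described above can be narrowed to specific pools and columns; a sketch with an illustrative selection:

```
GET _cat/thread_pool/write,search?v=true&h=node_name,name,active,queue,rejected
```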
For\napplication consumption, use the get transform statistics API.", "docId": "cat-transforms", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-transforms.html", "name": "cat.transforms", @@ -4206,7 +4207,7 @@ "stability": "stable" } }, - "description": "Create a new document in the index.\n\nYou can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node 
containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "Create a new document in the index.\n\nYou can index a new JSON document with the `//_doc/` or `//_create/<_id>` APIs.\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with the same ID already exists in the index.\nTo update an existing document, you must use the `//_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API
request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value
or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", "docId": "docs-index", "docTag": "document", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-index_.html", @@ -6128,7 +6129,7 @@ "stability": "stable" } }, - "description": "Create or update a document in an index.\n\nAdd a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required.
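To make the `wait_for_active_shards` discussion above concrete, here is a minimal sketch; the index name is hypothetical and assumes `number_of_replicas` is 1, so `2` requires the primary plus one replica to be active before the operation proceeds:

```
PUT my-index-000001/_create/1?wait_for_active_shards=2
{
  "user": {
    "id": "elkbee"
  }
}
```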
Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this 
behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version 
number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Create or update a document in an index.\n\nAdd a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT //_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST //_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behavior is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail.
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "docId": "docs-index", "docTag": "document", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-index_.html", @@ -14845,7 +14846,7 @@ "stability": "stable" } }, - "description": "Reindex documents.\n\nCopy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy
default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n** Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just 
automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", + "description": "Reindex documents.\n\nCopy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have 
the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/<task_id>`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n 
\"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
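As an illustration, here is a minimal sketch of such a script (the index names and the `obsolete` source flag are assumptions for the example, not part of the API):\n\n```\nPOST _reindex\n{\n \"source\": {\n \"index\": \"my-index-000001\"\n },\n \"dest\": {\n \"index\": \"my-new-index-000001\"\n },\n \"script\": {\n \"source\": \"if (ctx._source.obsolete != null) { ctx.op = 'delete' }\"\n }\n}\n```\n\nDocuments with a non-null `obsolete` field are deleted from the destination rather than copied; everything else is reindexed unchanged. 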
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma-delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", "docId": "docs-reindex", "docTag": "document", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-reindex.html", @@ -23578,7 +23579,7 @@ } } }, - "description": "Create a new document in the index.\n\nYou can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with a same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT 
/<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the 
`wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", + "description": "Create a new document in the index.\n\nYou can index a new JSON document with the `/<target>/_doc/` or `/<target>/_create/<_id>` APIs.\nUsing `_create` guarantees that the document is indexed only if it does not already exist.\nIt returns a 409 response when a document with the same ID already exists in the index.\nTo update an existing document, you must use the `/<target>/_doc/` API.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add a document using the `PUT /<target>/_create/<_id>` or `POST /<target>/_create/<_id>` request formats, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the 
request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.", "generics": [ { "name": "TDocument", @@ -26590,7 +26591,7 @@ }, "path": [], "query": [], - "specLocation": "_global/get_script_context/GetScriptContextRequest.ts#L22-L38" + "specLocation": "_global/get_script_context/GetScriptContextRequest.ts#L22-L37" }, { "kind": "response", @@ -26675,7 +26676,7 @@ }, "path": [], "query": [], - "specLocation": "_global/get_script_languages/GetScriptLanguagesRequest.ts#L22-L38" + "specLocation": "_global/get_script_languages/GetScriptLanguagesRequest.ts#L22-L37" }, { "kind": "response", @@ -28397,7 +28398,7 @@ } } }, - "description": "Create or update a document in an index.\n\nAdd a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n * ** Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards request` parameter.\n\nValid values are all or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more 
copy than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", + "description": "Create or update a document in an index.\n\nAdd a JSON document to the specified data stream or index and make it searchable.\nIf the target is an index and the document already exists, the request updates the document and increments its version.\n\nNOTE: You cannot use this API to send update requests for existing documents in a data stream.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To add or overwrite a document using the `PUT /<target>/_doc/<_id>` request format, you must have the `create`, `index`, or `write` index privilege.\n* To add a document using the `POST /<target>/_doc/` request format, you must have the `create_doc`, `create`, `index`, or `write` index privilege.\n* To automatically create a data stream or index with this API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nNOTE: Replica shards might not all be started when an indexing operation returns successfully.\nBy default, only the primary is required. Set `wait_for_active_shards` to change this default behavior.\n\n**Automatically create data streams and indices**\n\nIf the request's target doesn't exist and matches an index template with a `data_stream` definition, the index operation automatically creates the data stream.\n\nIf the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.\n\nNOTE: Elasticsearch includes several built-in index templates. 
To avoid naming collisions with these templates, refer to index pattern documentation.\n\nIf no mapping exists, the index operation creates a dynamic mapping.\nBy default, new fields and objects are automatically added to the mapping if needed.\n\nAutomatic index creation is controlled by the `action.auto_create_index` setting.\nIf it is `true`, any index can be created automatically.\nYou can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to `false` to turn off automatic index creation entirely.\nSpecify a comma-separated list of patterns you want to allow or prefix each pattern with `+` or `-` to indicate whether it should be allowed or blocked.\nWhen a list is specified, the default behaviour is to disallow.\n\nNOTE: The `action.auto_create_index` setting affects the automatic creation of indices only.\nIt does not affect the creation of data streams.\n\n**Optimistic concurrency control**\n\nIndex operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters.\nIf a mismatch is detected, the operation will result in a `VersionConflictException` and a status code of `409`.\n\n**Routing**\n\nBy default, shard placement — or routing — is controlled by using a hash of the document's ID value.\nFor more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter.\n\nWhen setting up explicit mapping, you can also use the `_routing` field to direct the index operation to extract the routing value from the document itself.\nThis does come at the (very minimal) cost of an additional document parsing pass.\nIf the `_routing` mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Distributed**\n\nThe index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.\nAfter the primary shard completes the operation, if needed, the update is distributed to applicable replicas.\n\n**Active shards**\n\nTo improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.\nIf the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.\nBy default, write operations only wait for the primary shards to be active before proceeding (that is to say `wait_for_active_shards` is `1`).\nThis default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`.\nTo alter this behavior per operation, use the `wait_for_active_shards` request parameter.\n\nValid values are `all` or any positive integer up to the total number of configured copies per shard in the index (which is `number_of_replicas`+1).\nSpecifying a negative value or a number greater than the number of shard copies will throw an error.\n\nFor example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named `index` with the number of replicas set to 3 (resulting in 4 shard copies, one more copy 
than there are nodes).\nIf you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.\nThis means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.\nIf `wait_for_active_shards` is set on the request to `3` (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.\nThis requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.\nHowever, if you set `wait_for_active_shards` to `all` (or to `4`, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.\nThe operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.\n\nIt is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.\nAfter the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.\nThe `_shards` section of the API response reveals the number of shard copies on which replication succeeded and failed.\n\n**No operation (noop) updates**\n\nWhen updating a document by using this API, a new version of the document is always created even if the document hasn't changed.\nIf this isn't acceptable, use the `_update` API with `detect_noop` set to `true`.\nThe `detect_noop` option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.\n\nThere isn't a definitive rule for when noop updates aren't acceptable.\nIt's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.\n\n**Versioning**\n\nEach indexed document is given a version number.\nBy default, internal versioning is used that starts at 1 and increments with each update, deletes included.\nOptionally, the version number can be set to an external value (for example, if maintained in a database).\nTo enable this functionality, `version_type` should be set to `external`.\nThe value provided must be a numeric, long value greater than or equal to 0, and less than around `9.2e+18`.\n\nNOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations.\nIf no version is provided, the operation runs without any version checks.\n\nWhen using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document.\nIf true, the document will be indexed and the new version number used.\nIf the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. 
For example:\n\n```\nPUT my-index-000001/_doc/1?version=2&version_type=external\n{\n \"user\": {\n \"id\": \"elkbee\"\n }\n}\n```\n\nIn this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.\nIf the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).\n\nA nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.\nEven the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.", "generics": [ { "name": "TDocument", @@ -28623,7 +28624,7 @@ }, "path": [], "query": [], - "specLocation": "_global/info/RootNodeInfoRequest.ts#L22-L39" + "specLocation": "_global/info/RootNodeInfoRequest.ts#L22-L38" }, { "kind": "response", @@ -31400,7 +31401,7 @@ }, "path": [], "query": [], - "specLocation": "_global/ping/PingRequest.ts#L22-L38" + "specLocation": "_global/ping/PingRequest.ts#L22-L37" }, { "kind": "response", @@ -32560,7 +32561,7 @@ } ] }, - "description": "Reindex documents.\n\nCopy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version 
conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/<task_id>`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n \"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n** Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request 
manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*\"]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", + "description": "Reindex documents.\n\nCopy documents from a source to a destination.\nYou can copy all documents to the destination index or reindex a subset of the documents.\nThe source can be any existing index, alias, or data stream.\nThe destination must differ from the source.\nFor example, you cannot reindex a data stream into itself.\n\nIMPORTANT: Reindex requires `_source` to be enabled for all documents in the source.\nThe destination should be configured as wanted before calling the reindex API.\nReindex does not copy the settings from the source or its associated template.\nMappings, shard counts, and replicas, for example, must be configured ahead of time.\n\nIf the Elasticsearch security features are enabled, you must have 
the following security privileges:\n\n* The `read` index privilege for the source data stream, index, or alias.\n* The `write` index privilege for the destination data stream, index, or index alias.\n* To automatically create a data stream or index with a reindex API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege for the destination data stream, index, or alias.\n* If reindexing from a remote cluster, the `source.remote.user` must have the `monitor` cluster privilege and the `read` index privilege for the source data stream, index, or alias.\n\nIf reindexing from a remote cluster, you must explicitly allow the remote host in the `reindex.remote.whitelist` setting.\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe `dest` element can be configured like the index API to control optimistic concurrency control.\nOmitting `version_type` or setting it to `internal` causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.\n\nSetting `version_type` to `external` causes Elasticsearch to preserve the `version` from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.\n\nSetting `op_type` to `create` causes the reindex API to create only missing documents in the destination.\nAll existing documents will cause a version conflict.\n\nIMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an `op_type` of `create`.\nA reindex can only add new documents to a destination data stream.\nIt cannot update existing documents in a destination data stream.\n\nBy default, version conflicts abort the reindex process.\nTo continue reindexing if there are conflicts, set the `conflicts` request body property to `proceed`.\nIn this case, the response includes a count of the version conflicts that were encountered.\nNote that the handling of other error types is unaffected by the `conflicts` property.\nAdditionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query.\n\nNOTE: The reindex API makes no effort to handle ID collisions.\nThe last document written will \"win\" but the order isn't usually predictable so it is not a good idea to rely on this behavior.\nInstead, make sure that IDs are unique by using a script.\n\n**Running reindex asynchronously**\n\nIf the request contains `wait_for_completion=false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `_tasks/<task_id>`.\n\n**Reindex from multiple sources**\n\nIf you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources.\nThat way you can resume the process if there are any errors by removing the partially completed source and starting over.\nIt also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.\n\nFor example, you can use a bash script like this:\n\n```\nfor index in i1 i2 i3 i4 i5; do\n curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{\n 
\"source\": {\n \"index\": \"'$index'\"\n },\n \"dest\": {\n \"index\": \"'$index'-reindexed\"\n }\n }'\ndone\n```\n\n**Throttling**\n\nSet `requests_per_second` to any positive decimal number (`1.4`, `6`, `1000`, for example) to throttle the rate at which reindex issues batches of index operations.\nRequests are throttled by padding each batch with a wait time.\nTo turn off throttling, set `requests_per_second` to `-1`.\n\nThe throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nReindex supports sliced scroll to parallelize the reindexing process.\nThis parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nYou can slice a reindex request manually by providing a slice ID and total number of slices to each request.\nYou can also let reindex automatically parallelize by using sliced scroll to slice on `_id`.\nThe `slices` parameter specifies the number of slices to use.\n\nAdding `slices` to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks API. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices`, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the previous point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being reindexed.\n* Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.\n\nIf slicing automatically, setting `slices` to `auto` will choose a reasonable number for most indices.\nIf slicing manually or otherwise tuning automatic slicing, use the following guidelines.\n\nQuery performance is most efficient when the number of slices is equal to the number of shards in the index.\nIf that number is large (for example, `500`), choose a lower number as too many slices will hurt performance.\nSetting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n\nIndexing performance scales linearly across available resources with the number of slices.\n\nWhether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Modify documents during reindexing**\n\nLike `_update_by_query`, reindex operations support a script that modifies the document.\nUnlike `_update_by_query`, the script is allowed to modify the document's metadata.\n\nJust as in `_update_by_query`, you can set `ctx.op` to change the operation that is run on the destination.\nFor example, set `ctx.op` to `noop` if your script decides that the document doesn’t have to be indexed in the destination. This \"no operation\" will be reported in the `noop` counter in the response body.\nSet `ctx.op` to `delete` if your script decides that the document must be deleted from the destination.\nThe deletion will be reported in the `deleted` counter in the response body.\nSetting `ctx.op` to anything else will return an error, as will setting any other field in `ctx`.\n\nThink of the possibilities! 
Just be careful; you are able to change:\n\n* `_id`\n* `_index`\n* `_version`\n* `_routing`\n\nSetting `_version` to `null` or clearing it from the `ctx` map is just like not sending the version in an indexing request.\nIt will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.\n\n**Reindex from remote**\n\nReindex supports reindexing from a remote Elasticsearch cluster.\nThe `host` parameter must contain a scheme, host, port, and optional path.\nThe `username` and `password` parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication.\nBe sure to use HTTPS when using basic authentication or the password will be sent in plain text.\nThere are a range of settings available to configure the behavior of the HTTPS connection.\n\nWhen using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key.\nRemote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting.\nIt can be set to a comma delimited list of allowed remote host and port combinations.\nScheme is ignored; only the host and port are used.\nFor example:\n\n```\nreindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]\n```\n\nThe list of allowed hosts must be configured on any nodes that will coordinate the reindex.\nThis feature should work with remote clusters of any version of Elasticsearch.\nThis should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.\n\nWARNING: Elasticsearch does not support forward compatibility across major versions.\nFor example, you cannot reindex from a 7.x cluster into a 6.x cluster.\n\nTo enable queries sent to older versions of Elasticsearch, the `query` parameter is sent directly to the remote host without validation or modification.\n\nNOTE: Reindexing from remote clusters does not support manual or automatic slicing.\n\nReindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb.\nIf the remote index includes very large documents you'll need to use a smaller batch size.\nIt is also possible to set the socket read timeout on the remote connection with the `socket_timeout` field and the connection timeout with the `connect_timeout` field.\nBoth default to 30 seconds.\n\n**Configuring SSL parameters**\n\nReindex from remote supports configurable SSL settings.\nThese must be specified in the `elasticsearch.yml` file, with the exception of the secure settings, which you add in the Elasticsearch keystore.\nIt is not possible to configure SSL in the body of the reindex request.", "inherits": { "type": { "name": "RequestBase", @@ -45885,7 +45886,7 @@ "name": "closed" }, { - "description": "Match hidden data streams and hidden indices. Must be combined with open, closed, or both.", + "description": "Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`.", "name": "hidden" }, { @@ -92604,7 +92605,7 @@ "body": { "kind": "no_body" }, - "description": "Get aliases.\nRetrieves the cluster’s index aliases, including filter and routing information.\nThe API does not return data stream aliases.\n\nCAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications.
For application consumption, use the aliases API.", + "description": "Get aliases.\n\nGet the cluster's index aliases, including filter and routing information.\nThis API does not return data stream aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.", "inherits": { "type": { "name": "CatRequestBase", @@ -92631,7 +92632,7 @@ ], "query": [ { - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, "type": { @@ -92656,7 +92657,7 @@ } }, { - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.\nIf the master node is not available before the timeout expires, the request fails and returns an error.\nTo indicate that the request should never time out, you can set it to `-1`.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -92669,7 +92670,7 @@ } } ], - "specLocation": "cat/aliases/CatAliasesRequest.ts#L24-L67" + "specLocation": "cat/aliases/CatAliasesRequest.ts#L24-L75" }, { "kind": "response", @@ -93050,7 +93051,7 @@ "body": { "kind": "no_body" }, - "description": "Provides a snapshot of the number of shards allocated to each data node and their disk space.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Get shard allocation information.\n\nGet a snapshot of the number of shards allocated to each data node and their disk space.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "inherits": { "type": { "name": "CatRequestBase", @@ -93063,7 +93064,7 @@ }, "path": [ { - "description": "Comma-separated list of node identifiers or names used to limit the returned information.", + "description": "A comma-separated list of node identifiers or names used to limit the returned information.", "name": "node_id", "required": false, "type": { @@ -93115,7 +93116,7 @@ } } ], - "specLocation": "cat/allocation/CatAllocationRequest.ts#L24-L65" + "specLocation": "cat/allocation/CatAllocationRequest.ts#L24-L68" }, { "kind": "response", @@ -93246,7 +93247,7 @@ "body": { "kind": "no_body" }, - "description": "Get component templates.\nReturns information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications.
For application consumption, use the get component template API.", + "description": "Get component templates.\n\nGet information about component templates in a cluster.\nComponent templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the get component template API.", "inherits": { "type": { "name": "CatRequestBase", @@ -93259,7 +93260,7 @@ }, "path": [ { - "description": "The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned.", + "description": "The name of the component template.\nIt accepts wildcard expressions.\nIf it is omitted, all component templates are returned.", "name": "name", "required": false, "type": { @@ -93286,7 +93287,7 @@ } }, { - "description": "Period to wait for a connection to the master node.", + "description": "The period to wait for a connection to the master node.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -93299,7 +93300,7 @@ } } ], - "specLocation": "cat/component_templates/CatComponentTemplatesRequest.ts#L23-L65" + "specLocation": "cat/component_templates/CatComponentTemplatesRequest.ts#L23-L70" }, { "kind": "response", @@ -93409,7 +93410,7 @@ "body": { "kind": "no_body" }, - "description": "Get a document count.\nProvides quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", + "description": "Get a document count.\n\nGet quick access to a document count for a data stream, an index, or an entire cluster.\nThe document count only includes live documents, not deleted documents which have not yet been removed by the merge process.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the count API.", "inherits": { "type": { "name": "CatRequestBase", @@ -93422,7 +93423,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nIt supports wildcards (`*`).\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -93435,7 +93436,7 @@ } ], "query": [], - "specLocation": "cat/count/CatCountRequest.ts#L23-L54" + "specLocation": "cat/count/CatCountRequest.ts#L23-L56" }, { "kind": "response", @@ -93558,7 +93559,7 @@ "body": { "kind": "no_body" }, - "description": "Returns the amount of heap memory currently used by the field data cache on every data node in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. 
For application consumption, use the nodes stats API.", + "description": "Get field data cache information.\n\nGet the amount of heap memory currently used by the field data cache on every data node in the cluster.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the nodes stats API.", "inherits": { "type": { "name": "CatRequestBase", @@ -93609,7 +93610,7 @@ } } ], - "specLocation": "cat/fielddata/CatFielddataRequest.ts#L23-L57" + "specLocation": "cat/fielddata/CatFielddataRequest.ts#L23-L60" }, { "kind": "response", @@ -93914,7 +93915,7 @@ "body": { "kind": "no_body" }, - "description": "Returns the health status of a cluster, similar to the cluster health API.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", + "description": "Get the cluster health status.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use the cluster health API.\nThis API is often used to check malfunctioning clusters.\nTo help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats:\n`HH:MM:SS`, which is human-readable but includes no date information;\n`Unix epoch time`, which is machine-sortable and includes date information.\nThe latter format is useful for cluster recoveries that take multiple days.\nYou can use the cat health API to verify cluster health across multiple nodes.\nYou also can use the API to track the recovery of a large cluster over a longer period of time.", "inherits": { "type": { "name": "CatRequestBase", @@ -93953,7 +93954,7 @@ } } ], - "specLocation": "cat/health/CatHealthRequest.ts#L23-L58" + "specLocation": "cat/health/CatHealthRequest.ts#L23-L59" }, { "kind": "response", @@ -93981,7 +93982,7 @@ "body": { "kind": "no_body" }, - "description": "Get CAT help.\nReturns help for the CAT APIs.", + "description": "Get CAT help.\n\nGet help for the CAT APIs.", "name": { "name": "Request", "namespace": "cat.help" @@ -96093,7 +96094,7 @@ "body": { "kind": "no_body" }, - "description": "Get index information.\nReturns high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. 
As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", + "description": "Get index information.\n\nGet high-level information about indices in a cluster, including backing indices for data streams.\n\nUse this request to get the following information for each index in a cluster:\n- shard count\n- document count\n- deleted document count\n- primary store size\n- total store size of all shards, including shard replicas\n\nThese metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.\nTo get an accurate count of Elasticsearch documents, use the cat count or count APIs.\n\nCAT APIs are only intended for human consumption using the command line or Kibana console.\nThey are not intended for use by applications. For application consumption, use an index endpoint.", "inherits": { "type": { "name": "CatRequestBase", @@ -96207,7 +96208,7 @@ } } ], - "specLocation": "cat/indices/CatIndicesRequest.ts#L24-L92" + "specLocation": "cat/indices/CatIndicesRequest.ts#L24-L93" }, { "kind": "response", @@ -96303,7 +96304,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the master node, including the ID, bound IP address, and name.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get master node information.\n\nGet information about the master node, including the ID, bound IP address, and name.\n\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -96343,7 +96344,7 @@ } } ], - "specLocation": "cat/master/CatMasterRequest.ts#L23-L54" + "specLocation": "cat/master/CatMasterRequest.ts#L23-L57" }, { "kind": "response", @@ -96632,7 +96633,7 @@ "body": { "kind": "no_body" }, - "description": "Get data frame analytics jobs.\nReturns configuration and usage information about data frame analytics jobs.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get data frame analytics jobs statistics API.", + "description": "Get data frame analytics jobs.\n\nGet configuration and usage information about data frame analytics jobs.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get data frame analytics jobs statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -96720,7 +96721,7 @@ } } ], - "specLocation": "cat/ml_data_frame_analytics/CatDataFrameAnalyticsRequest.ts#L24-L69" + "specLocation": "cat/ml_data_frame_analytics/CatDataFrameAnalyticsRequest.ts#L24-L70" }, { "kind": "response", @@ -96948,7 +96949,7 @@ "body": { "kind": "no_body" }, - "description": "Get datafeeds.\nReturns configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", + "description": "Get datafeeds.\n\nGet configuration and usage information about datafeeds.\nThis API returns a maximum of 10,000 datafeeds.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage`\ncluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get datafeed statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -97025,7 +97026,7 @@ } } ], - "specLocation": "cat/ml_datafeeds/CatDatafeedsRequest.ts#L24-L84" + "specLocation": "cat/ml_datafeeds/CatDatafeedsRequest.ts#L24-L85" }, { "kind": "response", @@ -98019,7 +98020,7 @@ "body": { "kind": "no_body" }, - "description": "Get anomaly detection jobs.\nReturns configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", + "description": "Get anomaly detection jobs.\n\nGet configuration and usage information for anomaly detection jobs.\nThis API returns a maximum of 10,000 jobs.\nIf the Elasticsearch security features are enabled, you must have `monitor_ml`,\n`monitor`, `manage_ml`, or `manage` cluster privileges to use this API.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get anomaly detection job statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -98108,7 +98109,7 @@ } } ], - "specLocation": "cat/ml_jobs/CatJobsRequest.ts#L24-L88" + "specLocation": "cat/ml_jobs/CatJobsRequest.ts#L24-L89" }, { "kind": "response", @@ -98140,7 +98141,7 @@ "body": { "kind": "no_body" }, - "description": "Get trained models.\nReturns configuration and usage information about inference trained models.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. 
For\napplication consumption, use the get trained models statistics API.", + "description": "Get trained models.\n\nGet configuration and usage information about inference trained models.\n\nIMPORTANT: CAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get trained models statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -98252,7 +98253,7 @@ } } ], - "specLocation": "cat/ml_trained_models/CatTrainedModelsRequest.ts#L25-L79" + "specLocation": "cat/ml_trained_models/CatTrainedModelsRequest.ts#L25-L80" }, { "kind": "response", @@ -98687,7 +98688,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get node attribute information.\n\nGet information about custom node attributes.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -98727,7 +98728,7 @@ } } ], - "specLocation": "cat/nodeattrs/CatNodeAttributesRequest.ts#L23-L54" + "specLocation": "cat/nodeattrs/CatNodeAttributesRequest.ts#L23-L56" }, { "kind": "response", @@ -100274,7 +100275,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get node information.\n\nGet information about the nodes in a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -100363,7 +100364,7 @@ } } ], - "specLocation": "cat/nodes/CatNodesRequest.ts#L24-L65" + "specLocation": "cat/nodes/CatNodesRequest.ts#L24-L67" }, { "kind": "response", @@ -100465,7 +100466,7 @@ "body": { "kind": "no_body" }, - "description": "Returns cluster-level changes that have not yet been executed.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.", + "description": "Get pending task information.\n\nGet information about cluster-level changes that have not yet taken effect.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the pending cluster tasks API.", "inherits": { "type": { "name": "CatRequestBase", @@ -100517,7 +100518,7 @@ } } ], - "specLocation": "cat/pending_tasks/CatPendingTasksRequest.ts#L23-L58" + "specLocation": "cat/pending_tasks/CatPendingTasksRequest.ts#L23-L60" }, { "kind": "response", @@ -100646,7 +100647,7 @@ "body": { "kind": "no_body" }, - "description": "Returns a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get plugin information.\n\nGet a list of plugins running on each node of a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -100699,7 +100700,7 @@ } } ], - "specLocation": "cat/plugins/CatPluginsRequest.ts#L23-L59" + "specLocation": "cat/plugins/CatPluginsRequest.ts#L23-L61" }, { "kind": "response", @@ -101152,7 +101153,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", + "description": "Get shard recovery information.\n\nGet information about ongoing and completed shard recoveries.\nShard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.\nFor data streams, the API returns information about the stream’s backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.", "inherits": { "type": { "name": "CatRequestBase", @@ -101229,7 +101230,7 @@ } } ], - "specLocation": "cat/recovery/CatRecoveryRequest.ts#L24-L74" + "specLocation": "cat/recovery/CatRecoveryRequest.ts#L24-L76" }, { "kind": "response", @@ -101301,7 +101302,7 @@ "body": { "kind": "no_body" }, - "description": "Returns the snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.", + "description": "Get snapshot repository information.\n\nGet a list of snapshot repositories for a cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the get snapshot repository API.", "inherits": { "type": { "name": "CatRequestBase", @@ -101341,7 +101342,7 @@ } } ], - "specLocation": "cat/repositories/CatRepositoriesRequest.ts#L23-L54" + "specLocation": "cat/repositories/CatRepositoriesRequest.ts#L23-L56" }, { "kind": "response", @@ -101373,7 +101374,7 @@ "body": { "kind": "no_body" }, - "description": "Returns low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", + "description": "Get segment information.\n\nGet low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.", "inherits": { "type": { "name": "CatRequestBase", @@ -101438,7 +101439,7 @@ } } ], - "specLocation": "cat/segments/CatSegmentsRequest.ts#L24-L73" + "specLocation": "cat/segments/CatSegmentsRequest.ts#L24-L75" }, { "kind": "response", @@ -101710,7 +101711,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", + "description": "Get shard information.\n\nGet information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.", "inherits": { "type": { "name": "CatRequestBase", @@ -101774,7 +101775,7 @@ } } ], - "specLocation": "cat/shards/CatShardsRequest.ts#L24-L69" + "specLocation": "cat/shards/CatShardsRequest.ts#L24-L71" }, { "kind": "response", @@ -103087,7 +103088,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", + "description": "Get snapshot information.\n\nGet information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the get snapshot API.", "inherits": { "type": { "name": "CatRequestBase", @@ -103152,7 +103153,7 @@ } } ], - "specLocation": "cat/snapshots/CatSnapshotsRequest.ts#L24-L70" + "specLocation": "cat/snapshots/CatSnapshotsRequest.ts#L24-L72" }, { "kind": "response", @@ -103430,7 +103431,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about tasks currently executing in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", + "description": "Get task information.\n\nGet information about tasks currently running in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", "inherits": { "type": { "name": "CatRequestBase", @@ -103537,7 +103538,7 @@ } } ], - "specLocation": "cat/tasks/CatTasksRequest.ts#L23-L69" + "specLocation": "cat/tasks/CatTasksRequest.ts#L23-L71" }, { "kind": "response", @@ -103815,7 +103816,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", + "description": "Get index template information.\n\nGet information about the index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", "inherits": { "type": { "name": "CatRequestBase", @@ -103868,7 +103869,7 @@ } } ], - "specLocation": "cat/templates/CatTemplatesRequest.ts#L24-L67" + "specLocation": "cat/templates/CatTemplatesRequest.ts#L24-L69" }, { "kind": "response", @@ -103998,7 +103999,7 @@ "body": { "kind": "no_body" }, - "description": "Returns thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", + "description": "Get thread pool statistics.\n\nGet thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -104063,7 +104064,7 @@ } } ], - "specLocation": "cat/thread_pool/CatThreadPoolRequest.ts#L24-L71" + "specLocation": "cat/thread_pool/CatThreadPoolRequest.ts#L24-L73" }, { "kind": "response", @@ -104453,7 +104454,7 @@ "body": { "kind": "no_body" }, - "description": "Get transforms.\nReturns configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", + "description": "Get transform information.\n\nGet configuration and usage information about transforms.\n\nCAT APIs are only intended for human consumption using the Kibana\nconsole or command line. They are not intended for use by applications. For\napplication consumption, use the get transform statistics API.", "inherits": { "type": { "name": "CatRequestBase", @@ -104556,7 +104557,7 @@ } } ], - "specLocation": "cat/transforms/CatTransformsRequest.ts#L25-L88" + "specLocation": "cat/transforms/CatTransformsRequest.ts#L25-L89" }, { "kind": "response", @@ -111114,7 +111115,7 @@ }, "path": [], "query": [], - "specLocation": "cluster/remote_info/ClusterRemoteInfoRequest.ts#L22-L38" + "specLocation": "cluster/remote_info/ClusterRemoteInfoRequest.ts#L22-L37" }, { "kind": "response", @@ -119190,7 +119191,7 @@ }, "path": [], "query": [], - "specLocation": "dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts#L22-L43" + "specLocation": "dangling_indices/list_dangling_indices/ListDanglingIndicesRequest.ts#L22-L42" }, { "kind": "response", @@ -125849,7 +125850,7 @@ }, "path": [], "query": [], - "specLocation": "ilm/get_status/GetIlmStatusRequest.ts#L22-L37" + "specLocation": "ilm/get_status/GetIlmStatusRequest.ts#L22-L36" }, { "kind": "response", @@ -135574,7 +135575,7 @@ }, "path": [], "query": [], - "specLocation": "indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsRequest.ts#L22-L39" + "specLocation": "indices/get_data_lifecycle_stats/IndicesGetDataLifecycleStatsRequest.ts#L22-L38" }, { "kind": "response", @@ -151611,7 +151612,7 @@ }, "path": [], "query": [], - "specLocation": "ingest/geo_ip_stats/IngestGeoIpStatsRequest.ts#L22-L39" + "specLocation": "ingest/geo_ip_stats/IngestGeoIpStatsRequest.ts#L22-L38" }, { "kind": "response", @@ -152060,7 +152061,7 @@ }, "path": [], "query": [], - "specLocation": "ingest/processor_grok/GrokProcessorPatternsRequest.ts#L22-L41" + "specLocation": "ingest/processor_grok/GrokProcessorPatternsRequest.ts#L22-L40" }, { "kind": "response", @@ -153150,7 +153151,7 @@ }, "path": [], "query": [], - "specLocation": "license/get_basic_status/GetBasicLicenseStatusRequest.ts#L22-L36" + "specLocation": "license/get_basic_status/GetBasicLicenseStatusRequest.ts#L22-L35" }, { "kind": "response", @@ -153197,7 +153198,7 @@ }, "path": [], "query": [], - "specLocation": "license/get_trial_status/GetTrialLicenseStatusRequest.ts#L22-L36" + "specLocation": "license/get_trial_status/GetTrialLicenseStatusRequest.ts#L22-L35" }, { "kind": "response", @@ -155006,7 +155007,7 @@ }, "path": [], "query": [], - "specLocation": "migration/get_feature_upgrade_status/GetFeatureUpgradeStatusRequest.ts#L22-L43" + "specLocation": "migration/get_feature_upgrade_status/GetFeatureUpgradeStatusRequest.ts#L22-L42" }, { "kind": "response", @@ -155088,7 +155089,7 @@ }, "path": [], "query": [], - "specLocation": 
"migration/post_feature_upgrade/PostFeatureUpgradeRequest.ts#L22-L44" + "specLocation": "migration/post_feature_upgrade/PostFeatureUpgradeRequest.ts#L22-L43" }, { "kind": "response", @@ -173849,7 +173850,7 @@ }, "path": [], "query": [], - "specLocation": "ml/info/MlInfoRequest.ts#L22-L44" + "specLocation": "ml/info/MlInfoRequest.ts#L22-L43" }, { "kind": "response", @@ -196616,7 +196617,7 @@ }, "path": [], "query": [], - "specLocation": "security/authenticate/SecurityAuthenticateRequest.ts#L22-L42" + "specLocation": "security/authenticate/SecurityAuthenticateRequest.ts#L22-L41" }, { "kind": "response", @@ -199329,7 +199330,7 @@ }, "path": [], "query": [], - "specLocation": "security/enroll_kibana/Request.ts#L22-L38" + "specLocation": "security/enroll_kibana/Request.ts#L22-L37" }, { "kind": "response", @@ -199419,7 +199420,7 @@ }, "path": [], "query": [], - "specLocation": "security/enroll_node/Request.ts#L22-L38" + "specLocation": "security/enroll_node/Request.ts#L22-L37" }, { "kind": "response", @@ -199693,7 +199694,7 @@ }, "path": [], "query": [], - "specLocation": "security/get_builtin_privileges/SecurityGetBuiltinPrivilegesRequest.ts#L22-L41" + "specLocation": "security/get_builtin_privileges/SecurityGetBuiltinPrivilegesRequest.ts#L22-L40" }, { "kind": "response", @@ -214543,7 +214544,7 @@ }, "path": [], "query": [], - "specLocation": "ssl/certificates/GetCertificatesRequest.ts#L22-L56" + "specLocation": "ssl/certificates/GetCertificatesRequest.ts#L22-L55" }, { "kind": "response", diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index 42ab7af978..c8b31efdfe 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -28,6 +28,7 @@ calendar-and-fixed-intervals,https://www.elastic.co/guide/en/elasticsearch/refer cat-alias,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-alias.html cat-allocation,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-allocation.html cat-anomaly-detectors,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-anomaly-detectors.html +cat-component-templates,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-component-templates.html cat-count,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-count.html cat-datafeeds,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-datafeeds.html cat-dfanalytics,https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-dfanalytics.html diff --git a/specification/_types/common.ts b/specification/_types/common.ts index a29ec7f8ae..1e822cf7f4 100644 --- a/specification/_types/common.ts +++ b/specification/_types/common.ts @@ -212,7 +212,7 @@ export enum ExpandWildcard { open, /** Match closed, non-hidden indices. Also matches any non-hidden data stream. Data streams cannot be closed. */ closed, - /** Match hidden data streams and hidden indices. Must be combined with open, closed, or both. */ + /** Match hidden data streams and hidden indices. Must be combined with `open`, `closed`, or `both`. */ hidden, /** Wildcard expressions are not accepted. */ none diff --git a/specification/cat/aliases/CatAliasesRequest.ts b/specification/cat/aliases/CatAliasesRequest.ts index 2185eaebd1..d0ce04844c 100644 --- a/specification/cat/aliases/CatAliasesRequest.ts +++ b/specification/cat/aliases/CatAliasesRequest.ts @@ -23,10 +23,11 @@ import { Duration } from '@_types/Time' /** * Get aliases. 
- * Retrieves the cluster’s index aliases, including filter and routing information. - * The API does not return data stream aliases. * - * CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. + * Get the cluster's index aliases, including filter and routing information. + * This API does not return data stream aliases. + * + * IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. * @rest_spec_name cat.aliases * @availability stack stability=stable * @availability serverless stability=stable visibility=public @@ -49,6 +50,11 @@ export interface Request extends CatRequestBase { name?: Names } query_parameters: { + /** + * The type of index that wildcard patterns can match. + * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. + * It supports comma-separated values, such as `open,hidden`. + */ expand_wildcards?: ExpandWildcards /** * If `true`, the request computes the list of selected nodes from the @@ -59,7 +65,9 @@ export interface Request extends CatRequestBase { */ local?: boolean /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. + * If the master node is not available before the timeout expires, the request fails and returns an error. + * To indicate that the request should never time out, you can set it to `-1`. * @server_default 30s */ master_timeout?: Duration diff --git a/specification/cat/aliases/examples/200_response/CatAliasesResponseExample1.yaml b/specification/cat/aliases/examples/200_response/CatAliasesResponseExample1.yaml new file mode 100644 index 0000000000..f26b38c391 --- /dev/null +++ b/specification/cat/aliases/examples/200_response/CatAliasesResponseExample1.yaml @@ -0,0 +1,12 @@ +# summary: +description: > + A successful response from `GET _cat/aliases?v=true`. + This response shows that `alias2` has a configured filter, and `alias3` and `alias4` have routing configurations. +# type: response +# response_code: 200 +value: |- + alias index filter routing.index routing.search is_write_index + alias1 test1 - - - - + alias2 test1 * - - - + alias3 test1 - 1 1 - + alias4 test1 - 2 1,2 - diff --git a/specification/cat/allocation/CatAllocationRequest.ts b/specification/cat/allocation/CatAllocationRequest.ts index 417171a98f..0f0cf114b9 100644 --- a/specification/cat/allocation/CatAllocationRequest.ts +++ b/specification/cat/allocation/CatAllocationRequest.ts @@ -22,8 +22,11 @@ import { Bytes, NodeIds } from '@_types/common' import { Duration } from '@_types/Time' /** - * Provides a snapshot of the number of shards allocated to each data node and their disk space. - * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. + * Get shard allocation information. + * + * Get a snapshot of the number of shards allocated to each data node and their disk space. + * + * IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
* @rest_spec_name cat.allocation * @availability stack stability=stable * @availability serverless stability=stable visibility=private @@ -42,7 +45,7 @@ export interface Request extends CatRequestBase { } ] path_parts: { - /** Comma-separated list of node identifiers or names used to limit the returned information. */ + /** A comma-separated list of node identifiers or names used to limit the returned information. */ node_id?: NodeIds } query_parameters: { diff --git a/specification/cat/allocation/examples/200_response/CatAllocationResponseExample1.yaml b/specification/cat/allocation/examples/200_response/CatAllocationResponseExample1.yaml new file mode 100644 index 0000000000..679eb5ec63 --- /dev/null +++ b/specification/cat/allocation/examples/200_response/CatAllocationResponseExample1.yaml @@ -0,0 +1,9 @@ +# summary: +description: > + A successful response from `GET /_cat/allocation?v=true`. + It shows a single shard is allocated to the one node available. +# type: response +# response_code: 200 +value: |- + shards shards.undesired write_load.forecast disk.indices.forecast disk.indices disk.used disk.avail disk.total disk.percent host ip node node.role + 1 0 0.0 260b 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 himrst diff --git a/specification/cat/component_templates/CatComponentTemplatesRequest.ts b/specification/cat/component_templates/CatComponentTemplatesRequest.ts index 045ad055cc..824db3bf32 100644 --- a/specification/cat/component_templates/CatComponentTemplatesRequest.ts +++ b/specification/cat/component_templates/CatComponentTemplatesRequest.ts @@ -22,15 +22,17 @@ import { Duration } from '@_types/Time' /** * Get component templates. - * Returns information about component templates in a cluster. + * + * Get information about component templates in a cluster. * Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. * - * CAT APIs are only intended for human consumption using the command line or Kibana console. + * IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. * They are not intended for use by applications. For application consumption, use the get component template API. * @rest_spec_name cat.component_templates * @availability stack since=5.1.0 stability=stable * @availability serverless stability=stable visibility=public * @cluster_privileges monitor + * @doc_id cat-component-templates */ export interface Request extends CatRequestBase { urls: [ @@ -44,7 +46,10 @@ export interface Request extends CatRequestBase { } ] path_parts: { - /** The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned. */ + /** + * The name of the component template. + * It accepts wildcard expressions. + * If it is omitted, all component templates are returned. */ name?: string } query_parameters: { @@ -57,7 +62,7 @@ export interface Request extends CatRequestBase { */ local?: boolean /** - * Period to wait for a connection to the master node. + * The period to wait for a connection to the master node. 
* @server_default 30s */ master_timeout?: Duration diff --git a/specification/cat/component_templates/examples/200_response/CatComponentTemplatesResponseExample1.yaml b/specification/cat/component_templates/examples/200_response/CatComponentTemplatesResponseExample1.yaml new file mode 100644 index 0000000000..f26b38c391 --- /dev/null +++ b/specification/cat/component_templates/examples/200_response/CatComponentTemplatesResponseExample1.yaml @@ -0,0 +1,9 @@ +# summary: +description: > + A successful response from `GET _cat/component_templates/my-template-*?v=true&s=name`. +# type: response +# response_code: 200 +value: |- + name                  version alias_count mapping_count settings_count metadata_count included_in + my-template-1                 0           0             1              0              [my-index-template] + my-template-2                 0           3             0              0              [my-index-template] diff --git a/specification/cat/count/CatCountRequest.ts b/specification/cat/count/CatCountRequest.ts index 82baf5fa5c..69f72cbe92 100644 --- a/specification/cat/count/CatCountRequest.ts +++ b/specification/cat/count/CatCountRequest.ts @@ -22,10 +22,11 @@ import { Indices } from '@_types/common' /** * Get a document count. - * Provides quick access to a document count for a data stream, an index, or an entire cluster. + * + * Get quick access to a document count for a data stream, an index, or an entire cluster. * The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. * - * CAT APIs are only intended for human consumption using the command line or Kibana console. + * IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. * They are not intended for use by applications. For application consumption, use the count API. * @rest_spec_name cat.count * @availability stack stability=stable @@ -46,8 +47,9 @@ export interface Request extends CatRequestBase { ] path_parts: { /** - * Comma-separated list of data streams, indices, and aliases used to limit the request. - * Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. + * A comma-separated list of data streams, indices, and aliases used to limit the request. + * It supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices } diff --git a/specification/cat/count/examples/200_response/CatCountResponseExample1.yaml b/specification/cat/count/examples/200_response/CatCountResponseExample1.yaml new file mode 100644 index 0000000000..af1f820407 --- /dev/null +++ b/specification/cat/count/examples/200_response/CatCountResponseExample1.yaml @@ -0,0 +1,9 @@ +summary: Single data stream or index count +description: > + A successful response from `GET /_cat/count/my-index-000001?v=true`. + It retrieves the document count for the `my-index-000001` data stream or index. +# type: response +# response_code: 200 +value: |- + epoch timestamp count + 1475868259 15:24:20 120 diff --git a/specification/cat/count/examples/200_response/CatCountResponseExample2.yaml b/specification/cat/count/examples/200_response/CatCountResponseExample2.yaml new file mode 100644 index 0000000000..326450b3a2 --- /dev/null +++ b/specification/cat/count/examples/200_response/CatCountResponseExample2.yaml @@ -0,0 +1,9 @@ +summary: All data streams and indices count +description: > + A successful response from `GET /_cat/count?v=true`.
+ It retrieves the document count for all data streams and indices in the cluster. +# type: response +# response_code: 200 +value: |- + epoch timestamp count + 1475868259 15:24:20 121 diff --git a/specification/cat/fielddata/CatFielddataRequest.ts b/specification/cat/fielddata/CatFielddataRequest.ts index c85a51a69f..b495da9c08 100644 --- a/specification/cat/fielddata/CatFielddataRequest.ts +++ b/specification/cat/fielddata/CatFielddataRequest.ts @@ -21,7 +21,10 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { Bytes, Fields } from '@_types/common' /** - * Returns the amount of heap memory currently used by the field data cache on every data node in the cluster. + * Get field data cache information. + * + * Get the amount of heap memory currently used by the field data cache on every data node in the cluster. + * * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. * They are not intended for use by applications. For application consumption, use the nodes stats API. * @rest_spec_name cat.fielddata diff --git a/specification/cat/fielddata/examples/200_response/CatFielddataResponseExample1.yaml b/specification/cat/fielddata/examples/200_response/CatFielddataResponseExample1.yaml new file mode 100644 index 0000000000..56090cf442 --- /dev/null +++ b/specification/cat/fielddata/examples/200_response/CatFielddataResponseExample1.yaml @@ -0,0 +1,10 @@ +summary: Single field data +description: > + A successful response from `GET /_cat/fielddata?v=true&fields=body`. + You can specify an individual field in the request body or URL path. + This example retrieves heap memory size information for the `body` field. +# type: response +# response_code: +value: |- + id host ip node field size + Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b diff --git a/specification/cat/fielddata/examples/200_response/CatFielddataResponseExample2.yaml b/specification/cat/fielddata/examples/200_response/CatFielddataResponseExample2.yaml new file mode 100644 index 0000000000..dd4b44a770 --- /dev/null +++ b/specification/cat/fielddata/examples/200_response/CatFielddataResponseExample2.yaml @@ -0,0 +1,12 @@ +summary: Multiple fields data +description: > + A successful response from `GET /_cat/fielddata/body,soul?v=true`. + You can specify a comma-separated list of fields in the request body or URL path. + This example retrieves heap memory size information for the `body` and `soul` fields. + To get information for all fields, run `GET /_cat/fielddata?v=true`. +# type: response +# response_code: +value: |- + id host ip node field size + Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in body 544b + Nqk-6inXQq-OxUfOUI8jNQ 127.0.0.1 127.0.0.1 Nqk-6in soul 480b diff --git a/specification/cat/health/CatHealthRequest.ts b/specification/cat/health/CatHealthRequest.ts index 6571394c1d..232d2a8a1a 100644 --- a/specification/cat/health/CatHealthRequest.ts +++ b/specification/cat/health/CatHealthRequest.ts @@ -21,8 +21,9 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { TimeUnit } from '@_types/Time' /** - * Returns the health status of a cluster, similar to the cluster health API. - * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. + * Get the cluster health status. + * + * IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. * They are not intended for use by applications. For application consumption, use the cluster health API. 
* This API is often used to check malfunctioning clusters. * To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: diff --git a/specification/cat/health/examples/200_response/CatHealthResponseExample1.yaml b/specification/cat/health/examples/200_response/CatHealthResponseExample1.yaml new file mode 100644 index 0000000000..cbe596f9d9 --- /dev/null +++ b/specification/cat/health/examples/200_response/CatHealthResponseExample1.yaml @@ -0,0 +1,9 @@ +# summary: +description: > + A successful response from `GET /_cat/health?v=true`. + By default, it returns `HH:MM:SS` and Unix epoch timestamps. +# type: response +# response_code: 200 +value: |- + epoch timestamp cluster status node.total node.data shards pri relo init unassign unassign.pri pending_tasks max_task_wait_time active_shards_percent + 1475871424 16:17:04 elasticsearch green 1 1 1 1 0 0 0 0 0 - 100.0% diff --git a/specification/cat/help/CatHelpRequest.ts b/specification/cat/help/CatHelpRequest.ts index 8ff7098919..877fd430f6 100644 --- a/specification/cat/help/CatHelpRequest.ts +++ b/specification/cat/help/CatHelpRequest.ts @@ -19,7 +19,8 @@ /** * Get CAT help. - * Returns help for the CAT APIs. + * + * Get help for the CAT APIs. * @rest_spec_name cat.help * @availability stack stability=stable * @availability serverless stability=stable visibility=public diff --git a/specification/cat/indices/CatIndicesRequest.ts b/specification/cat/indices/CatIndicesRequest.ts index fecf5ae088..86b825c4c8 100644 --- a/specification/cat/indices/CatIndicesRequest.ts +++ b/specification/cat/indices/CatIndicesRequest.ts @@ -23,7 +23,8 @@ import { Duration, TimeUnit } from '@_types/Time' /** * Get index information. - * Returns high-level information about indices in a cluster, including backing indices for data streams. + * + * Get high-level information about indices in a cluster, including backing indices for data streams. * * Use this request to get the following information for each index in a cluster: * - shard count diff --git a/specification/cat/indices/examples/200_response/CatIndicesResponseExample1.yaml b/specification/cat/indices/examples/200_response/CatIndicesResponseExample1.yaml new file mode 100644 index 0000000000..ceb5b24974 --- /dev/null +++ b/specification/cat/indices/examples/200_response/CatIndicesResponseExample1.yaml @@ -0,0 +1,9 @@ +# summary: +description: > + A successful response from `GET /_cat/indices/my-index-*?v=true&s=index`. +# type: response +# response_code: +value: |- + health status index uuid pri rep docs.count docs.deleted store.size pri.store.size dataset.size + yellow open my-index-000001 u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb 88.1kb + green open my-index-000002 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b 260b diff --git a/specification/cat/master/CatMasterRequest.ts b/specification/cat/master/CatMasterRequest.ts index 09f0f1b6bb..9fd21124d4 100644 --- a/specification/cat/master/CatMasterRequest.ts +++ b/specification/cat/master/CatMasterRequest.ts @@ -21,7 +21,10 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { Duration } from '@_types/Time' /** - * Returns information about the master node, including the ID, bound IP address, and name. + * Get master node information. + * + * Get information about the master node, including the ID, bound IP address, and name. + * * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the nodes info API. * @rest_spec_name cat.master * @availability stack stability=stable diff --git a/specification/cat/master/examples/200_response/CatMasterResponseExample1.yaml b/specification/cat/master/examples/200_response/CatMasterResponseExample1.yaml new file mode 100644 index 0000000000..4124b3a62e --- /dev/null +++ b/specification/cat/master/examples/200_response/CatMasterResponseExample1.yaml @@ -0,0 +1,8 @@ +# summary: +description: > + A successful response from `GET /_cat/master?v=true`. +# type: response +# response_code: +value: |- + id host ip node + YzWoH_2BT-6UjVGDyPdqYg 127.0.0.1 127.0.0.1 YzWoH_2 diff --git a/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsRequest.ts b/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsRequest.ts index 98a19772e5..b1ea803427 100644 --- a/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsRequest.ts +++ b/specification/cat/ml_data_frame_analytics/CatDataFrameAnalyticsRequest.ts @@ -23,9 +23,10 @@ import { TimeUnit } from '@_types/Time' /** * Get data frame analytics jobs. - * Returns configuration and usage information about data frame analytics jobs. * - * CAT APIs are only intended for human consumption using the Kibana + * Get configuration and usage information about data frame analytics jobs. + * + * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana * console or command line. They are not intended for use by applications. For * application consumption, use the get data frame analytics jobs statistics API. * diff --git a/specification/cat/ml_data_frame_analytics/examples/200_response/CatDataframeanalyticsResponseExample1.yaml b/specification/cat/ml_data_frame_analytics/examples/200_response/CatDataframeanalyticsResponseExample1.yaml new file mode 100644 index 0000000000..7c0ab4cca3 --- /dev/null +++ b/specification/cat/ml_data_frame_analytics/examples/200_response/CatDataframeanalyticsResponseExample1.yaml @@ -0,0 +1,11 @@ +# summary: +description: A successful response from `GET _cat/ml/data_frame/analytics?v=true`. +# type: response +# response_code: 200 +value: |- + id create_time type state + classifier_job_1 2020-02-12T11:49:09.594Z classification stopped + classifier_job_2 2020-02-12T11:49:14.479Z classification stopped + classifier_job_3 2020-02-12T11:49:16.928Z classification stopped + classifier_job_4 2020-02-12T11:49:19.127Z classification stopped + classifier_job_5 2020-02-12T11:49:21.349Z classification stopped diff --git a/specification/cat/ml_datafeeds/CatDatafeedsRequest.ts b/specification/cat/ml_datafeeds/CatDatafeedsRequest.ts index 292ec3499e..5fccffd231 100644 --- a/specification/cat/ml_datafeeds/CatDatafeedsRequest.ts +++ b/specification/cat/ml_datafeeds/CatDatafeedsRequest.ts @@ -23,12 +23,13 @@ import { TimeUnit } from '@_types/Time' /** * Get datafeeds. - * Returns configuration and usage information about datafeeds. + * + * Get configuration and usage information about datafeeds. * This API returns a maximum of 10,000 datafeeds. * If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` * cluster privileges to use this API. * - * CAT APIs are only intended for human consumption using the Kibana + * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana * console or command line. They are not intended for use by applications. For * application consumption, use the get datafeed statistics API. 
* diff --git a/specification/cat/ml_datafeeds/examples/200_response/CatDatafeedsResponseExample1.yaml b/specification/cat/ml_datafeeds/examples/200_response/CatDatafeedsResponseExample1.yaml new file mode 100644 index 0000000000..e2e17713f1 --- /dev/null +++ b/specification/cat/ml_datafeeds/examples/200_response/CatDatafeedsResponseExample1.yaml @@ -0,0 +1,10 @@ +# summary: +description: A successful response from `GET _cat/ml/datafeeds?v=true`. +# type: response +# response_code: +value: |- + id state buckets.count search.count + datafeed-high_sum_total_sales stopped 743 7 + datafeed-low_request_rate stopped 1457 3 + datafeed-response_code_rates stopped 1460 18 + datafeed-url_scanning stopped 1460 18 diff --git a/specification/cat/ml_jobs/CatJobsRequest.ts b/specification/cat/ml_jobs/CatJobsRequest.ts index 4d95f920fe..f5a4f5554b 100644 --- a/specification/cat/ml_jobs/CatJobsRequest.ts +++ b/specification/cat/ml_jobs/CatJobsRequest.ts @@ -23,12 +23,13 @@ import { TimeUnit } from '@_types/Time' /** * Get anomaly detection jobs. - * Returns configuration and usage information for anomaly detection jobs. + * + * Get configuration and usage information for anomaly detection jobs. * This API returns a maximum of 10,000 jobs. * If the Elasticsearch security features are enabled, you must have `monitor_ml`, * `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. * - * CAT APIs are only intended for human consumption using the Kibana + * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana * console or command line. They are not intended for use by applications. For * application consumption, use the get anomaly detection job statistics API. * diff --git a/specification/cat/ml_jobs/examples/200_response/CatJobsResponseExample1.yaml b/specification/cat/ml_jobs/examples/200_response/CatJobsResponseExample1.yaml new file mode 100644 index 0000000000..1b1d38f386 --- /dev/null +++ b/specification/cat/ml_jobs/examples/200_response/CatJobsResponseExample1.yaml @@ -0,0 +1,8 @@ +# summary: +description: A succesful response from `GET _cat/component_templates/my-template-*?v=true&s=name`. +# type: response +# response_code: '' +value: |- + name version alias_count mapping_count settings_count metadata_count included_in + my-template-1 0 0 1 0 [my-index-template] + my-template-2 0 3 0 0 [my-index-template] diff --git a/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts b/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts index d28a26572c..f0c38d5699 100644 --- a/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts +++ b/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts @@ -24,9 +24,10 @@ import { TimeUnit } from '@_types/Time' /** * Get trained models. - * Returns configuration and usage information about inference trained models. * - * CAT APIs are only intended for human consumption using the Kibana + * Get configuration and usage information about inference trained models. + * + * IMPORTANT: CAT APIs are only intended for human consumption using the Kibana * console or command line. They are not intended for use by applications. For * application consumption, use the get trained models statistics API. 
  *
diff --git a/specification/cat/ml_trained_models/examples/200_response/CatTrainedModelsResponseExample1.yaml b/specification/cat/ml_trained_models/examples/200_response/CatTrainedModelsResponseExample1.yaml
new file mode 100644
index 0000000000..a213f32400
--- /dev/null
+++ b/specification/cat/ml_trained_models/examples/200_response/CatTrainedModelsResponseExample1.yaml
@@ -0,0 +1,9 @@
+# summary:
+description: A successful response from `GET _cat/ml/trained_models?h=c,o,l,ct,v&v=true`.
+# type: response
+# response_code: 200
+value: |-
+  id                           created_by operations license  create_time              version
+  ddddd-1580216177138          _xpack     196        PLATINUM 2020-01-28T12:56:17.138Z 8.0.0
+  flight-regress-1580215685537 _xpack     102        PLATINUM 2020-01-28T12:48:05.537Z 8.0.0
+  lang_ident_model_1           _xpack     39629      BASIC    2019-12-05T12:28:34.594Z 7.6.0
diff --git a/specification/cat/nodeattrs/CatNodeAttributesRequest.ts b/specification/cat/nodeattrs/CatNodeAttributesRequest.ts
index 023ad7964d..db54476226 100644
--- a/specification/cat/nodeattrs/CatNodeAttributesRequest.ts
+++ b/specification/cat/nodeattrs/CatNodeAttributesRequest.ts
@@ -21,7 +21,9 @@ import { CatRequestBase } from '@cat/_types/CatBase'
 import { Duration } from '@_types/Time'
 
 /**
- * Returns information about custom node attributes.
+ * Get node attribute information.
+ *
+ * Get information about custom node attributes.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
  * @rest_spec_name cat.nodeattrs
  * @availability stack stability=stable
diff --git a/specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample1.yaml b/specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample1.yaml
new file mode 100644
index 0000000000..6d7affe4cf
--- /dev/null
+++ b/specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample1.yaml
@@ -0,0 +1,10 @@
+summary: Default columns
+description: >
+  A successful response from `GET /_cat/nodeattrs?v=true`.
+  The `node`, `host`, and `ip` columns provide basic information about each node.
+  The `attr` and `value` columns return custom node attributes, one per line.
+# type: response
+# response_code: 200
+value: |-
+  node   host      ip        attr     value
+  node-0 127.0.0.1 127.0.0.1 testattr test
diff --git a/specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample2.yaml b/specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample2.yaml
new file mode 100644
index 0000000000..4fae5ed416
--- /dev/null
+++ b/specification/cat/nodeattrs/examples/200_response/CatNodeAttributesResponseExample2.yaml
@@ -0,0 +1,9 @@
+summary: Explicit columns
+description: >
+  A successful response from `GET /_cat/nodeattrs?v=true&h=name,pid,attr,value`.
+  It returns the `name`, `pid`, `attr`, and `value` columns.
+# type: response
+# response_code: 200
+value: |-
+  name   pid   attr     value
+  node-0 19566 testattr test
diff --git a/specification/cat/nodes/CatNodesRequest.ts b/specification/cat/nodes/CatNodesRequest.ts
index a88ee99b93..181ca3670b 100644
--- a/specification/cat/nodes/CatNodesRequest.ts
+++ b/specification/cat/nodes/CatNodesRequest.ts
@@ -22,7 +22,9 @@ import { Bytes } from '@_types/common'
 import { Duration, TimeUnit } from '@_types/Time'
 
 /**
- * Returns information about the nodes in a cluster.
+ * Get node information.
+ *
+ * Get information about the nodes in a cluster.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
  * @rest_spec_name cat.nodes
  * @availability stack stability=stable
diff --git a/specification/cat/nodes/examples/200_response/CatNodesResponseExample1.yaml b/specification/cat/nodes/examples/200_response/CatNodesResponseExample1.yaml
new file mode 100644
index 0000000000..01fdfc7718
--- /dev/null
+++ b/specification/cat/nodes/examples/200_response/CatNodesResponseExample1.yaml
@@ -0,0 +1,10 @@
+summary: Default columns
+description: >
+  A successful response from `GET /_cat/nodes?v=true`.
+  The `ip`, `heap.percent`, `ram.percent`, `cpu`, and `load_*` columns provide the IP addresses and performance information of each node.
+  The `node.role`, `master`, and `name` columns provide information useful for monitoring an entire cluster, particularly large ones.
+# type: response
+# response_code: 200
+value: |-
+  ip        heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
+  127.0.0.1 65           99          42  3.07                     dim       *      mJw06l1
diff --git a/specification/cat/nodes/examples/200_response/CatNodesResponseExample2.yaml b/specification/cat/nodes/examples/200_response/CatNodesResponseExample2.yaml
new file mode 100644
index 0000000000..e275a47985
--- /dev/null
+++ b/specification/cat/nodes/examples/200_response/CatNodesResponseExample2.yaml
@@ -0,0 +1,9 @@
+summary: Explicit columns
+description: >
+  A successful response from `GET /_cat/nodes?v=true&h=id,ip,port,v,m`.
+  It returns the `id`, `ip`, `port`, `v` (version), and `m` (master) columns.
+# type: response
+# response_code:
+value: |-
+  id   ip        port  v      m
+  veJR 127.0.0.1 59938 8.17.0 *
diff --git a/specification/cat/pending_tasks/CatPendingTasksRequest.ts b/specification/cat/pending_tasks/CatPendingTasksRequest.ts
index 2110ce21f2..2519eca58d 100644
--- a/specification/cat/pending_tasks/CatPendingTasksRequest.ts
+++ b/specification/cat/pending_tasks/CatPendingTasksRequest.ts
@@ -21,7 +21,9 @@ import { CatRequestBase } from '@cat/_types/CatBase'
 import { Duration, TimeUnit } from '@_types/Time'
 
 /**
- * Returns cluster-level changes that have not yet been executed.
+ * Get pending task information.
+ *
+ * Get information about cluster-level changes that have not yet taken effect.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.
  * @rest_spec_name cat.pending_tasks
  * @availability stack stability=stable
diff --git a/specification/cat/pending_tasks/examples/200_response/CatPendingTasksResponseExample1.yaml b/specification/cat/pending_tasks/examples/200_response/CatPendingTasksResponseExample1.yaml
new file mode 100644
index 0000000000..9a021ada26
--- /dev/null
+++ b/specification/cat/pending_tasks/examples/200_response/CatPendingTasksResponseExample1.yaml
@@ -0,0 +1,14 @@
+# summary:
+description: >
+  A successful response from `GET /_cat/pending_tasks?v=true`.
+# type: response
+# response_code: 200
+value: |-
+  insertOrder timeInQueue priority source
+  1685        855ms       HIGH     update-mapping [foo][t]
+  1686        843ms       HIGH     update-mapping [foo][t]
+  1693        753ms       HIGH     refresh-mapping [foo][[t]]
+  1688        816ms       HIGH     update-mapping [foo][t]
+  1689        802ms       HIGH     update-mapping [foo][t]
+  1690        787ms       HIGH     update-mapping [foo][t]
+  1691        773ms       HIGH     update-mapping [foo][t]
diff --git a/specification/cat/plugins/CatPluginsRequest.ts b/specification/cat/plugins/CatPluginsRequest.ts
index ba8c2fda87..75ff70edbb 100644
--- a/specification/cat/plugins/CatPluginsRequest.ts
+++ b/specification/cat/plugins/CatPluginsRequest.ts
@@ -21,7 +21,9 @@ import { CatRequestBase } from '@cat/_types/CatBase'
 import { Duration } from '@_types/Time'
 
 /**
- * Returns a list of plugins running on each node of a cluster.
+ * Get plugin information.
+ *
+ * Get a list of plugins running on each node of a cluster.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
  * @rest_spec_name cat.plugins
  * @availability stack stability=stable
diff --git a/specification/cat/plugins/examples/200_response/CatPluginsResponseExample1.yaml b/specification/cat/plugins/examples/200_response/CatPluginsResponseExample1.yaml
new file mode 100644
index 0000000000..1fba45a722
--- /dev/null
+++ b/specification/cat/plugins/examples/200_response/CatPluginsResponseExample1.yaml
@@ -0,0 +1,21 @@
+# summary:
+description: >
+  A successful response from `GET /_cat/plugins?v=true&s=component&h=name,component,version,description`.
+# type: response
+# response_code:
+value: |-
+  name    component               version description
+  U7321H6 analysis-icu            8.17.0  The ICU Analysis plugin integrates the Lucene ICU module into Elasticsearch, adding ICU-related analysis components.
+  U7321H6 analysis-kuromoji       8.17.0  The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch.
+  U7321H6 analysis-nori           8.17.0  The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch.
+  U7321H6 analysis-phonetic       8.17.0  The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch.
+  U7321H6 analysis-smartcn        8.17.0  Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch.
+  U7321H6 analysis-stempel        8.17.0  The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch.
+  U7321H6 analysis-ukrainian      8.17.0  The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch.
+  U7321H6 discovery-azure-classic 8.17.0  The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism
+  U7321H6 discovery-ec2           8.17.0  The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.
+  U7321H6 discovery-gce           8.17.0  The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.
+  U7321H6 mapper-annotated-text   8.17.0  The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.
+  U7321H6 mapper-murmur3          8.17.0  The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index.
+  U7321H6 mapper-size             8.17.0  The Mapper Size plugin allows document to record their uncompressed size at index time.
+  U7321H6 store-smb               8.17.0  The Store SMB plugin adds support for SMB stores.
diff --git a/specification/cat/recovery/CatRecoveryRequest.ts b/specification/cat/recovery/CatRecoveryRequest.ts
index 0aa9d35396..8edd033847 100644
--- a/specification/cat/recovery/CatRecoveryRequest.ts
+++ b/specification/cat/recovery/CatRecoveryRequest.ts
@@ -22,7 +22,9 @@ import { Bytes, Indices } from '@_types/common'
 import { TimeUnit } from '@_types/Time'
 
 /**
- * Returns information about ongoing and completed shard recoveries.
+ * Get shard recovery information.
+ *
+ * Get information about ongoing and completed shard recoveries.
  * Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.
  * For data streams, the API returns information about the stream’s backing indices.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.
diff --git a/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample1.yaml b/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample1.yaml
new file mode 100644
index 0000000000..34ed5bcd39
--- /dev/null
+++ b/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample1.yaml
@@ -0,0 +1,9 @@
+summary: No ongoing recoveries
+description: >
+  A successful response from `GET _cat/recovery?v=true`.
+  In this example, the source and target nodes are the same because the recovery type is `store`, meaning the shard was read from local storage on node start.
+# type: response
+# response_code: 200
+value: |-
+  index           shard time type  stage source_host source_node target_host target_node repository snapshot files files_recovered files_percent files_total bytes bytes_recovered bytes_percent bytes_total translog_ops translog_ops_recovered translog_ops_percent
+  my-index-000001 0     13ms store done  n/a         n/a         127.0.0.1   node-0      n/a        n/a      0     0               100%          13          0b    0b              100%          9928b       0            0                      100.0%
diff --git a/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample2.yaml b/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample2.yaml
new file mode 100644
index 0000000000..df7f8f6c7b
--- /dev/null
+++ b/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample2.yaml
@@ -0,0 +1,11 @@
+summary: A live shard recovery
+description: >
+  A successful response from `GET _cat/recovery?v=true&h=i,s,t,ty,st,shost,thost,f,fp,b,bp`.
+  You can retrieve information about an ongoing recovery, for example, when you increase the replica count of an index and bring another node online to host the replicas.
+  In this example, the recovery type is `peer`, meaning the shard recovered from another node.
+  The `files` and `bytes` are real-time measurements.
+# type: response
+# response_code: 200
+value: |-
+  i               s t      ty   st   shost       thost       f fp     b  bp
+  my-index-000001 0 1252ms peer done 192.168.1.1 192.168.1.2 0 100.0% 0b 100.0%
diff --git a/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample3.yaml b/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample3.yaml
new file mode 100644
index 0000000000..2e4c852e75
--- /dev/null
+++ b/specification/cat/recovery/examples/200_response/CatRecoveryResponseExample3.yaml
@@ -0,0 +1,10 @@
+summary: A snapshot recovery
+description: >
+  A successful response from `GET _cat/recovery?v=true&h=i,s,t,ty,st,rep,snap,f,fp,b,bp`.
+  You can restore backups of an index using the snapshot and restore API.
+  You can use the cat recovery API to get information about a snapshot recovery.
+# type: response
+# response_code: 200
+value: |-
+  i               s t      ty       st   rep     snap   f  fp   b     bp
+  my-index-000001 0 1978ms snapshot done my-repo snap-1 79 8.0% 12086 9.0%
diff --git a/specification/cat/repositories/CatRepositoriesRequest.ts b/specification/cat/repositories/CatRepositoriesRequest.ts
index dcd5bbf13d..68f4e5b667 100644
--- a/specification/cat/repositories/CatRepositoriesRequest.ts
+++ b/specification/cat/repositories/CatRepositoriesRequest.ts
@@ -21,7 +21,9 @@ import { CatRequestBase } from '@cat/_types/CatBase'
 import { Duration } from '@_types/Time'
 
 /**
- * Returns the snapshot repositories for a cluster.
+ * Get snapshot repository information.
+ *
+ * Get a list of snapshot repositories for a cluster.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.
  * @rest_spec_name cat.repositories
  * @availability stack since=2.1.0 stability=stable
diff --git a/specification/cat/repositories/examples/200_response/CatRepositoriesResponseExample1.yaml b/specification/cat/repositories/examples/200_response/CatRepositoriesResponseExample1.yaml
new file mode 100644
index 0000000000..3a857df658
--- /dev/null
+++ b/specification/cat/repositories/examples/200_response/CatRepositoriesResponseExample1.yaml
@@ -0,0 +1,9 @@
+# summary:
+description: >
+  A successful response from `GET /_cat/repositories?v=true`.
+# type: response
+# response_code: 200
+value: |-
+  id    type
+  repo1 fs
+  repo2 s3
diff --git a/specification/cat/segments/CatSegmentsRequest.ts b/specification/cat/segments/CatSegmentsRequest.ts
index b31370170e..9eed2599c9 100644
--- a/specification/cat/segments/CatSegmentsRequest.ts
+++ b/specification/cat/segments/CatSegmentsRequest.ts
@@ -22,7 +22,9 @@ import { Bytes, Indices } from '@_types/common'
 import { Duration } from '@_types/Time'
 
 /**
- * Returns low-level information about the Lucene segments in index shards.
+ * Get segment information.
+ *
+ * Get low-level information about the Lucene segments in index shards.
  * For data streams, the API returns information about the backing indices.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.
  * @rest_spec_name cat.segments
diff --git a/specification/cat/segments/examples/200_response/CatSegmentsResponseExample1.yaml b/specification/cat/segments/examples/200_response/CatSegmentsResponseExample1.yaml
new file mode 100644
index 0000000000..a93ad160fa
--- /dev/null
+++ b/specification/cat/segments/examples/200_response/CatSegmentsResponseExample1.yaml
@@ -0,0 +1,9 @@
+# summary:
+description: >
+  A successful response from `GET /_cat/segments?v=true`.
+# type: response
+# response_code: 200
+value: |-
+  index shard prirep ip        segment generation docs.count docs.deleted size size.memory committed searchable version compound
+  test  0     p      127.0.0.1 _0      0          1          0            3kb  0           false     true       9.12.0  true
+  test1 0     p      127.0.0.1 _0      0          1          0            3kb  0           false     true       9.12.0  true
diff --git a/specification/cat/shards/CatShardsRequest.ts b/specification/cat/shards/CatShardsRequest.ts
index be7a4f0276..e83d8d6ff4 100644
--- a/specification/cat/shards/CatShardsRequest.ts
+++ b/specification/cat/shards/CatShardsRequest.ts
@@ -22,7 +22,9 @@ import { Bytes, Indices } from '@_types/common'
 import { Duration, TimeUnit } from '@_types/Time'
 
 /**
- * Returns information about the shards in a cluster.
+ * Get shard information.
+ *
+ * Get information about the shards in a cluster.
  * For data streams, the API returns information about the backing indices.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
  * @rest_spec_name cat.shards
diff --git a/specification/cat/shards/examples/200_response/CatShardsResponseExample1.yaml b/specification/cat/shards/examples/200_response/CatShardsResponseExample1.yaml
new file mode 100644
index 0000000000..8f473d6578
--- /dev/null
+++ b/specification/cat/shards/examples/200_response/CatShardsResponseExample1.yaml
@@ -0,0 +1,7 @@
+summary: A single data stream or index
+description: >
+  A successful response from `GET _cat/shards`.
+# type: response
+# response_code: 200
+value: |-
+  my-index-000001 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
diff --git a/specification/cat/shards/examples/200_response/CatShardsResponseExample2.yaml b/specification/cat/shards/examples/200_response/CatShardsResponseExample2.yaml
new file mode 100644
index 0000000000..d92faac6b2
--- /dev/null
+++ b/specification/cat/shards/examples/200_response/CatShardsResponseExample2.yaml
@@ -0,0 +1,8 @@
+summary: A wildcard pattern
+description: >
+  A successful response from `GET _cat/shards/my-index-*`.
+  It returns information for any data streams or indices beginning with `my-index-`.
+# type: response
+# response_code: 200
+value: |-
+  my-index-000001 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA
diff --git a/specification/cat/shards/examples/200_response/CatShardsResponseExample3.yaml b/specification/cat/shards/examples/200_response/CatShardsResponseExample3.yaml
new file mode 100644
index 0000000000..e4380dacc0
--- /dev/null
+++ b/specification/cat/shards/examples/200_response/CatShardsResponseExample3.yaml
@@ -0,0 +1,8 @@
+summary: A relocating shard
+description: >
+  A successful response from `GET _cat/shards`.
+  The `RELOCATING` value in the `state` column indicates the index shard is relocating.
+# type: response
+# response_code: 200
+value: |-
+  my-index-000001 0 p RELOCATING 3014 31.1mb 192.168.56.10 H5dfFeA -> 192.168.56.30 bGG90GE
diff --git a/specification/cat/shards/examples/200_response/CatShardsResponseExample4.yaml b/specification/cat/shards/examples/200_response/CatShardsResponseExample4.yaml
new file mode 100644
index 0000000000..01ad25c9be
--- /dev/null
+++ b/specification/cat/shards/examples/200_response/CatShardsResponseExample4.yaml
@@ -0,0 +1,10 @@
+summary: Shard states
+description: >
+  A successful response from `GET _cat/shards`.
+  Before a shard is available for use, it goes through an `INITIALIZING` state.
+  You can use the cat shards API to see which shards are initializing.
+# type: response
+# response_code: 200
+value: |-
+  my-index-000001 0 p STARTED      3014 31.1mb 192.168.56.10 H5dfFeA
+  my-index-000001 0 r INITIALIZING 0    14.3mb 192.168.56.30 bGG90GE
diff --git a/specification/cat/shards/examples/200_response/CatShardsResponseExample5.yaml b/specification/cat/shards/examples/200_response/CatShardsResponseExample5.yaml
new file mode 100644
index 0000000000..25313b1b70
--- /dev/null
+++ b/specification/cat/shards/examples/200_response/CatShardsResponseExample5.yaml
@@ -0,0 +1,11 @@
+summary: Reasons for unassigned shards
+description: >
+  A successful response from `GET _cat/shards?h=index,shard,prirep,state,unassigned.reason`.
+  It includes the `unassigned.reason` column, which indicates why a shard is unassigned.
+# type: response
+# response_code: 200
+value: |-
+  my-index-000001 0 p STARTED    3014 31.1mb 192.168.56.10 H5dfFeA
+  my-index-000001 0 r STARTED    3014 31.1mb 192.168.56.30 bGG90GE
+  my-index-000001 0 r STARTED    3014 31.1mb 192.168.56.20 I8hydUG
+  my-index-000001 0 r UNASSIGNED ALLOCATION_FAILED
diff --git a/specification/cat/snapshots/CatSnapshotsRequest.ts b/specification/cat/snapshots/CatSnapshotsRequest.ts
index 3a6473973a..01d6f7ed5e 100644
--- a/specification/cat/snapshots/CatSnapshotsRequest.ts
+++ b/specification/cat/snapshots/CatSnapshotsRequest.ts
@@ -22,7 +22,9 @@ import { Names } from '@_types/common'
 import { Duration, TimeUnit } from '@_types/Time'
 
 /**
- * Returns information about the snapshots stored in one or more repositories.
+ * Get snapshot information.
+ *
+ * Get information about the snapshots stored in one or more repositories.
  * A snapshot is a backup of an index or running Elasticsearch cluster.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
  * @rest_spec_name cat.snapshots
diff --git a/specification/cat/snapshots/examples/200_response/CatSnapshotsResponseExample1.yaml b/specification/cat/snapshots/examples/200_response/CatSnapshotsResponseExample1.yaml
new file mode 100644
index 0000000000..c9f31cf7a8
--- /dev/null
+++ b/specification/cat/snapshots/examples/200_response/CatSnapshotsResponseExample1.yaml
@@ -0,0 +1,9 @@
+# summary:
+description: >
+  A successful response from `GET /_cat/snapshots/repo1?v=true&s=id`.
+# type: response
+# response_code: 200
+value: |-
+  id    repository status  start_epoch start_time end_epoch  end_time duration indices successful_shards failed_shards total_shards
+  snap1 repo1      FAILED  1445616705  18:11:45   1445616978 18:16:18 4.6m     1       4                 1             5
+  snap2 repo1      SUCCESS 1445634298  23:04:58   1445634672 23:11:12 6.2m     2       10                0             10
diff --git a/specification/cat/tasks/CatTasksRequest.ts b/specification/cat/tasks/CatTasksRequest.ts
index a7c1e148d6..8578379e41 100644
--- a/specification/cat/tasks/CatTasksRequest.ts
+++ b/specification/cat/tasks/CatTasksRequest.ts
@@ -21,7 +21,9 @@ import { CatRequestBase } from '@cat/_types/CatBase'
 import { Duration, TimeUnit } from '@_types/Time'
 
 /**
- * Returns information about tasks currently executing in the cluster.
+ * Get task information.
+ *
+ * Get information about tasks currently running in the cluster.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
  * @rest_spec_name cat.tasks
 * @availability stack since=5.0.0 stability=experimental
diff --git a/specification/cat/tasks/examples/200_response/CatTasksResponseExample1.yaml b/specification/cat/tasks/examples/200_response/CatTasksResponseExample1.yaml
new file mode 100644
index 0000000000..8d05b0a9d2
--- /dev/null
+++ b/specification/cat/tasks/examples/200_response/CatTasksResponseExample1.yaml
@@ -0,0 +1,8 @@
+# summary:
+description: A successful response from `GET _cat/tasks?v=true`.
+# type: response
+# response_code: 200
+value: |-
+  action                         task_id                    parent_task_id             type      start_time    timestamp running_time ip             node
+  cluster:monitor/tasks/lists[n] oTUltX4IQMOUUVeiohTt8A:124 oTUltX4IQMOUUVeiohTt8A:123 direct    1458585884904 01:48:24  44.1micros   127.0.0.1:9300 oTUltX4IQMOUUVeiohTt8A
+  cluster:monitor/tasks/lists    oTUltX4IQMOUUVeiohTt8A:123 -                          transport 1458585884904 01:48:24  186.2micros  127.0.0.1:9300 oTUltX4IQMOUUVeiohTt8A
diff --git a/specification/cat/templates/CatTemplatesRequest.ts b/specification/cat/templates/CatTemplatesRequest.ts
index 529f759bf4..9f14fb3e10 100644
--- a/specification/cat/templates/CatTemplatesRequest.ts
+++ b/specification/cat/templates/CatTemplatesRequest.ts
@@ -22,7 +22,9 @@ import { Name } from '@_types/common'
 import { Duration } from '@_types/Time'
 
 /**
- * Returns information about index templates in a cluster.
+ * Get index template information.
+ *
+ * Get information about the index templates in a cluster.
  * You can use index templates to apply index settings and field mappings to new indices at creation.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.
  * @rest_spec_name cat.templates
diff --git a/specification/cat/templates/examples/200_response/CatTemplatesResponseExample1.yaml b/specification/cat/templates/examples/200_response/CatTemplatesResponseExample1.yaml
new file mode 100644
index 0000000000..af3110b0f8
--- /dev/null
+++ b/specification/cat/templates/examples/200_response/CatTemplatesResponseExample1.yaml
@@ -0,0 +1,10 @@
+# summary:
+description: >
+  A successful response from `GET _cat/templates/my-template-*?v=true&s=name`.
+# type: response
+# response_code: 200
+value: |-
+  name          index_patterns order version composed_of
+  my-template-0 [te*]          500           []
+  my-template-1 [tea*]         501           []
+  my-template-2 [teak*]        502   7       []
diff --git a/specification/cat/thread_pool/CatThreadPoolRequest.ts b/specification/cat/thread_pool/CatThreadPoolRequest.ts
index 642626d62c..d0ee06dfc6 100644
--- a/specification/cat/thread_pool/CatThreadPoolRequest.ts
+++ b/specification/cat/thread_pool/CatThreadPoolRequest.ts
@@ -22,7 +22,9 @@ import { Names } from '@_types/common'
 import { Duration, TimeUnit } from '@_types/Time'
 
 /**
- * Returns thread pool statistics for each node in a cluster.
+ * Get thread pool statistics.
+ *
+ * Get thread pool statistics for each node in a cluster.
  * Returned information includes all built-in thread pools and custom thread pools.
  * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
  * @rest_spec_name cat.thread_pool
diff --git a/specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample1.yaml b/specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample1.yaml
new file mode 100644
index 0000000000..e62c1780cc
--- /dev/null
+++ b/specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample1.yaml
@@ -0,0 +1,11 @@
+summary: Default columns
+description: >
+  A successful response from `GET /_cat/thread_pool`.
+# type: response
+# response_code: 200
+value: |-
+  node-0 analyze             0 0 0
+  node-0 fetch_shard_started 0 0 0
+  node-0 fetch_shard_store   0 0 0
+  node-0 flush               0 0 0
+  node-0 write               0 0 0
diff --git a/specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample2.yaml b/specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample2.yaml
new file mode 100644
index 0000000000..861ae9dc46
--- /dev/null
+++ b/specification/cat/thread_pool/examples/200_response/CatThreadPoolResponseExample2.yaml
@@ -0,0 +1,10 @@
+summary: Explicit columns
+description: >
+  A successful response from `GET /_cat/thread_pool/generic?v=true&h=id,name,active,rejected,completed`.
+  It returns the `id`, `name`, `active`, `rejected`, and `completed` columns.
+  It also limits returned information to the generic thread pool.
+# type: response
+# response_code: 200
+value: |-
+  id                     name    active rejected completed
+  0EWUhXeBQtaVGlexUeVwMg generic 0      0        70
diff --git a/specification/cat/transforms/CatTransformsRequest.ts b/specification/cat/transforms/CatTransformsRequest.ts
index 6722b4e09d..1cea29821b 100644
--- a/specification/cat/transforms/CatTransformsRequest.ts
+++ b/specification/cat/transforms/CatTransformsRequest.ts
@@ -23,8 +23,9 @@ import { integer } from '@_types/Numeric'
 import { TimeUnit } from '@_types/Time'
 
 /**
- * Get transforms.
- * Returns configuration and usage information about transforms.
+ * Get transform information.
+ *
+ * Get configuration and usage information about transforms.
  *
  * CAT APIs are only intended for human consumption using the Kibana
  * console or command line. They are not intended for use by applications. For
diff --git a/specification/cat/transforms/examples/200_response/CatTransformsResponseExample1.yaml b/specification/cat/transforms/examples/200_response/CatTransformsResponseExample1.yaml
new file mode 100644
index 0000000000..d5ad7f2c82
--- /dev/null
+++ b/specification/cat/transforms/examples/200_response/CatTransformsResponseExample1.yaml
@@ -0,0 +1,15 @@
+# summary:
+description: A successful response from `GET /_cat/transforms?v=true&format=json`.
+# type: response
+# response_code: 200
+value: |-
+  [
+    {
+      "id" : "ecommerce_transform",
+      "state" : "started",
+      "checkpoint" : "1",
+      "documents_processed" : "705",
+      "checkpoint_progress" : "100.00",
+      "changes_last_detection_time" : null
+    }
+  ]
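
All of the `_cat` examples added above return plain-text tables, and every IMPORTANT note in this diff steers applications toward the corresponding JSON API instead. What follows is a minimal sketch of that guidance in practice, not part of the specification: it assumes the official `@elastic/elasticsearch` TypeScript client (v8 or later), and the node URL and index name are placeholders.

// A minimal sketch, not part of this diff: assumes the official
// @elastic/elasticsearch client (v8+); the node URL and index name
// below are placeholders. Per the IMPORTANT notes above, applications
// should prefer the dedicated JSON APIs over the _cat endpoints.
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' })

async function main(): Promise<void> {
  // Application-side equivalent of `GET /_cat/count/my-index-000001?v=true`,
  // using the count API as the CatCountRequest.ts comment recommends.
  const { count } = await client.count({ index: 'my-index-000001' })
  console.log(`live documents: ${count}`)

  // Application-side equivalent of `GET /_cat/health?v=true`,
  // using the cluster health API as the CatHealthRequest.ts comment recommends.
  const health = await client.cluster.health()
  console.log(`cluster ${health.cluster_name} is ${health.status}`)

  // For ad-hoc scripts, a _cat endpoint can still return structured output,
  // as CatTransformsResponseExample1.yaml shows with `?format=json`.
  const nodes = await client.cat.nodes({ format: 'json', h: 'ip,cpu,master,name' })
  console.log(nodes)
}

main().catch(console.error)

The `format` and `h` parameters in the last call mirror the query strings used throughout the examples above; for anything beyond ad-hoc scripting, the dedicated APIs in the first two calls are the supported path.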