From 4f07a66b2ab60f2097750f19bd2fba584ae52df9 Mon Sep 17 00:00:00 2001 From: Laura Trotta <153528055+l-trotta@users.noreply.github.com> Date: Wed, 29 Jan 2025 14:48:23 +0100 Subject: [PATCH 1/2] making response binary (#3639) (cherry picked from commit 98f06336bc75c141725c6817ec3fd7dabcf28466) --- output/openapi/elasticsearch-openapi.json | 36 +------- .../elasticsearch-serverless-openapi.json | 4 +- output/schema/schema-serverless.json | 4 +- output/schema/schema.json | 84 ++++--------------- output/typescript/types.ts | 15 +--- specification/_types/Binary.ts | 4 +- .../esql/async_query/AsyncQueryResponse.ts | 20 +---- .../async_query_get/AsyncQueryGetResponse.ts | 11 +-- specification/esql/query/QueryResponse.ts | 4 +- 9 files changed, 38 insertions(+), 144 deletions(-) diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 1de8cace8a..92cfa0e618 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -8803,23 +8803,7 @@ "content": { "application/json": { "schema": { - "type": "object", - "properties": { - "columns": { - "$ref": "#/components/schemas/_types:EsqlColumns" - }, - "id": { - "description": "A query identifier that is provided only when one of the following conditions is met:\n\n* A query request does not return complete results during the period specified in the `wait_for_completion_timeout` parameter.\n* The `keep_on_completion` parameter value is true.\n\nYou can use this ID with the `GET /_query/async/` API to get the current status and available results for the query.", - "type": "string" - }, - "is_running": { - "description": "Indicates whether the query is still running.\nIf the value is false, the async query has finished and the results are returned.", - "type": "boolean" - } - }, - "required": [ - "is_running" - ] + "$ref": "#/components/schemas/_types:EsqlResult" } } } @@ -8888,19 +8872,7 @@ "content": { "application/json": { "schema": { - 
"type": "object", - "properties": { - "columns": { - "$ref": "#/components/schemas/_types:EsqlColumns" - }, - "is_running": { - "description": "Indicates whether the query is still running.\nIf the value is false, the async query has finished and the results are returned.", - "type": "boolean" - } - }, - "required": [ - "is_running" - ] + "$ref": "#/components/schemas/_types:EsqlResult" } } } @@ -9045,7 +9017,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/_types:EsqlColumns" + "$ref": "#/components/schemas/_types:EsqlResult" } } } @@ -69272,7 +69244,7 @@ } ] }, - "_types:EsqlColumns": { + "_types:EsqlResult": { "type": "object" }, "_types:InlineGet": { diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 25c2ca4e5a..6336ec2a7c 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -5285,7 +5285,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/_types:EsqlColumns" + "$ref": "#/components/schemas/_types:EsqlResult" } } } @@ -45126,7 +45126,7 @@ } ] }, - "_types:EsqlColumns": { + "_types:EsqlResult": { "type": "object" }, "_types:InlineGet": { diff --git a/output/schema/schema-serverless.json b/output/schema/schema-serverless.json index 177a8d98d6..bbb4f1ef0c 100644 --- a/output/schema/schema-serverless.json +++ b/output/schema/schema-serverless.json @@ -18426,7 +18426,7 @@ "value": { "kind": "instance_of", "type": { - "name": "EsqlColumns", + "name": "EsqlResult", "namespace": "_types" } } @@ -77876,7 +77876,7 @@ { "kind": "type_alias", "name": { - "name": "EsqlColumns", + "name": "EsqlResult", "namespace": "_types" }, "specLocation": "_types/Binary.ts#L24-L24", diff --git a/output/schema/schema.json b/output/schema/schema.json index a8f4acdc16..ee3c12c5f3 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -46292,7 +46292,7 @@ { 
"kind": "type_alias", "name": { - "name": "EsqlColumns", + "name": "EsqlResult", "namespace": "_types" }, "specLocation": "_types/Binary.ts#L24-L24", @@ -122100,50 +122100,20 @@ { "kind": "response", "body": { - "kind": "properties", - "properties": [ - { - "name": "columns", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "EsqlColumns", - "namespace": "_types" - } - } - }, - { - "description": "A query identifier that is provided only when one of the following conditions is met:\n\n* A query request does not return complete results during the period specified in the `wait_for_completion_timeout` parameter.\n* The `keep_on_completion` parameter value is true.\n\nYou can use this ID with the `GET /_query/async/` API to get the current status and available results for the query.", - "name": "id", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "string", - "namespace": "_builtins" - } - } - }, - { - "description": "Indicates whether the query is still running.\nIf the value is false, the async query has finished and the results are returned.", - "name": "is_running", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "EsqlResult", + "namespace": "_types" } - ] + } }, "name": { "name": "Response", "namespace": "esql.async_query" }, - "specLocation": "esql/async_query/AsyncQueryResponse.ts#L22-L40" + "specLocation": "esql/async_query/AsyncQueryResponse.ts#L22-L24" }, { "kind": "request", @@ -122276,38 +122246,20 @@ { "kind": "response", "body": { - "kind": "properties", - "properties": [ - { - "name": "columns", - "required": false, - "type": { - "kind": "instance_of", - "type": { - "name": "EsqlColumns", - "namespace": "_types" - } - } - }, - { - "description": "Indicates whether the query is still running.\nIf the value is false, the async query has 
finished and the results are returned.", - "name": "is_running", - "required": true, - "type": { - "kind": "instance_of", - "type": { - "name": "boolean", - "namespace": "_builtins" - } - } + "kind": "value", + "value": { + "kind": "instance_of", + "type": { + "name": "EsqlResult", + "namespace": "_types" } - ] + } }, "name": { "name": "Response", "namespace": "esql.async_query_get" }, - "specLocation": "esql/async_query_get/AsyncQueryGetResponse.ts#L22-L31" + "specLocation": "esql/async_query_get/AsyncQueryGetResponse.ts#L22-L24" }, { "kind": "request", @@ -122489,7 +122441,7 @@ "value": { "kind": "instance_of", "type": { - "name": "EsqlColumns", + "name": "EsqlResult", "namespace": "_types" } } diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 09d4cda50a..f466905465 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -2315,7 +2315,7 @@ export interface ErrorResponseBase { status: integer } -export type EsqlColumns = ArrayBuffer +export type EsqlResult = ArrayBuffer export type ExpandWildcard = 'all' | 'open' | 'closed' | 'hidden' | 'none' @@ -10538,11 +10538,7 @@ export interface EsqlAsyncQueryRequest extends RequestBase { } } -export interface EsqlAsyncQueryResponse { - columns?: EsqlColumns - id?: string - is_running: boolean -} +export type EsqlAsyncQueryResponse = EsqlResult export interface EsqlAsyncQueryDeleteRequest extends RequestBase { id: Id @@ -10557,10 +10553,7 @@ export interface EsqlAsyncQueryGetRequest extends RequestBase { wait_for_completion_timeout?: Duration } -export interface EsqlAsyncQueryGetResponse { - columns?: EsqlColumns - is_running: boolean -} +export type EsqlAsyncQueryGetResponse = EsqlResult export interface EsqlQueryRequest extends RequestBase { format?: EsqlEsqlFormat @@ -10577,7 +10570,7 @@ export interface EsqlQueryRequest extends RequestBase { } } -export type EsqlQueryResponse = EsqlColumns +export type EsqlQueryResponse = EsqlResult export interface FeaturesFeature { name: 
string diff --git a/specification/_types/Binary.ts b/specification/_types/Binary.ts index f00d8ddcd4..ee7a540c2f 100644 --- a/specification/_types/Binary.ts +++ b/specification/_types/Binary.ts @@ -20,8 +20,8 @@ // Vector tile response export type MapboxVectorTiles = ArrayBuffer -// ES|QL columns -export type EsqlColumns = ArrayBuffer +// ES|QL generic response +export type EsqlResult = ArrayBuffer // Streaming endpoints response export type StreamResult = ArrayBuffer diff --git a/specification/esql/async_query/AsyncQueryResponse.ts b/specification/esql/async_query/AsyncQueryResponse.ts index a824a8411b..33fc735dbe 100644 --- a/specification/esql/async_query/AsyncQueryResponse.ts +++ b/specification/esql/async_query/AsyncQueryResponse.ts @@ -17,24 +17,8 @@ * under the License. */ -import { EsqlColumns } from '@_types/Binary' +import { EsqlResult } from '@_types/Binary' export class Response { - body: { - columns?: EsqlColumns - /** - * A query identifier that is provided only when one of the following conditions is met: - * - * * A query request does not return complete results during the period specified in the `wait_for_completion_timeout` parameter. - * * The `keep_on_completion` parameter value is true. - * - * You can use this ID with the `GET /_query/async/` API to get the current status and available results for the query. - */ - id?: string - /** - * Indicates whether the query is still running. - * If the value is false, the async query has finished and the results are returned. - */ - is_running: boolean - } + body: EsqlResult } diff --git a/specification/esql/async_query_get/AsyncQueryGetResponse.ts b/specification/esql/async_query_get/AsyncQueryGetResponse.ts index 47f6dc2e3b..33fc735dbe 100644 --- a/specification/esql/async_query_get/AsyncQueryGetResponse.ts +++ b/specification/esql/async_query_get/AsyncQueryGetResponse.ts @@ -17,15 +17,8 @@ * under the License. 
*/ -import { EsqlColumns } from '@_types/Binary' +import { EsqlResult } from '@_types/Binary' export class Response { - body: { - columns?: EsqlColumns - /** - * Indicates whether the query is still running. - * If the value is false, the async query has finished and the results are returned. - */ - is_running: boolean - } + body: EsqlResult } diff --git a/specification/esql/query/QueryResponse.ts b/specification/esql/query/QueryResponse.ts index 0c44745c6b..ba2058f084 100644 --- a/specification/esql/query/QueryResponse.ts +++ b/specification/esql/query/QueryResponse.ts @@ -17,9 +17,9 @@ * under the License. */ -import { EsqlColumns } from '@_types/Binary' +import { EsqlResult } from '@_types/Binary' export class Response { /** @codegen_name data */ - body: EsqlColumns + body: EsqlResult } From e7ee9c7fc2362e2bdb5e1f4e7939222e1f1c4d87 Mon Sep 17 00:00:00 2001 From: Laura Trotta Date: Wed, 29 Jan 2025 14:53:42 +0100 Subject: [PATCH 2/2] change back esql format package --- output/openapi/elasticsearch-openapi.json | 6 +- .../elasticsearch-serverless-openapi.json | 4 +- output/schema/schema-serverless.json | 1288 ++++++++++------- output/schema/schema.json | 72 +- output/typescript/types.ts | 8 +- .../esql/async_query/AsyncQueryRequest.ts | 2 +- .../esql/{_types => query}/QueryParameters.ts | 0 specification/esql/query/QueryRequest.ts | 2 +- 8 files changed, 847 insertions(+), 535 deletions(-) rename specification/esql/{_types => query}/QueryParameters.ts (100%) diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 92cfa0e618..10b4cf1b1f 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -8712,7 +8712,7 @@ "description": "A short version of the Accept header, for example `json` or `yaml`.", "deprecated": false, "schema": { - "$ref": "#/components/schemas/esql._types:EsqlFormat" + "$ref": "#/components/schemas/esql.query:EsqlFormat" }, "style": "form" }, @@ -8936,7 
+8936,7 @@ "description": "A short version of the Accept header, e.g. json, yaml.", "deprecated": false, "schema": { - "$ref": "#/components/schemas/esql._types:EsqlFormat" + "$ref": "#/components/schemas/esql.query:EsqlFormat" }, "style": "form" }, @@ -69148,7 +69148,7 @@ "head" ] }, - "esql._types:EsqlFormat": { + "esql.query:EsqlFormat": { "type": "string", "enum": [ "csv", diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 6336ec2a7c..33ad6b8af4 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -5204,7 +5204,7 @@ "description": "A short version of the Accept header, e.g. json, yaml.", "deprecated": false, "schema": { - "$ref": "#/components/schemas/esql._types:EsqlFormat" + "$ref": "#/components/schemas/esql.query:EsqlFormat" }, "style": "form" }, @@ -45030,7 +45030,7 @@ "head" ] }, - "esql._types:EsqlFormat": { + "esql.query:EsqlFormat": { "type": "string", "enum": [ "csv", diff --git a/output/schema/schema-serverless.json b/output/schema/schema-serverless.json index bbb4f1ef0c..1e84f29131 100644 --- a/output/schema/schema-serverless.json +++ b/output/schema/schema-serverless.json @@ -1907,7 +1907,7 @@ "stability": "stable" } }, - "description": "Count search results.\nGet the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Count search results.\nGet the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "docId": "search-count", "docTag": "search", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-count.html", @@ -2049,10 +2049,17 @@ "stability": "stable" } }, - "description": "Delete documents.\nDeletes documents that match the specified query.", + "description": "Delete documents.\n\nDeletes documents that match the specified query.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `delete` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\nWhen you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.\nIf a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.\n\nNOTE: Documents with a version equal 
to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.\nA bulk delete request is performed for each batch of matching documents.\nIf a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.\nIf the maximum retry limit is reached, processing halts and all failed requests are returned in the response.\nAny delete requests that completed successfully still stick, they are not rolled back.\n\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.\n\n**Throttling delete requests**\n\nTo control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to disable throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" 
instead of \"smooth\".\n\n**Slicing**\n\nDelete by query supports sliced scroll to parallelize the delete process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` lets Elasticsearch choose the number of slices to use.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\nAdding slices to the delete by query operation creates sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with slices only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. 
Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.\n* Delete performance scales linearly across available resources with the number of slices.\n\nWhether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Cancel a delete by query operation**\n\nAny delete by query can be canceled using the task cancel API. 
For example:\n\n```\nPOST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel\n```\n\nThe task ID can be found by using the get tasks API.\n\nCancellation should happen quickly but might take a few seconds.\nThe get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.", + "docId": "docs-delete-by-query", "docTag": "document", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-delete-by-query.html", "name": "delete_by_query", + "privileges": { + "index": [ + "read", + "delete" + ] + }, "request": { "name": "Request", "namespace": "_global.delete_by_query" @@ -2088,9 +2095,15 @@ } }, "description": "Delete a script or search template.\nDeletes a stored script or search template.", + "docId": "script-delete", "docTag": "script", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-stored-script-api.html", "name": "delete_script", + "privileges": { + "cluster": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "_global.delete_script" @@ -2124,7 +2137,8 @@ } }, "description": "Delete an enrich policy.\nDeletes an existing enrich policy and its enrich index.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-enrich-policy-api.html", + "docId": "delete-enrich-policy-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-enrich-policy-api.html", "name": "enrich.delete_policy", "request": { "name": "Request", @@ -2195,7 +2209,8 @@ } }, "description": "Get an enrich policy.\nReturns information about an enrich policy.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/get-enrich-policy-api.html", + "docId": 
"get-enrich-policy-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-enrich-policy-api.html", "name": "enrich.get_policy", "request": { "name": "Request", @@ -2275,7 +2290,8 @@ } }, "description": "Get enrich stats.\nReturns enrich coordinator statistics and information about enrich policies that are currently executing.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/enrich-stats-api.html", + "docId": "enrich-stats-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/enrich-stats-api.html", "name": "enrich.stats", "request": { "name": "Request", @@ -2310,7 +2326,8 @@ } }, "description": "Delete an async EQL search.\nDelete an async EQL search or a stored synchronous EQL search.\nThe API also deletes results for the search.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html", + "docId": "eql-async-search-delete", + "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-eql-delete", "name": "eql.delete", "request": { "name": "Request", @@ -2417,7 +2434,8 @@ } }, "description": "Get EQL search results.\nReturns search results for an Event Query Language (EQL) query.\nEQL assumes each document in a data stream or index corresponds to an event.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html", + "docId": "eql-search-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql-search-api.html", "extDocId": "eql", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/eql.html", "name": "eql.search", @@ -2573,10 +2591,16 @@ "stability": "stable" } }, - "description": "Explain a document match result.\nReturns information about why a specific document matches, or doesn’t match, a query.", + "description": "Explain a document match result.\nGet information about why a specific document matches, or 
doesn't match, a query.\nIt computes a score explanation for a query and a specific document.", + "docId": "search-explain", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-explain.html", "name": "explain", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.explain" @@ -2614,8 +2638,9 @@ } }, "description": "Get the field capabilities.\n\nGet information about the capabilities of fields among multiple indices.\n\nFor data streams, the API returns field capabilities among the stream’s backing indices.\nIt returns runtime fields like any other field.\nFor example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family.", + "docId": "search-field-caps", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-field-caps.html", "name": "field_caps", "privileges": { "index": [ @@ -2707,9 +2732,15 @@ } }, "description": "Get a script or search template.\nRetrieves a stored script or search template.", + "docId": "script-get", "docTag": "script", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-stored-script-api.html", "name": "get_script", + "privileges": { + "cluster": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "_global.get_script" @@ -2879,8 +2910,9 @@ "stability": "stable" } }, - "description": "Add an index block.\nLimits the operations allowed on an index by blocking specific operation types.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/index-modules-blocks.html", + 
"description": "Add an index block.\n\nAdd an index block to an index.\nIndex blocks limit the operations allowed on an index by blocking specific operation types.", + "docId": "index-block-add", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/index-modules-blocks.html#add-index-block", "name": "indices.add_block", "request": { "name": "Request", @@ -3011,9 +3043,10 @@ "stability": "stable" } }, - "description": "Create a data stream.\nCreates a data stream.\nYou must have a matching index template with data stream enabled.", + "description": "Create a data stream.\n\nYou must have a matching index template with data stream enabled.", + "docId": "indices-create-data-stream", "docTag": "data stream", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-create-data-stream.html", "name": "indices.create_data_stream", "privileges": { "index": [ @@ -3052,9 +3085,10 @@ "stability": "stable" } }, - "description": "Get data stream stats.\nRetrieves statistics for one or more data streams.", + "description": "Get data stream stats.\n\nGet statistics for one or more data streams.", + "docId": "data-stream-stats-api", "docTag": "data stream", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/data-stream-stats-api.html", "name": "indices.data_streams_stats", "privileges": { "index": [ @@ -3186,7 +3220,8 @@ } }, "description": "Delete data stream lifecycles.\nRemoves the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html", + "docId": "data-stream-delete-lifecycle", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/data-streams-delete-lifecycle.html", "name": "indices.delete_data_lifecycle", "request": { "name": "Request", @@ -3221,8 +3256,9 @@ } }, "description": "Delete data streams.\nDeletes one or more data streams and their backing indices.", + "docId": "data-stream-delete", "docTag": "data stream", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-delete-data-stream.html", "name": "indices.delete_data_stream", "privileges": { "index": [ @@ -3336,8 +3372,9 @@ "stability": "stable" } }, - "description": "Check aliases.\nChecks if one or more data stream or index aliases exist.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "description": "Check aliases.\n\nCheck if one or more data stream or index aliases exist.", + "docId": "indices-aliases-exist", + "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-alias", "name": "indices.exists_alias", "request": { "name": "Request", @@ -3376,8 +3413,9 @@ "stability": "stable" } }, - "description": "Check index templates.\nCheck whether index templates exist.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/index-templates.html", + "description": "Check index templates.\n\nCheck whether index templates exist.", + "docId": "index-templates-exist", + "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-indices-exists-index-template", "name": "indices.exists_index_template", "request": { "name": "Request", @@ -3412,8 +3450,9 @@ } }, "description": "Get the status for a data stream lifecycle.\nGet information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or 
any errors encountered during lifecycle execution.", + "docId": "data-stream-explain-lifecycle", "docTag": "data stream", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/data-streams-explain-lifecycle.html", "name": "indices.explain_data_lifecycle", "request": { "name": "Request", @@ -3546,9 +3585,10 @@ "stability": "stable" } }, - "description": "Get data stream lifecycles.\nRetrieves the data stream lifecycle configuration of one or more data streams.", + "description": "Get data stream lifecycles.\n\nGet the data stream lifecycle configuration of one or more data streams.", + "docId": "data-stream-get-lifecycle", "docTag": "data stream", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/data-streams-get-lifecycle.html", "name": "indices.get_data_lifecycle", "request": { "name": "Request", @@ -3582,9 +3622,10 @@ "stability": "stable" } }, - "description": "Get data streams.\nRetrieves information about one or more data streams.", + "description": "Get data streams.\n\nGet information about one or more data streams.", + "docId": "data-stream-get", "docTag": "data stream", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/indices-get-data-stream.html", "name": "indices.get_data_stream", "privileges": { "index": [ @@ -4450,7 +4491,8 @@ } }, "description": "Delete an inference endpoint", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html", + "docId": "inference-api-delete", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-inference-api.html", "name": "inference.delete", "request": { 
"name": "Request", @@ -4492,7 +4534,8 @@ } }, "description": "Get an inference endpoint", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-inference-api.html", + "docId": "inference-api-get", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-inference-api.html", "name": "inference.get", "request": { "name": "Request", @@ -4539,9 +4582,15 @@ "visibility": "public" } }, - "description": "Perform inference on the service", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/post-inference-api.html", + "description": "Perform inference on the service.\n\nThis API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\n> info\n> The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. 
However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", + "docId": "inference-api-post", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/post-inference-api.html", "name": "inference.inference", + "privileges": { + "cluster": [ + "monitor_inference" + ] + }, "request": { "name": "Request", "namespace": "inference.inference" @@ -4585,7 +4634,8 @@ } }, "description": "Create an inference endpoint.\nWhen you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.\nAfter creating the endpoint, wait for the model deployment to complete before using it.\nTo verify the deployment status, use the get trained model statistics API.\nLook for `\"state\": \"fully_allocated\"` in the response and ensure that the `\"allocation_count\"` matches the `\"target_allocation_count\"`.\nAvoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-inference-api.html", + "docId": "inference-api-put", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-inference-api.html", "name": "inference.put", "privileges": { "cluster": [ @@ -4679,6 +4729,7 @@ }, "description": "Get 
cluster info.\nGet basic build, version, and cluster information.", "docId": "api-root", + "docTag": "info", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/rest-api-root.html", "name": "info", "privileges": { @@ -4719,7 +4770,8 @@ } }, "description": "Delete pipelines.\nDelete one or more ingest pipelines.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html", + "docId": "delete-pipeline-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-pipeline-api.html", "extDocId": "ingest", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ingest.html", "name": "ingest.delete_pipeline", @@ -4755,8 +4807,9 @@ "stability": "stable" } }, - "description": "Get pipelines.\nGet information about one or more ingest pipelines.\nThis API returns a local reference of the pipeline.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html", + "description": "Get pipelines.\n\nGet information about one or more ingest pipelines.\nThis API returns a local reference of the pipeline.", + "docId": "get-pipeline-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-pipeline-api.html", "extDocId": "ingest", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ingest.html", "name": "ingest.get_pipeline", @@ -4877,9 +4930,15 @@ "stability": "stable" } }, - "description": "Simulate a pipeline.\nRun an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html", + "description": "Simulate a pipeline.\n\nRun an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with 
the provided documents or supply a pipeline definition in the body of the request.", + "docId": "simulate-pipeline-api", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/simulate-pipeline-api.html", "name": "ingest.simulate", + "privileges": { + "cluster": [ + "read_pipeline" + ] + }, "request": { "name": "Request", "namespace": "ingest.simulate" @@ -4922,8 +4981,9 @@ "stability": "stable" } }, - "description": "Get license information.\nGet information about your Elastic license including its type, its status, when it was issued, and when it expires.\n\nNOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.\nIf you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-license.html", + "description": "Get license information.\n\nGet information about your Elastic license including its type, its status, when it was issued, and when it expires.\n\n> info\n> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.\n> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.", + "docId": "get-license", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-license.html", "name": "license.get", "request": { "name": "Request", @@ -5095,9 +5155,10 @@ "stability": "stable" } }, - "description": "Get multiple documents.\n\nGet multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.", + "description": "Get multiple documents.\n\nGet multiple JSON documents by ID from one or more indices.\nIf you specify an index 
in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if stored).\nUse the `_source` and `_source_includes` or `_source_excludes` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", + "docId": "docs-multi-get", "docTag": "document", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-get.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-multi-get.html", "name": "mget", "privileges": { "index": [ @@ -5398,8 +5459,9 @@ } }, "description": "Delete a filter.\nIf an anomaly detection job references the filter, you cannot delete the\nfilter. 
You must update or delete the job before you can delete the filter.", + "docId": "ml-delete-filter", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-filter.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-filter.html", "name": "ml.delete_filter", "privileges": { "cluster": [ @@ -5439,8 +5501,9 @@ } }, "description": "Delete an anomaly detection job.\nAll job configuration, model state and results are deleted.\nIt is not currently possible to delete multiple jobs using wildcards or a\ncomma separated list. If you delete a job that has a datafeed, the request\nfirst tries to delete the datafeed. This behavior is equivalent to calling\nthe delete datafeed API with the same timeout and force parameters as the\ndelete job request.", + "docId": "ml-delete-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-delete-job.html", "name": "ml.delete_job", "privileges": { "cluster": [ @@ -5480,8 +5543,9 @@ } }, "description": "Delete an unreferenced trained model.\nThe request deletes a trained inference model that is not referenced by an ingest pipeline.", + "docId": "delete-trained-models", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-trained-models.html", "name": "ml.delete_trained_model", "privileges": { "cluster": [ @@ -5521,8 +5585,9 @@ } }, "description": "Delete a trained model alias.\nThis API deletes an existing model alias that refers to a trained model. 
If\nthe model alias is missing or refers to a model other than the one identified\nby the `model_id`, this API returns an error.", + "docId": "delete-trained-models-aliases", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-trained-models-aliases.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-trained-models-aliases.html", "name": "ml.delete_trained_model_alias", "privileges": { "cluster": [ @@ -5565,8 +5630,9 @@ } }, "description": "Estimate job model memory usage.\nMakes an estimation of the memory usage for an anomaly detection job model.\nIt is based on analysis configuration details for the job and cardinality\nestimates for the fields it references.", + "docId": "ml-estimate-memory", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-apis.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-estimate-model-memory.html", "name": "ml.estimate_model_memory", "privileges": { "cluster": [ @@ -5609,8 +5675,9 @@ } }, "description": "Evaluate data frame analytics.\nThe API packages together commonly used evaluation metrics for various types\nof machine learning features. This has been designed for use on indexes\ncreated by data frame analytics. Evaluation requires both a ground truth\nfield and an analytics result field to be present.", + "docId": "evaluate-dfanalytics", "docTag": "ml data frame", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/evaluate-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/evaluate-dfanalytics.html", "name": "ml.evaluate_data_frame", "privileges": { "cluster": [ @@ -5653,8 +5720,9 @@ } }, "description": "Force buffered data to be processed.\nThe flush jobs API is only applicable when sending data for analysis using\nthe post data API. 
Depending on the content of the buffer, then it might\nadditionally calculate new results. Both flush and close operations are\nsimilar, however the flush is more efficient if you are expecting to send\nmore data for analysis. When flushing, the job remains open and is available\nto continue analyzing data. A close operation additionally prunes and\npersists the model state to disk and the job must be opened again before\nanalyzing further data.", + "docId": "ml-flush-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-flush-job.html", "name": "ml.flush_job", "privileges": { "cluster": [ @@ -5697,8 +5765,9 @@ } }, "description": "Get info about events in calendars.", + "docId": "ml-get-calendar-event", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar-event.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-calendar-event.html", "name": "ml.get_calendar_events", "privileges": { "cluster": [ @@ -5738,8 +5807,9 @@ } }, "description": "Get calendar configuration info.", + "docId": "ml-get-calendar", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-calendar.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-calendar.html", "name": "ml.get_calendars", "privileges": { "cluster": [ @@ -5790,8 +5860,9 @@ } }, "description": "Get data frame analytics job configuration info.\nYou can get information for multiple data frame analytics jobs in a single\nAPI request by using a comma-separated list of data frame analytics jobs or a\nwildcard expression.", + "docId": "get-dfanalytics", "docTag": "ml data frame", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics.html", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-dfanalytics.html", "name": "ml.get_data_frame_analytics", "privileges": { "cluster": [ @@ -5837,8 +5908,9 @@ } }, "description": "Get data frame analytics jobs usage info.", + "docId": "get-dfanalytics-stats", "docTag": "ml data frame", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/get-dfanalytics-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-dfanalytics-stats.html", "name": "ml.get_data_frame_analytics_stats", "privileges": { "cluster": [ @@ -5884,8 +5956,9 @@ } }, "description": "Get datafeeds usage info.\nYou can get statistics for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. You can\nget statistics for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``. If the datafeed is stopped, the\nonly information you receive is the `datafeed_id` and the `state`.\nThis API returns a maximum of 10,000 datafeeds.", + "docId": "ml-get-datafeed-stats", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-datafeed-stats.html", "name": "ml.get_datafeed_stats", "privileges": { "cluster": [ @@ -5931,8 +6004,9 @@ } }, "description": "Get datafeeds configuration info.\nYou can get information for multiple datafeeds in a single API request by\nusing a comma-separated list of datafeeds or a wildcard expression. 
You can\nget information for all datafeeds by using `_all`, by specifying `*` as the\n``, or by omitting the ``.\nThis API returns a maximum of 10,000 datafeeds.", + "docId": "ml-get-datafeed", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-datafeed.html", "name": "ml.get_datafeeds", "privileges": { "cluster": [ @@ -5978,8 +6052,9 @@ } }, "description": "Get filters.\nYou can get a single filter or all filters.", + "docId": "ml-get-filter", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-filter.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-filter.html", "name": "ml.get_filters", "privileges": { "cluster": [ @@ -6025,8 +6100,9 @@ } }, "description": "Get anomaly detection jobs usage info.", + "docId": "ml-get-job-stats", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-job-stats.html", "name": "ml.get_job_stats", "privileges": { "cluster": [ @@ -6072,8 +6148,9 @@ } }, "description": "Get anomaly detection jobs configuration info.\nYou can get information for multiple anomaly detection jobs in a single API\nrequest by using a group name, a comma-separated list of jobs, or a wildcard\nexpression. 
You can get information for all anomaly detection jobs by using\n`_all`, by specifying `*` as the ``, or by omitting the ``.", + "docId": "ml-get-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-job.html", "name": "ml.get_jobs", "privileges": { "cluster": [ @@ -6119,8 +6196,9 @@ } }, "description": "Get overall bucket results.\n\nRetrievs overall bucket results that summarize the bucket results of\nmultiple anomaly detection jobs.\n\nThe `overall_score` is calculated by combining the scores of all the\nbuckets within the overall bucket span. First, the maximum\n`anomaly_score` per anomaly detection job in the overall bucket is\ncalculated. Then the `top_n` of those scores are averaged to result in\nthe `overall_score`. This means that you can fine-tune the\n`overall_score` so that it is more or less sensitive to the number of\njobs that detect an anomaly at the same time. For example, if you set\n`top_n` to `1`, the `overall_score` is the maximum bucket score in the\noverall bucket. Alternatively, if you set `top_n` to the number of jobs,\nthe `overall_score` is high only when all jobs detect anomalies in that\noverall bucket. 
If you set the `bucket_span` parameter (to a value\ngreater than its default), the `overall_score` is the maximum\n`overall_score` of the overall buckets that have a span equal to the\njobs' largest bucket span.", + "docId": "ml-get-overall-buckets", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-overall-buckets.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-get-overall-buckets.html", "name": "ml.get_overall_buckets", "privileges": { "cluster": [ @@ -6164,8 +6242,9 @@ } }, "description": "Get trained model configuration info.", + "docId": "get-trained-models", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-trained-models.html", "name": "ml.get_trained_models", "privileges": { "cluster": [ @@ -6211,8 +6290,9 @@ } }, "description": "Get trained models usage info.\nYou can get usage information for multiple trained\nmodels in a single API request by using a comma-separated list of model IDs or a wildcard expression.", + "docId": "get-trained-models-stats", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/get-trained-models-stats.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-trained-models-stats.html", "name": "ml.get_trained_models_stats", "privileges": { "cluster": [ @@ -6258,8 +6338,9 @@ } }, "description": "Evaluate a trained model.", + "docId": "infer-trained-model", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/infer-trained-model.html", "name": "ml.infer_trained_model", "request": { "name": "Request", @@ -6307,8 +6388,9 @@ } }, 
"description": "Open anomaly detection jobs.\nAn anomaly detection job must be opened to be ready to receive and analyze\ndata. It can be opened and closed multiple times throughout its lifecycle.\nWhen you open a new job, it starts with an empty model.\nWhen you open an existing job, the most recent model state is automatically\nloaded. The job is ready to resume its analysis from where it left off, once\nnew data is received.", + "docId": "ml-open-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-open-job.html", "name": "ml.open_job", "privileges": { "cluster": [ @@ -6351,8 +6433,9 @@ } }, "description": "Add scheduled events to the calendar.", + "docId": "ml-post-calendar-event", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-calendar-event.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-post-calendar-event.html", "name": "ml.post_calendar_events", "privileges": { "cluster": [ @@ -6394,9 +6477,10 @@ "stability": "stable" } }, - "description": "Preview features used by data frame analytics.\nPreviews the extracted features used by a data frame analytics config.", + "description": "Preview features used by data frame analytics.\nPreview the extracted features used by a data frame analytics config.", + "docId": "preview-dfanalytics", "docTag": "ml data frame", - "docUrl": "http://www.elastic.co/guide/en/elasticsearch/reference/current/preview-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/preview-dfanalytics.html", "name": "ml.preview_data_frame_analytics", "privileges": { "cluster": [ @@ -6447,8 +6531,9 @@ } }, "description": "Preview a datafeed.\nThis API returns the first \"page\" of search results from a datafeed.\nYou can preview an existing datafeed or 
provide configuration details for a datafeed\nand anomaly detection job in the API. The preview shows the structure of the data\nthat will be passed to the anomaly detection engine.\nIMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that\ncalled the API. However, when the datafeed starts it uses the roles of the last user that created or updated the\ndatafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials.\nYou can also use secondary authorization headers to supply the credentials.", + "docId": "ml-preview-datafeed", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-preview-datafeed.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-preview-datafeed.html", "name": "ml.preview_datafeed", "privileges": { "cluster": [ @@ -6502,8 +6587,9 @@ } }, "description": "Create a calendar.", + "docId": "ml-put-calendar", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-put-calendar.html", "name": "ml.put_calendar", "privileges": { "cluster": [ @@ -6546,8 +6632,9 @@ } }, "description": "Add anomaly detection job to calendar.", + "docId": "ml-put-calendar-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-calendar-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-put-calendar-job.html", "name": "ml.put_calendar_job", "privileges": { "cluster": [ @@ -6639,8 +6726,9 @@ } }, "description": "Create a datafeed.\nDatafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job.\nYou can associate only one datafeed with each anomaly detection job.\nThe datafeed contains a query that runs at a defined interval 
(`frequency`).\nIf you are concerned about delayed data, you can add a delay (`query_delay') at each interval.\nBy default, the datafeed uses the following query: `{\"match_all\": {\"boost\": 1}}`.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had\nat the time of creation and runs the query using those same roles. If you provide secondary authorization headers,\nthose credentials are used instead.\nYou must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed\ndirectly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index.", + "docId": "ml-put-datafeed", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-put-datafeed.html", "name": "ml.put_datafeed", "privileges": { "cluster": [ @@ -6686,8 +6774,9 @@ } }, "description": "Create a filter.\nA filter contains a list of strings. 
It can be used by one or more anomaly detection jobs.\nSpecifically, filters are referenced in the `custom_rules` property of detector configuration objects.", + "docId": "ml-put-filter", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-filter.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-put-filter.html", "name": "ml.put_filter", "privileges": { "cluster": [ @@ -6730,8 +6819,9 @@ } }, "description": "Create an anomaly detection job.\nIf you include a `datafeed_config`, you must have read index privileges on the source index.\nIf you include a `datafeed_config` but do not provide a query, the datafeed uses `{\"match_all\": {\"boost\": 1}}`.", + "docId": "ml-put-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-put-job.html", "name": "ml.put_job", "privileges": { "cluster": [ @@ -6777,8 +6867,9 @@ } }, "description": "Create a trained model.\nEnable you to supply a trained model that is not created by data frame analytics.", + "docId": "put-trained-models", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-trained-models.html", "name": "ml.put_trained_model", "privileges": { "cluster": [ @@ -6821,8 +6912,9 @@ } }, "description": "Create or update a trained model alias.\nA trained model alias is a logical name used to reference a single trained\nmodel.\nYou can use aliases instead of trained model identifiers to make it easier to\nreference your models. For example, you can use aliases in inference\naggregations and processors.\nAn alias must be unique and refer to only a single trained model. 
However,\nyou can have multiple aliases for each trained model.\nIf you use this API to update an alias such that it references a different\ntrained model ID and the model uses a different type of data frame analytics,\nan error occurs. For example, this situation occurs if you have a trained\nmodel for regression analysis and a trained model for classification\nanalysis; you cannot reassign an alias from one type of trained model to\nanother.\nIf you use this API to update an alias and there are very few input fields in\ncommon between the old and new trained models for the model alias, the API\nreturns a warning.", + "docId": "put-trained-models-aliases", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-models-aliases.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-trained-models-aliases.html", "name": "ml.put_trained_model_alias", "privileges": { "cluster": [ @@ -6865,8 +6957,9 @@ } }, "description": "Create part of a trained model definition.", + "docId": "put-trained-model-definition-part", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-trained-model-definition-part.html", "name": "ml.put_trained_model_definition_part", "privileges": { "cluster": [ @@ -6909,8 +7002,9 @@ } }, "description": "Create a trained model vocabulary.\nThis API is supported only for natural language processing (NLP) models.\nThe vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition.", + "docId": "put-trained-model-vocabulary", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-trained-model-vocabulary.html", "name": "ml.put_trained_model_vocabulary", "privileges": { "cluster": [ @@ -6953,8 +7047,9 @@ } }, "description": "Reset an anomaly detection job.\nAll model state and results are deleted. The job is ready to start over as if\nit had just been created.\nIt is not currently possible to reset multiple jobs using wildcards or a\ncomma separated list.", + "docId": "ml-reset-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-reset-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-reset-job.html", "name": "ml.reset_job", "privileges": { "cluster": [ @@ -6994,8 +7089,9 @@ } }, "description": "Start a data frame analytics job.\nA data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.\nIf the destination index does not exist, it is created automatically the\nfirst time you start the data frame analytics job. The\n`index.number_of_shards` and `index.number_of_replicas` settings for the\ndestination index are copied from the source index. If there are multiple\nsource indices, the destination index copies the highest setting values. The\nmappings for the destination index are also copied from the source indices.\nIf there are any mapping conflicts, the job fails to start.\nIf the destination index exists, it is used as is. 
You can therefore set up\nthe destination index in advance with custom settings and mappings.", + "docId": "start-dfanalytics", "docTag": "ml data frame", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/start-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/start-dfanalytics.html", "name": "ml.start_data_frame_analytics", "privileges": { "cluster": [ @@ -7045,8 +7141,9 @@ } }, "description": "Start datafeeds.\n\nA datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped\nmultiple times throughout its lifecycle.\n\nBefore you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.\n\nIf you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped.\nIf new data was indexed for that exact millisecond between stopping and starting, it will be ignored.\n\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or\nupdate it had at the time of creation or update and runs the query using those same roles. 
If you provided secondary\nauthorization headers when you created or updated the datafeed, those credentials are used instead.", + "docId": "ml-start-datafeed", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-start-datafeed.html", "name": "ml.start_datafeed", "privileges": { "cluster": [ @@ -7089,8 +7186,9 @@ } }, "description": "Start a trained model deployment.\nIt allocates the model to every machine learning node.", + "docId": "start-trained-model-deployment", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/start-trained-model-deployment.html", "name": "ml.start_trained_model_deployment", "privileges": { "cluster": [ @@ -7133,8 +7231,9 @@ } }, "description": "Stop data frame analytics jobs.\nA data frame analytics job can be started and stopped multiple times\nthroughout its lifecycle.", + "docId": "stop-dfanalytics", "docTag": "ml data frame", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/stop-dfanalytics.html", "name": "ml.stop_data_frame_analytics", "privileges": { "cluster": [ @@ -7177,8 +7276,9 @@ } }, "description": "Stop datafeeds.\nA datafeed that is stopped ceases to retrieve data from Elasticsearch. 
A datafeed can be started and stopped\nmultiple times throughout its lifecycle.", + "docId": "ml-stop-datafeed", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-stop-datafeed.html", "name": "ml.stop_datafeed", "privileges": { "cluster": [ @@ -7221,8 +7321,9 @@ } }, "description": "Stop a trained model deployment.", + "docId": "stop-trained-model-deployment", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/stop-trained-model-deployment.html", "name": "ml.stop_trained_model_deployment", "privileges": { "cluster": [ @@ -7265,8 +7366,9 @@ } }, "description": "Update a data frame analytics job.", + "docId": "update-dfanalytics", "docTag": "ml data frame", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-dfanalytics.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/update-dfanalytics.html", "name": "ml.update_data_frame_analytics", "privileges": { "cluster": [ @@ -7316,8 +7418,9 @@ } }, "description": "Update a datafeed.\nYou must stop and start the datafeed for the changes to be applied.\nWhen Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at\nthe time of the update and runs the query using those same roles. 
If you provide secondary authorization headers,\nthose credentials are used instead.", + "docId": "ml-update-datafeed", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-datafeed.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-update-datafeed.html", "name": "ml.update_datafeed", "privileges": { "cluster": [ @@ -7360,8 +7463,9 @@ } }, "description": "Update a filter.\nUpdates the description of a filter, adds items, or removes items from the list.", + "docId": "ml-update-filter", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-filter.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-update-filter.html", "name": "ml.update_filter", "privileges": { "cluster": [ @@ -7404,8 +7508,9 @@ } }, "description": "Update an anomaly detection job.\nUpdates certain properties of an anomaly detection job.", + "docId": "ml-update-job", "docTag": "ml anomaly", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/ml-update-job.html", "name": "ml.update_job", "privileges": { "cluster": [ @@ -7448,8 +7553,9 @@ } }, "description": "Update a trained model deployment.", + "docId": "update-trained-model-deployment", "docTag": "ml trained model", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/update-trained-model-deployment.html", "name": "ml.update_trained_model_deployment", "privileges": { "cluster": [ @@ -7492,8 +7598,9 @@ } }, "description": "Run multiple searches.\n\nThe format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format.\nThe structure is as 
follows:\n\n```\nheader\\n\nbody\\n\nheader\\n\nbody\\n\n```\n\nThis structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.\n\nIMPORTANT: The final line of data must end with a newline character `\\n`.\nEach newline character may be preceded by a carriage return `\\r`.\nWhen sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`.", + "docId": "search-multi-search", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-multi-search.html", "name": "msearch", "privileges": { "index": [ @@ -7543,9 +7650,10 @@ "stability": "stable" } }, - "description": "Run multiple templated searches.", + "description": "Run multiple templated searches.\n\nRun multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", + "docId": "search-multi-search-template", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/multi-search-template.html", "extDocId": "search-templates", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-template.html", "name": "msearch_template", @@ -7596,10 +7704,16 @@ "stability": "stable" } 
}, - "description": "Get multiple term vectors.\n\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.", + "description": "Get multiple term vectors.\n\nGet multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used is determined by the specified `_index`.", + "docId": "docs-multi-termvectors", "docTag": "document", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-multi-termvectors.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-multi-termvectors.html", "name": "mtermvectors", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.mtermvectors" @@ -7688,8 +7802,9 @@ } }, "description": "Ping the cluster.\nGet information about whether the cluster is running.", + "docId": "cluster-ping", "docTag": "cluster", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cluster.html", "name": "ping", "request": { "name": "Request", @@ -7723,9 +7838,17 @@ } }, "description": "Create or update a script or search template.\nCreates or updates a stored script or search template.", + "docId": "script-put", "docTag": "script", - "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/create-stored-script-api.html", + "extDocId": "search-template", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-template.html", "name": "put_script", + "privileges": { + "cluster": [ + "manage" + ] + }, "request": { "name": "Request", "namespace": "_global.put_script" @@ -8111,8 +8234,9 @@ } }, "description": "Evaluate ranked search results.\n\nEvaluate the quality of ranked search results over a set of typical search queries.", + "docId": "search-rank-eval", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-rank-eval.html", "name": "rank_eval", "privileges": { "index": [ @@ -8208,9 +8332,15 @@ } }, "description": "Render a search template.\n\nRender a search template as a search request body.", + "docId": "render-search-template-api", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/render-search-template-api.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/render-search-template-api.html", "name": "render_search_template", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.render_search_template" @@ -8254,9 +8384,10 @@ "stability": "experimental" } }, - "description": "Run a script.\nRuns a script and returns a result.", + "description": "Run a script.\n\nRuns a script and returns a result.\nUse this API to build and test scripts, such as when defining a script for a runtime field.\nThis API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.\n\nThe API uses several _contexts_, which control how scripts are 
run, what variables are available at runtime, and what the return type is.\n\nEach context requires a script, but additional parameters depend on the context you're using for that script.", + "docId": "painless-execute-api", "docTag": "script", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/painless/{branch}/painless-execute-api.html", "name": "scripts_painless_execute", "request": { "name": "Request", @@ -8294,11 +8425,17 @@ } }, "description": "Run a scrolling search.\n\nIMPORTANT: The scroll API is no longer recommend for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the `search_after` parameter with a point in time (PIT).\n\nThe scroll API gets large sets of results from a single scrolling search request.\nTo get the necessary scroll ID, submit a search API request that includes an argument for the `scroll` query parameter.\nThe `scroll` parameter indicates how long Elasticsearch should retain the search context for the request.\nThe search response returns a scroll ID in the `_scroll_id` response body parameter.\nYou can then use the scroll ID with the scroll API to retrieve the next batch of results for the request.\nIf the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.\n\nYou can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.\n\nIMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. 
Subsequent indexing or document changes only affect later search and scroll requests.", + "docId": "scroll-api", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/scroll-api.html", "extDocId": "scroll-search-results", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", "name": "scroll", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.scroll" @@ -8346,7 +8483,8 @@ } }, "description": "Run a search.\n\nGet search hits that match the query defined in the request.\nYou can provide search queries using the `q` query string parameter or the request body.\nIf both are specified, only the query parameter is used.\n\nIf the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. 
For cross-cluster search, refer to the documentation about configuring CCS privileges.\nTo search a point in time (PIT) for an alias, you must have the `read` index privilege for the alias's data streams or indices.\n\n**Search slicing**\n\nWhen paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the `slice` and `pit` properties.\nBy default the splitting is done first on the shards, then locally on each shard.\nThe local splitting partitions the shard into contiguous ranges based on Lucene document IDs.\n\nFor instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.\n\nIMPORTANT: The same point-in-time ID should be used for all slices.\nIf different PIT IDs are used, slices can overlap and miss documents.\nThis situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html", + "docId": "search-search", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-search.html", "extDocId": "ccs-privileges", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/remote-clusters-cert.html#remote-clusters-privileges-ccs", "name": "search", @@ -8398,8 +8536,9 @@ "stability": "beta" } }, - "description": "Delete a search application.\nRemove a search application and its associated alias. Indices attached to the search application are not removed.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-search-application.html", + "description": "Delete a search application.\n\nRemove a search application and its associated alias. 
Indices attached to the search application are not removed.", + "docId": "search-application-delete", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-search-application.html", "name": "search_application.delete", "privileges": { "cluster": [ @@ -8442,8 +8581,9 @@ } }, "description": "Delete a behavioral analytics collection.\nThe associated data stream is also deleted.", + "docId": "delete-analytics-collection", "docTag": "analytics", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-analytics-collection.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-analytics-collection.html", "name": "search_application.delete_behavioral_analytics", "request": { "name": "Request", @@ -8478,7 +8618,8 @@ } }, "description": "Get search application details.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-search-application.html", + "docId": "search-application-get", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-search-application.html", "name": "search_application.get", "privileges": { "cluster": [ @@ -8518,8 +8659,9 @@ } }, "description": "Get behavioral analytics collections.", + "docId": "list-analytics-collection", "docTag": "analytics", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-analytics-collection.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/list-analytics-collection.html", "name": "search_application.get_behavioral_analytics", "request": { "name": "Request", @@ -8560,7 +8702,8 @@ } }, "description": "Get search applications.\nGet information about search applications.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-search-applications.html", + "docId": "list-analytics-collection", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/list-analytics-collection.html", "name": "search_application.list", "privileges": { "cluster": [ @@ -8600,7 +8743,8 @@ } }, "description": "Create or update a search application.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-search-application.html", + "docId": "search-application-put", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-search-application.html", "name": "search_application.put", "privileges": { "cluster": [ @@ -8646,8 +8790,9 @@ } }, "description": "Create a behavioral analytics collection.", + "docId": "put-analytics-collection", "docTag": "analytics", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-analytics-collection.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-analytics-collection.html", "name": "search_application.put_behavioral_analytics", "request": { "name": "Request", @@ -8682,7 +8827,8 @@ } }, "description": "Run a search application search.\nGenerate and run an Elasticsearch query that uses the specified query parameteter and the search template associated with the search application or default template.\nUnspecified template parameters are assigned their default values if applicable.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-application-search.html", + "docId": "search-application-search", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-application-search.html", "name": "search_application.search", "request": { "name": "Request", @@ -8720,10 +8866,18 @@ "stability": "stable" } }, - "description": "Search a vector tile.\n\nSearch a vector tile for geospatial values.", + "description": "Search a vector tile.\n\nSearch a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API 
returns results as a binary mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). 
By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. 
The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- | ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 
68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.", + "docId": "search-vector-tile-api", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-vector-tile-api.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-vector-tile-api.html", + "extDocId": "mapbox-vector-tile", + "extDocUrl": "https://github.com/mapbox/vector-tile-spec/blob/master/README.md", "name": "search_mvt", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.search_mvt" @@ -8761,11 +8915,17 @@ } }, "description": "Run a search with a search template.", + "docId": "search-template-api", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html", + "docUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-template-api.html", "extDocId": "search-template", "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-template.html", "name": "search_template", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.search_template" @@ -9705,9 +9865,10 @@ "stability": "stable" } }, - "description": "Get terms in an index.\n\nDiscover terms that match a partial string in an index.\nThis \"terms enum\" API is designed for low-latency look-ups used in auto-complete scenarios.\n\nIf the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate.\nThis can occur due to a few reasons, such as a request timeout or a node error.\n\nNOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.", + "description": "Get terms in an index.\n\nDiscover terms that match a partial string in an index.\nThis API is designed for low-latency look-ups used in auto-complete scenarios.\n\n> info\n> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. 
Until that happens, the terms enum API will return terms from these documents.", + "docId": "search-terms-enum", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/search-terms-enum.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-terms-enum.html", "name": "terms_enum", "request": { "name": "Request", @@ -9744,10 +9905,16 @@ "stability": "stable" } }, - "description": "Get term vector information.\n\nGet information and statistics about terms in the fields of a particular document.", + "description": "Get term vector information.\n\nGet information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", + "docId": "docs-termvectors", "docTag": "document", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-termvectors.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-termvectors.html", "name": "termvectors", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.termvectors" @@ -9791,8 +9958,9 @@ "stability": "stable" } }, - "description": "Delete a transform.\nDeletes a transform.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html", + "description": "Delete a transform.", + "docId": "delete-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/delete-transform.html", "name": "transform.delete_transform", "privileges": { "cluster": [ @@ -9831,8 +9999,9 @@ "stability": "stable" } }, - "description": "Get transforms.\nRetrieves configuration information for transforms.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html", + "description": "Get transforms.\nGet configuration information for transforms.", + "docId": "get-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-transform.html", 
"name": "transform.get_transform", "privileges": { "cluster": [ @@ -9877,8 +10046,9 @@ "stability": "stable" } }, - "description": "Get transform stats.\nRetrieves usage information for transforms.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html", + "description": "Get transform stats.\nGet usage information for transforms.", + "docId": "get-transform-stats", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/get-transform-stats.html", "name": "transform.get_transform_stats", "privileges": { "cluster": [ @@ -9922,7 +10092,8 @@ } }, "description": "Preview a transform.\nGenerates a preview of the results that you will get when you create a transform with the same configuration.\n\nIt returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also\ngenerates a list of mappings and settings for the destination index. These values are determined based on the field\ntypes of the source index and the transform aggregations.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html", + "docId": "preview-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/preview-transform.html", "name": "transform.preview_transform", "privileges": { "cluster": [ @@ -9977,7 +10148,8 @@ } }, "description": "Create a transform.\nCreates a transform.\n\nA transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as\na data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a\nunique row per entity.\n\nYou must choose either the latest or pivot method for your transform; you cannot use both in a single transform. 
If\nyou choose to use the pivot method for your transform, the entities are defined by the set of `group_by` fields in\nthe pivot object. If you choose to use the latest method, the entities are defined by the `unique_key` field values\nin the latest object.\n\nYou must have `create_index`, `index`, and `read` privileges on the destination index and `read` and\n`view_index_metadata` privileges on the source indices. When Elasticsearch security features are enabled, the\ntransform remembers which roles the user that created it had at the time of creation and uses those same roles. If\nthose roles do not have the required privileges on the source and destination indices, the transform fails when it\nattempts unauthorized operations.\n\nNOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any\n`.transform-internal*` indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do\nnot give users any privileges on `.transform-internal*` indices. 
If you used transforms prior to 7.5, also do not\ngive users any privileges on `.data-frame-internal*` indices.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html", + "docId": "put-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/put-transform.html", "name": "transform.put_transform", "privileges": { "cluster": [ @@ -10026,7 +10198,8 @@ } }, "description": "Reset a transform.\nResets a transform.\nBefore you can reset it, you must stop it; alternatively, use the `force` query parameter.\nIf the destination index was created by the transform, it is deleted.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/reset-transform.html", + "docId": "reset-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/reset-transform.html", "name": "transform.reset_transform", "privileges": { "cluster": [ @@ -10066,7 +10239,8 @@ } }, "description": "Schedule a transform to start now.\nInstantly runs a transform to process data.\n\nIf you _schedule_now a transform, it will process the new data instantly,\nwithout waiting for the configured frequency interval. After _schedule_now API is called,\nthe transform will be processed again at now + frequency unless _schedule_now API\nis called again in the meantime.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html", + "docId": "schedule-now-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/schedule-now-transform.html", "name": "transform.schedule_now_transform", "privileges": { "cluster": [ @@ -10109,7 +10283,8 @@ } }, "description": "Start a transform.\nStarts a transform.\n\nWhen you start a transform, it creates the destination index if it does not already exist. The `number_of_shards` is\nset to `1` and the `auto_expand_replicas` is set to `0-1`. 
If it is a pivot transform, it deduces the mapping\ndefinitions for the destination index from the source indices and the transform aggregations. If fields in the\ndestination index are derived from scripts (as in the case of `scripted_metric` or `bucket_script` aggregations),\nthe transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce\nmapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you\nstart the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings\nin a pivot transform.\n\nWhen the transform starts, a series of validations occur to ensure its success. If you deferred validation when you\ncreated the transform, they occur when you start the transform—​with the exception of privilege checks. When\nElasticsearch security features are enabled, the transform remembers which roles the user that created it had at the\ntime of creation and uses those same roles. 
If those roles do not have the required privileges on the source and\ndestination indices, the transform fails when it attempts unauthorized operations.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html", + "docId": "start-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/start-transform.html", "name": "transform.start_transform", "privileges": { "cluster": [ @@ -10153,7 +10328,8 @@ } }, "description": "Stop transforms.\nStops one or more transforms.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html", + "docId": "stop-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/stop-transform.html", "name": "transform.stop_transform", "privileges": { "cluster": [ @@ -10193,7 +10369,8 @@ } }, "description": "Update a transform.\nUpdates certain properties of a transform.\n\nAll updated properties except `description` do not take effect until after the transform starts the next checkpoint,\nthus there is data consistency in each checkpoint. To use this API, you must have `read` and `view_index_metadata`\nprivileges for the source indices. You must also have `index` and `read` privileges for the destination index. 
When\nElasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the\ntime of update and runs with those privileges.", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html", + "docId": "update-transform", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/update-transform.html", "name": "transform.update_transform", "privileges": { "cluster": [ @@ -10284,10 +10461,17 @@ "stability": "stable" } }, - "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.", + "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more 
documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for 
most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. 
Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.", + "docId": "docs-update-by-query", "docTag": "document", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-update-by-query.html", "name": "update_by_query", + "privileges": { + "index": [ + "read", + "write" + ] + }, "request": { "name": "Request", "namespace": "_global.update_by_query" @@ -15728,7 +15912,7 @@ "kind": "properties", "properties": [ { - "description": "Defines the search definition using the Query DSL.\nThe query is optional, and when not provided, it will use `match_all` to count all the docs.", + "description": "Defines the search query using Query DSL. 
A request body query cannot be used\nwith the `q` query string parameter.", "name": "query", "required": false, "type": { @@ -15741,7 +15925,7 @@ } ] }, - "description": "Count search results.\nGet the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "description": "Count search results.\nGet the number of documents matching a query.\n\nThe query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.\nThe query is optional. When no query is provided, the API uses `match_all` to count all the documents.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "inherits": { "type": { "name": "RequestBase", @@ -15936,7 +16120,7 @@ } }, { - "description": "The query in Lucene query string syntax.", + "description": "The query in Lucene query string syntax. 
This parameter cannot be used with a request body.", "name": "q", "required": false, "type": { @@ -16373,7 +16557,7 @@ } }, { - "description": "Specifies the documents to delete using the Query DSL.", + "description": "The documents to delete specified with Query DSL.", "name": "query", "required": false, "type": { @@ -16398,7 +16582,7 @@ } ] }, - "description": "Delete documents.\nDeletes documents that match the specified query.", + "description": "Delete documents.\n\nDeletes documents that match the specified query.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `delete` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\nWhen you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.\nIf a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.\n\nNOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.\nA bulk delete request is performed for each batch of matching documents.\nIf a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.\nIf the maximum retry limit is reached, processing halts and all failed requests are returned in the response.\nAny delete requests that completed successfully still stick, they are not rolled back.\n\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to 
`proceed`.\nNote that if you opt to count version conflicts the operation could attempt to delete more documents from the source than `max_docs` until it has successfully deleted `max_docs documents`, or it has gone through every document in the source query.\n\n**Throttling delete requests**\n\nTo control the rate at which delete by query issues batches of delete operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to disable throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is `1000`, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single `_bulk` request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nDelete by query supports sliced scroll to parallelize the delete process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` lets Elasticsearch choose the number of slices to use.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\nAdding slices to the delete by query operation creates sub-requests which means it has some quirks:\n\n* You can see these requests in the tasks APIs. 
These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with slices only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with `slices` will cancel each sub-request.\n* Due to the nature of `slices` each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with `slices` are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being deleted.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many `slices` hurts performance. Setting `slices` higher than the number of shards generally does not improve efficiency and adds overhead.\n* Delete performance scales linearly across available resources with the number of slices.\n\nWhether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Cancel a delete by query operation**\n\nAny delete by query can be canceled using the task cancel API. 
For example:\n\n```\nPOST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel\n```\n\nThe task ID can be found by using the get tasks API.\n\nCancellation should happen quickly but might take a few seconds.\nThe get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.", "inherits": { "type": { "name": "RequestBase", @@ -16412,7 +16596,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases to search.\nSupports wildcards (`*`).\nTo search all data streams or indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases to search.\nIt supports wildcards (`*`).\nTo search all data streams or indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": true, "type": { @@ -16439,7 +16623,7 @@ } }, { - "description": "Analyzer to use for the query string.", + "description": "Analyzer to use for the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyzer", "required": false, "type": { @@ -16451,7 +16635,7 @@ } }, { - "description": "If `true`, wildcard and prefix queries are analyzed.", + "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyze_wildcard", "required": false, "serverDefault": false, @@ -16477,7 +16661,7 @@ } }, { - "description": "The default operator for query string query: `AND` or `OR`.", + "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "default_operator", "required": false, "serverDefault": "OR", @@ -16490,7 +16674,7 @@ } }, { - "description": "Field to use as default where no field prefix is given in the query string.", + "description": "The field to use as 
default where no field prefix is given in the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "df", "required": false, "type": { @@ -16502,7 +16686,7 @@ } }, { - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -16540,7 +16724,7 @@ } }, { - "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.", + "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "lenient", "required": false, "serverDefault": false, @@ -16553,7 +16737,7 @@ } }, { - "description": "Maximum number of documents to process.\nDefaults to all documents.", + "description": "The maximum number of documents to process.\nDefaults to all documents.\nWhen set to a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation.", "name": "max_docs", "required": false, "type": { @@ -16565,7 +16749,7 @@ } }, { - "description": "Specifies the node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nIt is random by default.", "name": "preference", "required": false, "type": { @@ -16577,7 
+16761,7 @@ } }, { - "description": "If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes.", + "description": "If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes.\nThis is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed.\nUnlike the delete API, it does not support `wait_for`.", "name": "refresh", "required": false, "serverDefault": false, @@ -16605,6 +16789,7 @@ "description": "The throttle for this request in sub-requests per second.", "name": "requests_per_second", "required": false, + "serverDefault": -1, "type": { "kind": "instance_of", "type": { @@ -16614,7 +16799,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -16626,7 +16811,7 @@ } }, { - "description": "Query in the Lucene query string syntax.", + "description": "A query in the Lucene query string syntax.", "name": "q", "required": false, "type": { @@ -16638,7 +16823,9 @@ } }, { - "description": "Period to retain the search context for scrolling.", + "description": "The period to retain the search context for scrolling.", + "extDocId": "scroll-search-results", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", "name": "scroll", "required": false, "type": { @@ -16650,7 +16837,7 @@ } }, { - "description": "Size of the scroll request that powers the operation.", + "description": "The size of the scroll request that powers the operation.", "name": "scroll_size", "required": false, "serverDefault": 1000, @@ -16663,7 +16850,7 @@ } }, { - "description": "Explicit timeout for each search request.\nDefaults to no timeout.", + "description": "The explicit timeout for each 
search request.\nIt defaults to no timeout.", "name": "search_timeout", "required": false, "type": { @@ -16675,7 +16862,7 @@ } }, { - "description": "The type of the search operation.\nAvailable options: `query_then_fetch`, `dfs_query_then_fetch`.", + "description": "The type of the search operation.\nAvailable options include `query_then_fetch` and `dfs_query_then_fetch`.", "name": "search_type", "required": false, "type": { @@ -16700,7 +16887,7 @@ } }, { - "description": "A comma-separated list of : pairs.", + "description": "A comma-separated list of `:` pairs.", "name": "sort", "required": false, "type": { @@ -16715,7 +16902,7 @@ } }, { - "description": "Specific `tag` of the request for logging and statistical purposes.", + "description": "The specific `tag` of the request for logging and statistical purposes.", "name": "stats", "required": false, "type": { @@ -16730,7 +16917,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\nUse with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.", + "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nUse with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.", "name": "terminate_after", "required": false, "type": { @@ -16742,7 +16929,7 @@ } }, { - "description": "Period 
each deletion request waits for active shards.", + "description": "The period each deletion request waits for active shards.", "name": "timeout", "required": false, "serverDefault": "1m", @@ -16767,7 +16954,7 @@ } }, { - "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).", + "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).\nThe `timeout` value controls how long each write request waits for unavailable shards to become available.", "name": "wait_for_active_shards", "required": false, "serverDefault": "1", @@ -16780,7 +16967,7 @@ } }, { - "description": "If `true`, the request blocks until the operation is complete.", + "description": "If `true`, the request blocks until the operation is complete.\nIf `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. 
When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.", "name": "wait_for_completion", "required": false, "serverDefault": true, @@ -16793,13 +16980,14 @@ } } ], - "specLocation": "_global/delete_by_query/DeleteByQueryRequest.ts#L36-L217" + "specLocation": "_global/delete_by_query/DeleteByQueryRequest.ts#L36-L310" }, { "body": { "kind": "properties", "properties": [ { + "description": "The number of scroll responses pulled back by the delete by query.", "name": "batches", "required": false, "type": { @@ -16811,6 +16999,7 @@ } }, { + "description": "The number of documents that were successfully deleted.", "name": "deleted", "required": false, "type": { @@ -16822,6 +17011,7 @@ } }, { + "description": "An array of failures if there were any unrecoverable errors during the process.\nIf this array is not empty, the request ended abnormally because of those failures.\nDelete by query is implemented using batches and any failures cause the entire process to end but all failures in the current batch are collected into the array.\nYou can use the `conflicts` option to prevent reindex from ending on version conflicts.", "name": "failures", "required": false, "type": { @@ -16836,6 +17026,7 @@ } }, { + "description": "This field is always equal to zero for delete by query.\nIt exists only so that delete by query, update by query, and reindex APIs return responses with the same structure.", "name": "noops", "required": false, "type": { @@ -16847,6 +17038,7 @@ } }, { + "description": "The number of requests per second effectively run during the delete by query.", "name": "requests_per_second", "required": false, "type": { @@ -16858,6 +17050,7 @@ } }, { + "description": "The number of retries attempted by delete by query.\n`bulk` is the number of bulk actions retried.\n`search` is the number of search actions retried.", "name": "retries", "required": false, "type": { @@ -16902,6 +17095,7 @@ } }, { + "description": "The number 
of milliseconds the request slept to conform to `requests_per_second`.", "name": "throttled_millis", "required": false, "type": { @@ -16933,6 +17127,7 @@ } }, { + "description": "This field should always be equal to zero in a `_delete_by_query` response.\nIt has meaning only when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`.", "name": "throttled_until_millis", "required": false, "type": { @@ -16953,6 +17148,7 @@ } }, { + "description": "If `true`, some requests run during the delete by query operation timed out.", "name": "timed_out", "required": false, "type": { @@ -16964,6 +17160,7 @@ } }, { + "description": "The number of milliseconds from start to end of the whole operation.", "name": "took", "required": false, "type": { @@ -16984,6 +17181,7 @@ } }, { + "description": "The number of documents that were successfully processed.", "name": "total", "required": false, "type": { @@ -16995,6 +17193,7 @@ } }, { + "description": "The number of version conflicts that the delete by query hit.", "name": "version_conflicts", "required": false, "type": { @@ -17012,7 +17211,7 @@ "name": "Response", "namespace": "_global.delete_by_query" }, - "specLocation": "_global/delete_by_query/DeleteByQueryResponse.ts#L26-L45" + "specLocation": "_global/delete_by_query/DeleteByQueryResponse.ts#L26-L88" }, { "attachedBehaviors": [ @@ -17035,7 +17234,7 @@ }, "path": [ { - "description": "Identifier for the stored script or search template.", + "description": "The identifier for the stored script or search template.", "name": "id", "required": true, "type": { @@ -17049,7 +17248,7 @@ ], "query": [ { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "The period to wait for a connection to the master node.\nIf no response is received before 
the timeout expires, the request fails and returns an error.\nIt can also be set to `-1` to indicate that the request should never timeout.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -17062,7 +17261,7 @@ } }, { - "description": "Period to wait for a response.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "The period to wait for a response.\nIf no response is received before the timeout expires, the request fails and returns an error.\nIt can also be set to `-1` to indicate that the request should never timeout.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -17075,7 +17274,7 @@ } } ], - "specLocation": "_global/delete_script/DeleteScriptRequest.ts#L24-L59" + "specLocation": "_global/delete_script/DeleteScriptRequest.ts#L24-L63" }, { "body": { @@ -17143,7 +17342,7 @@ } } ], - "specLocation": "enrich/delete_policy/DeleteEnrichPolicyRequest.ts#L24-L51" + "specLocation": "enrich/delete_policy/DeleteEnrichPolicyRequest.ts#L24-L52" }, { "body": { @@ -17309,7 +17508,7 @@ } } ], - "specLocation": "enrich/get_policy/GetEnrichPolicyRequest.ts#L24-L56" + "specLocation": "enrich/get_policy/GetEnrichPolicyRequest.ts#L24-L57" }, { "body": { @@ -17479,7 +17678,7 @@ } } ], - "specLocation": "enrich/stats/EnrichStatsRequest.ts#L23-L44" + "specLocation": "enrich/stats/EnrichStatsRequest.ts#L23-L45" }, { "body": { @@ -17579,7 +17778,7 @@ } ], "query": [], - "specLocation": "eql/delete/EqlDeleteRequest.ts#L23-L46" + "specLocation": "eql/delete/EqlDeleteRequest.ts#L23-L47" }, { "body": { @@ -18212,7 +18411,7 @@ } } ], - "specLocation": "eql/search/EqlSearchRequest.ts#L28-L148" + "specLocation": "eql/search/EqlSearchRequest.ts#L28-L149" }, { "body": { @@ -18388,7 +18587,7 @@ "kind": "instance_of", "type": { "name": "EsqlFormat", - "namespace": "esql._types" + "namespace": "esql.query" } } }, @@ -18819,7 +19018,7 @@ } ] }, - "description": "Explain a document match 
result.\nReturns information about why a specific document matches, or doesn’t match, a query.", + "description": "Explain a document match result.\nGet information about why a specific document matches, or doesn't match, a query.\nIt computes a score explanation for a query and a specific document.", "inherits": { "type": { "name": "RequestBase", @@ -18833,7 +19032,7 @@ }, "path": [ { - "description": "Defines the document ID.", + "description": "The document identifier.", "name": "id", "required": true, "type": { @@ -18845,7 +19044,7 @@ } }, { - "description": "Index names used to limit the request.\nOnly a single index name can be provided to this parameter.", + "description": "Index names that are used to limit the request.\nOnly a single index name can be provided to this parameter.", "name": "index", "required": true, "type": { @@ -18859,7 +19058,7 @@ ], "query": [ { - "description": "Analyzer to use for the query string.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The analyzer to use for the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyzer", "required": false, "type": { @@ -18871,7 +19070,7 @@ } }, { - "description": "If `true`, wildcard and prefix queries are analyzed.", + "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyze_wildcard", "required": false, "serverDefault": false, @@ -18884,7 +19083,7 @@ } }, { - "description": "The default operator for query string query: `AND` or `OR`.", + "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "default_operator", "required": false, "serverDefault": "OR", @@ -18897,7 +19096,7 @@ } }, { - "description": "Field to use as default where no field prefix is given 
in the query string.", + "description": "The field to use as default where no field prefix is given in the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "df", "required": false, "type": { @@ -18909,7 +19108,7 @@ } }, { - "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.", + "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "lenient", "required": false, "serverDefault": false, @@ -18922,7 +19121,7 @@ } }, { - "description": "Specifies the node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nIt is random by default.", "name": "preference", "required": false, "type": { @@ -18934,7 +19133,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -18946,7 +19145,7 @@ } }, { - "description": "True or false to return the `_source` field or not, or a list of fields to return.", + "description": "`True` or `false` to return the `_source` field or not or a list of fields to return.", "name": "_source", "required": false, "type": { @@ -18958,7 +19157,7 @@ } }, { - "description": "A comma-separated list of source fields to exclude from the response.", + "description": "A comma-separated list of source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "name": "_source_excludes", "required": false, "type": { @@ -18970,7 +19169,7 @@ } 
}, { - "description": "A comma-separated list of source fields to include in the response.", + "description": "A comma-separated list of source fields to include in the response.\nIf this parameter is specified, only these source fields are returned.\nYou can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "name": "_source_includes", "required": false, "type": { @@ -18994,7 +19193,7 @@ } }, { - "description": "Query in the Lucene query string syntax.", + "description": "The query in the Lucene query string syntax.", "name": "q", "required": false, "type": { @@ -19006,7 +19205,7 @@ } } ], - "specLocation": "_global/explain/ExplainRequest.ts#L26-L113" + "specLocation": "_global/explain/ExplainRequest.ts#L26-L125" }, { "body": { @@ -19105,7 +19304,7 @@ "since": "8.5.0" } }, - "description": "List of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.", + "description": "A list of fields to retrieve capabilities for. 
Wildcard (`*`) expressions are supported.", "name": "fields", "required": false, "type": { @@ -19117,7 +19316,7 @@ } }, { - "description": "Allows to filter indices if the provided query rewrites to match_none on every shard.", + "description": "Filter indices if the provided query rewrites to `match_none` on every shard.\n\nIMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request.\nFor instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range.\nHowever, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document.", "name": "index_filter", "required": false, "type": { @@ -19135,7 +19334,7 @@ "since": "7.12.0" } }, - "description": "Defines ad-hoc runtime fields in the request similar to the way it is done in search requests.\nThese fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.", + "description": "Define ad-hoc runtime fields in the request similar to the way it is done in search requests.\nThese fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings.", "docId": "runtime-search-request", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/runtime-search-request.html", "name": "runtime_mappings", @@ -19164,7 +19363,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all.", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). 
To target all data streams and indices, omit this parameter or use * or _all.", "name": "index", "required": false, "type": { @@ -19191,7 +19390,7 @@ } }, { - "description": "Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -19204,7 +19403,7 @@ } }, { - "description": "Comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.", + "description": "A comma-separated list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported.", "name": "fields", "required": false, "type": { @@ -19248,7 +19447,7 @@ "since": "8.2.0" } }, - "description": "An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent", + "description": "A comma-separated list of filters to apply to the response.", "name": "filters", "required": false, "type": { @@ -19266,7 +19465,7 @@ "since": "8.2.0" } }, - "description": "Only return results for fields that have one of the types in the list", + "description": "A comma-separated list of field types to include.\nAny fields that do not match one of these types will be excluded from the results.\nIt defaults to empty, meaning that all field types are returned.", "name": "types", "required": false, "type": { @@ -19300,13 +19499,14 @@ } } ], - "specLocation": "_global/field_caps/FieldCapabilitiesRequest.ts#L25-L121" + "specLocation": "_global/field_caps/FieldCapabilitiesRequest.ts#L25-L130" }, { "body": { "kind": "properties", "properties": [ { + "description": "The list 
of indices where this field has the same type family, or null if all indices have the same type family for the field.", "name": "indices", "required": true, "type": { @@ -19357,7 +19557,7 @@ "name": "Response", "namespace": "_global.field_caps" }, - "specLocation": "_global/field_caps/FieldCapabilitiesResponse.ts#L24-L35" + "specLocation": "_global/field_caps/FieldCapabilitiesResponse.ts#L24-L38" }, { "attachedBehaviors": [ @@ -19626,7 +19826,7 @@ }, "path": [ { - "description": "Identifier for the stored script or search template.", + "description": "The identifier for the stored script or search template.", "name": "id", "required": true, "type": { @@ -19640,9 +19840,10 @@ ], "query": [ { - "description": "Specify timeout for connection to master", + "description": "The period to wait for the master node.\nIf the master node is not available before the timeout expires, the request fails and returns an error.\nIt can also be set to `-1` to indicate that the request should never timeout.", "name": "master_timeout", "required": false, + "serverDefault": "", "type": { "kind": "instance_of", "type": { @@ -19652,7 +19853,7 @@ } } ], - "specLocation": "_global/get_script/GetScriptRequest.ts#L24-L49" + "specLocation": "_global/get_script/GetScriptRequest.ts#L24-L56" }, { "body": { @@ -20317,7 +20518,7 @@ "body": { "kind": "no_body" }, - "description": "Add an index block.\nLimits the operations allowed on an index by blocking specific operation types.", + "description": "Add an index block.\n\nAdd an index block to an index.\nIndex blocks limit the operations allowed on an index by blocking specific operation types.", "inherits": { "type": { "name": "RequestBase", @@ -20331,7 +20532,7 @@ }, "path": [ { - "description": "A comma separated list of indices to add a block to", + "description": "A comma-separated list or wildcard expression of index names used to limit the request.\nBy default, you must explicitly name the indices you are adding blocks to.\nTo allow the 
adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`.\nYou can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API.", "name": "index", "required": true, "type": { @@ -20343,7 +20544,7 @@ } }, { - "description": "The block to add (one of read, write, read_only or metadata)", + "description": "The block type to add to the index.", "name": "block", "required": true, "type": { @@ -20357,9 +20558,10 @@ ], "query": [ { - "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)", + "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.\nFor example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", "name": "allow_no_indices", "required": false, + "serverDefault": true, "type": { "kind": "instance_of", "type": { @@ -20369,9 +20571,10 @@ } }, { - "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, + "serverDefault": "open", "type": { "kind": "instance_of", "type": { @@ -20381,9 +20584,10 @@ } }, { - "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)", + "description": "If `false`, the request returns an error if it targets a missing or closed index.", "name": "ignore_unavailable", "required": false, 
+ "serverDefault": false, "type": { "kind": "instance_of", "type": { @@ -20393,9 +20597,10 @@ } }, { - "description": "Specify timeout for connection to master", + "description": "The period to wait for the master node.\nIf the master node is not available before the timeout expires, the request fails and returns an error.\nIt can also be set to `-1` to indicate that the request should never timeout.", "name": "master_timeout", "required": false, + "serverDefault": "30s", "type": { "kind": "instance_of", "type": { @@ -20405,9 +20610,10 @@ } }, { - "description": "Explicit operation timeout", + "description": "The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata.\nIf no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged.\nIt can also be set to `-1` to indicate that the request should never timeout.", "name": "timeout", "required": false, + "serverDefault": "30s", "type": { "kind": "instance_of", "type": { @@ -20417,7 +20623,7 @@ } } ], - "specLocation": "indices/add_block/IndicesAddBlockRequest.ts#L24-L49" + "specLocation": "indices/add_block/IndicesAddBlockRequest.ts#L24-L89" }, { "body": { @@ -20854,7 +21060,7 @@ "body": { "kind": "no_body" }, - "description": "Create a data stream.\nCreates a data stream.\nYou must have a matching index template with data stream enabled.", + "description": "Create a data stream.\n\nYou must have a matching index template with data stream enabled.", "inherits": { "type": { "name": "RequestBase", @@ -20908,7 +21114,7 @@ } } ], - "specLocation": "indices/create_data_stream/IndicesCreateDataStreamRequest.ts#L24-L64" + "specLocation": "indices/create_data_stream/IndicesCreateDataStreamRequest.ts#L24-L65" }, { "body": { @@ -20935,7 +21141,7 @@ "body": { "kind": "no_body" }, - "description": "Get data stream stats.\nRetrieves statistics for one or more data streams.", + 
"description": "Get data stream stats.\n\nGet statistics for one or more data streams.", "inherits": { "type": { "name": "RequestBase", @@ -20976,7 +21182,7 @@ } } ], - "specLocation": "indices/data_streams_stats/IndicesDataStreamsStatsRequest.ts#L23-L59" + "specLocation": "indices/data_streams_stats/IndicesDataStreamsStatsRequest.ts#L23-L61" }, { "body": { @@ -21350,7 +21556,7 @@ } } ], - "specLocation": "indices/delete_data_lifecycle/IndicesDeleteDataLifecycleRequest.ts#L24-L46" + "specLocation": "indices/delete_data_lifecycle/IndicesDeleteDataLifecycleRequest.ts#L24-L47" }, { "body": { @@ -21431,7 +21637,7 @@ } } ], - "specLocation": "indices/delete_data_stream/IndicesDeleteDataStreamRequest.ts#L24-L58" + "specLocation": "indices/delete_data_stream/IndicesDeleteDataStreamRequest.ts#L24-L59" }, { "body": { @@ -21665,7 +21871,7 @@ "body": { "kind": "no_body" }, - "description": "Check aliases.\nChecks if one or more data stream or index aliases exist.", + "description": "Check aliases.\n\nCheck if one or more data stream or index aliases exist.", "inherits": { "type": { "name": "RequestBase", @@ -21761,7 +21967,7 @@ } } ], - "specLocation": "indices/exists_alias/IndicesExistsAliasRequest.ts#L23-L79" + "specLocation": "indices/exists_alias/IndicesExistsAliasRequest.ts#L23-L81" }, { "body": { @@ -21781,7 +21987,7 @@ "body": { "kind": "no_body" }, - "description": "Check index templates.\nCheck whether index templates exist.", + "description": "Check index templates.\n\nCheck whether index templates exist.", "inherits": { "type": { "name": "RequestBase", @@ -21822,7 +22028,7 @@ } } ], - "specLocation": "indices/exists_index_template/IndicesExistsIndexTemplateRequest.ts#L24-L49" + "specLocation": "indices/exists_index_template/IndicesExistsIndexTemplateRequest.ts#L24-L51" }, { "body": { @@ -21894,7 +22100,7 @@ } } ], - "specLocation": "indices/explain_data_lifecycle/IndicesExplainDataLifecycleRequest.ts#L24-L46" + "specLocation": 
"indices/explain_data_lifecycle/IndicesExplainDataLifecycleRequest.ts#L24-L47" }, { "body": { @@ -22277,7 +22483,7 @@ "body": { "kind": "no_body" }, - "description": "Get data stream lifecycles.\nRetrieves the data stream lifecycle configuration of one or more data streams.", + "description": "Get data stream lifecycles.\n\nGet the data stream lifecycle configuration of one or more data streams.", "inherits": { "type": { "name": "RequestBase", @@ -22344,7 +22550,7 @@ } } ], - "specLocation": "indices/get_data_lifecycle/IndicesGetDataLifecycleRequest.ts#L24-L66" + "specLocation": "indices/get_data_lifecycle/IndicesGetDataLifecycleRequest.ts#L24-L68" }, { "body": { @@ -22380,7 +22586,7 @@ "body": { "kind": "no_body" }, - "description": "Get data streams.\nRetrieves information about one or more data streams.", + "description": "Get data streams.\n\nGet information about one or more data streams.", "inherits": { "type": { "name": "RequestBase", @@ -22469,7 +22675,7 @@ } } ], - "specLocation": "indices/get_data_stream/IndicesGetDataStreamRequest.ts#L24-L76" + "specLocation": "indices/get_data_stream/IndicesGetDataStreamRequest.ts#L24-L78" }, { "body": { @@ -25463,7 +25669,7 @@ } }, { - "description": "The inference Id", + "description": "The inference identifier.", "name": "inference_id", "required": true, "type": { @@ -25477,7 +25683,7 @@ ], "query": [ { - "description": "When true, the endpoint is not deleted, and a list of ingest processors which reference this endpoint is returned", + "description": "When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned.", "name": "dry_run", "required": false, "serverDefault": false, @@ -25490,7 +25696,7 @@ } }, { - "description": "When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields", + "description": "When true, the inference endpoint is forcefully deleted even if it is still being used by 
ingest processors or semantic text fields.", "name": "force", "required": false, "serverDefault": false, @@ -25503,7 +25709,7 @@ } } ], - "specLocation": "inference/delete/DeleteRequest.ts#L24-L65" + "specLocation": "inference/delete/DeleteRequest.ts#L24-L66" }, { "body": { @@ -25569,7 +25775,7 @@ } ], "query": [], - "specLocation": "inference/get/GetRequest.ts#L24-L55" + "specLocation": "inference/get/GetRequest.ts#L24-L56" }, { "body": { @@ -25606,7 +25812,7 @@ "kind": "properties", "properties": [ { - "description": "Query input, required for rerank task.\nNot required for other tasks.", + "description": "The query input, which is required only for the `rerank` task.\nIt is not required for other tasks.", "name": "query", "required": false, "type": { @@ -25618,7 +25824,7 @@ } }, { - "description": "Inference input.\nEither a string or an array of strings.", + "description": "The text on which you want to perform the inference task.\nIt can be a single string or an array.\n\n> info\n> Inference endpoints for the `completion` task type currently only support a single string as input.", "name": "input", "required": true, "type": { @@ -25645,7 +25851,7 @@ } }, { - "description": "Optional task settings", + "description": "Task settings for the individual inference request.\nThese settings are specific to the task type you specified and override the task settings specified when initializing the service.", "name": "task_settings", "required": false, "type": { @@ -25658,7 +25864,7 @@ } ] }, - "description": "Perform inference on the service", + "description": "Perform inference on the service.\n\nThis API enables you to use machine learning models to perform specific tasks on data that you provide as an input.\nIt returns a response with the results of the tasks.\nThe inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.\n\n> info\n> The inference APIs enable you to use certain 
services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.", "inherits": { "type": { "name": "RequestBase", @@ -25672,7 +25878,7 @@ }, "path": [ { - "description": "The task type", + "description": "The type of inference task that the model performs.", "name": "task_type", "required": false, "type": { @@ -25684,7 +25890,7 @@ } }, { - "description": "The inference Id", + "description": "The unique identifier for the inference endpoint.", "name": "inference_id", "required": true, "type": { @@ -25698,7 +25904,7 @@ ], "query": [ { - "description": "Specifies the amount of time to wait for the inference request to complete.", + "description": "The amount of time to wait for the inference request to complete.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -25711,7 +25917,7 @@ } } ], - "specLocation": "inference/inference/InferenceRequest.ts#L26-L76" + "specLocation": "inference/inference/InferenceRequest.ts#L26-L89" }, { "body": { @@ -25785,7 +25991,7 @@ } ], "query": [], - "specLocation": "inference/put/PutRequest.ts#L25-L64" + "specLocation": "inference/put/PutRequest.ts#L25-L65" }, { "body": { @@ -26013,7 +26219,7 @@ }, "path": [], "query": [], - "specLocation": "_global/info/RootNodeInfoRequest.ts#L22-L38" + "specLocation": "_global/info/RootNodeInfoRequest.ts#L22-L39" }, { "body": { @@ -26147,7 +26353,7 @@ } } ], - "specLocation": "ingest/delete_pipeline/DeletePipelineRequest.ts#L24-L60" + "specLocation": "ingest/delete_pipeline/DeletePipelineRequest.ts#L24-L61" }, { "body": { @@ -26174,7 +26380,7 @@ "body": { "kind": "no_body" 
}, - "description": "Get pipelines.\nGet information about one or more ingest pipelines.\nThis API returns a local reference of the pipeline.", + "description": "Get pipelines.\n\nGet information about one or more ingest pipelines.\nThis API returns a local reference of the pipeline.", "inherits": { "type": { "name": "RequestBase", @@ -26228,7 +26434,7 @@ } } ], - "specLocation": "ingest/get_pipeline/GetPipelineRequest.ts#L24-L62" + "specLocation": "ingest/get_pipeline/GetPipelineRequest.ts#L24-L64" }, { "body": { @@ -26514,7 +26720,7 @@ } }, { - "description": "Pipeline to test.\nIf you don’t specify the `pipeline` request path parameter, this parameter is required.\nIf you specify both this and the request path parameter, the API only uses the request path parameter.", + "description": "The pipeline to test.\nIf you don't specify the `pipeline` request path parameter, this parameter is required.\nIf you specify both this and the request path parameter, the API only uses the request path parameter.", "name": "pipeline", "required": false, "type": { @@ -26527,7 +26733,7 @@ } ] }, - "description": "Simulate a pipeline.\nRun an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", + "description": "Simulate a pipeline.\n\nRun an ingest pipeline against a set of provided documents.\nYou can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.", "inherits": { "type": { "name": "RequestBase", @@ -26541,7 +26747,7 @@ }, "path": [ { - "description": "Pipeline to test.\nIf you don’t specify a `pipeline` in the request body, this parameter is required.", + "description": "The pipeline to test.\nIf you don't specify a `pipeline` in the request body, this parameter is required.", "name": "id", "required": false, "type": { @@ -26567,7 +26773,7 @@ } } ], - 
"specLocation": "ingest/simulate/SimulatePipelineRequest.ts#L25-L69" + "specLocation": "ingest/simulate/SimulatePipelineRequest.ts#L25-L72" }, { "body": { @@ -26603,7 +26809,7 @@ "body": { "kind": "no_body" }, - "description": "Get license information.\nGet information about your Elastic license including its type, its status, when it was issued, and when it expires.\n\nNOTE: If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.\nIf you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.", + "description": "Get license information.\n\nGet information about your Elastic license including its type, its status, when it was issued, and when it expires.\n\n>info\n> If the master node is generating a new cluster state, the get license API may return a `404 Not Found` response.\n> If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.", "inherits": { "type": { "name": "RequestBase", @@ -26648,7 +26854,7 @@ } } ], - "specLocation": "license/get/GetLicenseRequest.ts#L22-L53" + "specLocation": "license/get/GetLicenseRequest.ts#L22-L56" }, { "body": { @@ -26877,7 +27083,7 @@ } ] }, - "description": "Get multiple documents.\n\nGet multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.", + "description": "Get multiple documents.\n\nGet multiple JSON documents by ID from one or more indices.\nIf you specify an index in the request URI, you only need to specify the document IDs in the request body.\nTo ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.\n\n**Filter source fields**\n\nBy default, the `_source` field is returned for every document (if 
stored).\nUse the `_source` and `_source_include` or `source_exclude` attributes to filter what fields are returned for a particular document.\nYou can include the `_source`, `_source_includes`, and `_source_excludes` query parameters in the request URI to specify the defaults to use when there are no per-document instructions.\n\n**Get stored fields**\n\nUse the `stored_fields` attribute to specify the set of stored fields you want to retrieve.\nAny requested fields that are not stored are ignored.\nYou can include the `stored_fields` query parameter in the request URI to specify the defaults to use when there are no per-document instructions.", "inherits": { "type": { "name": "RequestBase", @@ -27010,13 +27216,14 @@ } } ], - "specLocation": "_global/mget/MultiGetRequest.ts#L25-L114" + "specLocation": "_global/mget/MultiGetRequest.ts#L25-L127" }, { "body": { "kind": "properties", "properties": [ { + "description": "The response includes a docs array that contains the documents in the order specified in the request.\nThe structure of the returned documents is similar to that returned by the get API.\nIf there is a failure getting a particular document, the error is included in place of the document.", "name": "docs", "required": true, "type": { @@ -27052,7 +27259,7 @@ "name": "Response", "namespace": "_global.mget" }, - "specLocation": "_global/mget/MultiGetResponse.ts#L22-L26" + "specLocation": "_global/mget/MultiGetResponse.ts#L22-L31" }, { "attachedBehaviors": [ @@ -27594,7 +27801,7 @@ } ], "query": [], - "specLocation": "ml/delete_filter/MlDeleteFilterRequest.ts#L23-L46" + "specLocation": "ml/delete_filter/MlDeleteFilterRequest.ts#L23-L47" }, { "body": { @@ -27687,7 +27894,7 @@ } } ], - "specLocation": "ml/delete_job/MlDeleteJobRequest.ts#L23-L70" + "specLocation": "ml/delete_job/MlDeleteJobRequest.ts#L23-L71" }, { "body": { @@ -27767,7 +27974,7 @@ } } ], - "specLocation": "ml/delete_trained_model/MlDeleteTrainedModelRequest.ts#L24-L55" + "specLocation": 
"ml/delete_trained_model/MlDeleteTrainedModelRequest.ts#L24-L56" }, { "body": { @@ -27833,7 +28040,7 @@ } ], "query": [], - "specLocation": "ml/delete_trained_model_alias/MlDeleteTrainedModelAliasRequest.ts#L23-L51" + "specLocation": "ml/delete_trained_model_alias/MlDeleteTrainedModelAliasRequest.ts#L23-L52" }, { "body": { @@ -27934,7 +28141,7 @@ }, "path": [], "query": [], - "specLocation": "ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L69" + "specLocation": "ml/estimate_model_memory/MlEstimateModelMemoryRequest.ts#L26-L70" }, { "body": { @@ -28021,7 +28228,7 @@ }, "path": [], "query": [], - "specLocation": "ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L59" + "specLocation": "ml/evaluate_data_frame/MlEvaluateDataFrameRequest.ts#L25-L60" }, { "body": { @@ -28229,7 +28436,7 @@ } } ], - "specLocation": "ml/flush_job/MlFlushJobRequest.ts#L24-L106" + "specLocation": "ml/flush_job/MlFlushJobRequest.ts#L24-L107" }, { "body": { @@ -28364,7 +28571,7 @@ } } ], - "specLocation": "ml/get_calendar_events/MlGetCalendarEventsRequest.ts#L25-L60" + "specLocation": "ml/get_calendar_events/MlGetCalendarEventsRequest.ts#L25-L61" }, { "body": { @@ -28479,7 +28686,7 @@ } } ], - "specLocation": "ml/get_calendars/MlGetCalendarsRequest.ts#L25-L62" + "specLocation": "ml/get_calendars/MlGetCalendarsRequest.ts#L25-L63" }, { "body": { @@ -28606,7 +28813,7 @@ } } ], - "specLocation": "ml/get_data_frame_analytics/MlGetDataFrameAnalyticsRequest.ts#L24-L88" + "specLocation": "ml/get_data_frame_analytics/MlGetDataFrameAnalyticsRequest.ts#L24-L89" }, { "body": { @@ -28734,7 +28941,7 @@ } } ], - "specLocation": "ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsRequest.ts#L24-L83" + "specLocation": "ml/get_data_frame_analytics_stats/MlGetDataFrameAnalyticsStatsRequest.ts#L24-L84" }, { "body": { @@ -28822,7 +29029,7 @@ } } ], - "specLocation": "ml/get_datafeed_stats/MlGetDatafeedStatsRequest.ts#L23-L71" + "specLocation": 
"ml/get_datafeed_stats/MlGetDatafeedStatsRequest.ts#L23-L72" }, { "body": { @@ -28922,7 +29129,7 @@ } } ], - "specLocation": "ml/get_datafeeds/MlGetDatafeedsRequest.ts#L23-L77" + "specLocation": "ml/get_datafeeds/MlGetDatafeedsRequest.ts#L23-L78" }, { "body": { @@ -29023,7 +29230,7 @@ } } ], - "specLocation": "ml/get_filters/MlGetFiltersRequest.ts#L24-L62" + "specLocation": "ml/get_filters/MlGetFiltersRequest.ts#L24-L63" }, { "body": { @@ -29111,7 +29318,7 @@ } } ], - "specLocation": "ml/get_job_stats/MlGetJobStatsRequest.ts#L23-L67" + "specLocation": "ml/get_job_stats/MlGetJobStatsRequest.ts#L23-L68" }, { "body": { @@ -29212,7 +29419,7 @@ } } ], - "specLocation": "ml/get_jobs/MlGetJobsRequest.ts#L23-L77" + "specLocation": "ml/get_jobs/MlGetJobsRequest.ts#L23-L78" }, { "body": { @@ -29487,7 +29694,7 @@ } } ], - "specLocation": "ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L152" + "specLocation": "ml/get_overall_buckets/MlGetOverallBucketsRequest.ts#L25-L153" }, { "body": { @@ -29683,7 +29890,7 @@ } } ], - "specLocation": "ml/get_trained_models/MlGetTrainedModelRequest.ts#L25-L109" + "specLocation": "ml/get_trained_models/MlGetTrainedModelRequest.ts#L25-L110" }, { "body": { @@ -29798,7 +30005,7 @@ } } ], - "specLocation": "ml/get_trained_models_stats/MlGetTrainedModelStatsRequest.ts#L24-L76" + "specLocation": "ml/get_trained_models_stats/MlGetTrainedModelStatsRequest.ts#L24-L77" }, { "body": { @@ -29924,7 +30131,7 @@ } } ], - "specLocation": "ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L71" + "specLocation": "ml/infer_trained_model/MlInferTrainedModelRequest.ts#L27-L72" }, { "body": { @@ -30016,7 +30223,7 @@ } } ], - "specLocation": "ml/open_job/MlOpenJobRequest.ts#L24-L65" + "specLocation": "ml/open_job/MlOpenJobRequest.ts#L24-L66" }, { "body": { @@ -30105,7 +30312,7 @@ } ], "query": [], - "specLocation": "ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L47" + "specLocation": 
"ml/post_calendar_events/MlPostCalendarEventsRequest.ts#L24-L48" }, { "body": { @@ -30157,7 +30364,7 @@ } ] }, - "description": "Preview features used by data frame analytics.\nPreviews the extracted features used by a data frame analytics config.", + "description": "Preview features used by data frame analytics.\nPreview the extracted features used by a data frame analytics config.", "inherits": { "type": { "name": "RequestBase", @@ -30184,7 +30391,7 @@ } ], "query": [], - "specLocation": "ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L59" + "specLocation": "ml/preview_data_frame_analytics/MlPreviewDataFrameAnalyticsRequest.ts#L24-L60" }, { "body": { @@ -30310,7 +30517,7 @@ } } ], - "specLocation": "ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L80" + "specLocation": "ml/preview_datafeed/MlPreviewDatafeedRequest.ts#L26-L81" }, { "body": { @@ -30402,7 +30609,7 @@ } ], "query": [], - "specLocation": "ml/put_calendar/MlPutCalendarRequest.ts#L23-L50" + "specLocation": "ml/put_calendar/MlPutCalendarRequest.ts#L23-L51" }, { "body": { @@ -30499,7 +30706,7 @@ } ], "query": [], - "specLocation": "ml/put_calendar_job/MlPutCalendarJobRequest.ts#L23-L44" + "specLocation": "ml/put_calendar_job/MlPutCalendarJobRequest.ts#L23-L45" }, { "body": { @@ -31195,7 +31402,7 @@ } } ], - "specLocation": "ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L183" + "specLocation": "ml/put_datafeed/MlPutDatafeedRequest.ts#L37-L184" }, { "body": { @@ -31463,7 +31670,7 @@ } ], "query": [], - "specLocation": "ml/put_filter/MlPutFilterRequest.ts#L23-L57" + "specLocation": "ml/put_filter/MlPutFilterRequest.ts#L23-L58" }, { "body": { @@ -31806,7 +32013,7 @@ } } ], - "specLocation": "ml/put_job/MlPutJobRequest.ts#L30-L155" + "specLocation": "ml/put_job/MlPutJobRequest.ts#L30-L156" }, { "body": { @@ -32255,7 +32462,7 @@ } } ], - "specLocation": "ml/put_trained_model/MlPutTrainedModelRequest.ts#L31-L134" + "specLocation": 
"ml/put_trained_model/MlPutTrainedModelRequest.ts#L31-L135" }, { "body": { @@ -32335,7 +32542,7 @@ } } ], - "specLocation": "ml/put_trained_model_alias/MlPutTrainedModelAliasRequest.ts#L23-L73" + "specLocation": "ml/put_trained_model_alias/MlPutTrainedModelAliasRequest.ts#L23-L74" }, { "body": { @@ -32439,7 +32646,7 @@ } ], "query": [], - "specLocation": "ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L64" + "specLocation": "ml/put_trained_model_definition_part/MlPutTrainedModelDefinitionPartRequest.ts#L24-L65" }, { "body": { @@ -32552,7 +32759,7 @@ } ], "query": [], - "specLocation": "ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L24-L67" + "specLocation": "ml/put_trained_model_vocabulary/MlPutTrainedModelVocabularyRequest.ts#L24-L68" }, { "body": { @@ -32633,7 +32840,7 @@ } } ], - "specLocation": "ml/reset_job/MlResetJobRequest.ts#L23-L64" + "specLocation": "ml/reset_job/MlResetJobRequest.ts#L23-L65" }, { "body": { @@ -32701,7 +32908,7 @@ } } ], - "specLocation": "ml/start_data_frame_analytics/MlStartDataFrameAnalyticsRequest.ts#L24-L67" + "specLocation": "ml/start_data_frame_analytics/MlStartDataFrameAnalyticsRequest.ts#L24-L68" }, { "body": { @@ -32850,7 +33057,7 @@ } } ], - "specLocation": "ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L98" + "specLocation": "ml/start_datafeed/MlStartDatafeedRequest.ts#L24-L99" }, { "body": { @@ -33013,7 +33220,7 @@ } } ], - "specLocation": "ml/start_trained_model_deployment/MlStartTrainedModelDeploymentRequest.ts#L29-L100" + "specLocation": "ml/start_trained_model_deployment/MlStartTrainedModelDeploymentRequest.ts#L29-L101" }, { "body": { @@ -33113,7 +33320,7 @@ } } ], - "specLocation": "ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsRequest.ts#L24-L77" + "specLocation": "ml/stop_data_frame_analytics/MlStopDataFrameAnalyticsRequest.ts#L24-L78" }, { "body": { @@ -33254,7 +33461,7 @@ } } ], - "specLocation": "ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L85" + 
"specLocation": "ml/stop_datafeed/MlStopDatafeedRequest.ts#L24-L86" }, { "body": { @@ -33341,7 +33548,7 @@ } } ], - "specLocation": "ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentRequest.ts#L23-L60" + "specLocation": "ml/stop_trained_model_deployment/MlStopTrainedModelDeploymentRequest.ts#L23-L61" }, { "body": { @@ -33458,7 +33665,7 @@ } ], "query": [], - "specLocation": "ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L79" + "specLocation": "ml/update_data_frame_analytics/MlUpdateDataFrameAnalyticsRequest.ts#L24-L80" }, { "body": { @@ -33883,7 +34090,7 @@ } } ], - "specLocation": "ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L169" + "specLocation": "ml/update_datafeed/MlUpdateDatafeedRequest.ts#L31-L170" }, { "body": { @@ -34166,7 +34373,7 @@ } ], "query": [], - "specLocation": "ml/update_filter/MlUpdateFilterRequest.ts#L23-L59" + "specLocation": "ml/update_filter/MlUpdateFilterRequest.ts#L23-L60" }, { "body": { @@ -34455,7 +34662,7 @@ } ], "query": [], - "specLocation": "ml/update_job/MlUpdateJobRequest.ts#L33-L146" + "specLocation": "ml/update_job/MlUpdateJobRequest.ts#L33-L147" }, { "body": { @@ -34796,7 +35003,7 @@ } } ], - "specLocation": "ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentRequest.ts#L24-L69" + "specLocation": "ml/update_trained_model_deployment/MlUpdateTrainedModelDeploymentRequest.ts#L24-L70" }, { "body": { @@ -35032,7 +35239,7 @@ } } ], - "specLocation": "_global/msearch/MultiSearchRequest.ts#L25-L134" + "specLocation": "_global/msearch/MultiSearchRequest.ts#L25-L135" }, { "body": { @@ -35072,7 +35279,7 @@ "CommonQueryParameters" ], "body": { - "codegenName": "search_templates", + "codegenName": "search_templates\nThe request body must be newline-delimited JSON (NDJSON) in the following format:\n\n```\n
\\n\n\\n\n
\\n\n\\n\n```\n\nEach `
` and `` pair represents a search request.\nThe `
` supports the same parameters as the multi search API's `
`.\nThe `` supports the same parameters as the search template API's request body.\n\nThe `
` contains the parameters used to limit or change the search.\nIt is required for each search body but can be empty `({})` or a blank line.\n\nThe `` contains the parameters for the search.", "kind": "value", "value": { "kind": "array_of", @@ -35085,7 +35292,7 @@ } } }, - "description": "Run multiple templated searches.", + "description": "Run multiple templated searches.\n\nRun multiple templated searches with a single request.\nIf you are providing a text file or text input to `curl`, use the `--data-binary` flag instead of `-d` to preserve newlines.\nFor example:\n\n```\n$ cat requests\n{ \"index\": \"my-index\" }\n{ \"id\": \"my-search-template\", \"params\": { \"query_string\": \"hello world\", \"from\": 0, \"size\": 10 }}\n{ \"index\": \"my-other-index\" }\n{ \"id\": \"my-other-search-template\", \"params\": { \"query_type\": \"match_all\" }}\n\n$ curl -H \"Content-Type: application/x-ndjson\" -XGET localhost:9200/_msearch/template --data-binary \"@requests\"; echo\n```", "inherits": { "type": { "name": "RequestBase", @@ -35099,7 +35306,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases to search.\nSupports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*`.", + "description": "A comma-separated list of data streams, indices, and aliases to search.\nIt supports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*`.", "name": "index", "required": false, "type": { @@ -35126,7 +35333,7 @@ } }, { - "description": "Maximum number of concurrent searches the API can run.", + "description": "The maximum number of concurrent searches the API can run.", "name": "max_concurrent_searches", "required": false, "type": { @@ -35138,7 +35345,7 @@ } }, { - "description": "The type of the search operation.\nAvailable options: `query_then_fetch`, `dfs_query_then_fetch`.", + "description": "The type of the search operation.", "name": "search_type", "required": 
false, "type": { @@ -35176,7 +35383,7 @@ } } ], - "specLocation": "_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L82" + "specLocation": "_global/msearch_template/MultiSearchTemplateRequest.ts#L25-L115" }, { "body": { @@ -35209,7 +35416,7 @@ "name": "Response", "namespace": "_global.msearch_template" }, - "specLocation": "_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L24" + "specLocation": "_global/msearch_template/MultiSearchTemplateResponse.ts#L22-L31" }, { "attachedBehaviors": [ @@ -35219,7 +35426,7 @@ "kind": "properties", "properties": [ { - "description": "Array of existing or artificial documents.", + "description": "An array of existing or artificial documents.", "name": "docs", "required": false, "type": { @@ -35234,7 +35441,7 @@ } }, { - "description": "Simplified syntax to specify documents by their ID if they're in the same index.", + "description": "A simplified syntax to specify documents by their ID if they're in the same index.", "name": "ids", "required": false, "type": { @@ -35250,7 +35457,7 @@ } ] }, - "description": "Get multiple term vectors.\n\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.", + "description": "Get multiple term vectors.\n\nGet multiple term vectors with a single request.\nYou can specify existing documents by index and ID or provide artificial documents in the body of the request.\nYou can specify the index in the request body or request URI.\nThe response contains a `docs` array with all the fetched termvectors.\nEach element has the structure provided by the termvectors API.\n\n**Artificial documents**\n\nYou can also use `mtermvectors` to generate term vectors for artificial documents provided in the body of the request.\nThe mapping used 
is determined by the specified `_index`.", "inherits": { "type": { "name": "RequestBase", @@ -35264,7 +35471,7 @@ }, "path": [ { - "description": "Name of the index that contains the documents.", + "description": "The name of the index that contains the documents.", "name": "index", "required": false, "type": { @@ -35293,7 +35500,7 @@ } }, { - "description": "Comma-separated list or wildcard expressions of fields to include in the statistics.\nUsed as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.", + "description": "A comma-separated list or wildcard expressions of fields to include in the statistics.\nIt is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.", "name": "fields", "required": false, "type": { @@ -35357,7 +35564,7 @@ } }, { - "description": "Specifies the node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nIt is random by default.", "name": "preference", "required": false, "type": { @@ -35384,7 +35591,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -35421,7 +35628,7 @@ } }, { - "description": "Specific version type.", + "description": "The version type.", "name": "version_type", "required": false, "type": { @@ -35433,7 +35640,7 @@ } } ], - "specLocation": "_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L126" + "specLocation": "_global/mtermvectors/MultiTermVectorsRequest.ts#L31-L134" }, { "body": { @@ -35645,7 +35852,7 @@ }, "path": [], "query": [], - "specLocation": "_global/ping/PingRequest.ts#L22-L37" + "specLocation": "_global/ping/PingRequest.ts#L22-L38" }, { "body": { @@ -35666,7 +35873,7 @@ "kind": "properties", "properties": [ 
{ - "description": "Contains the script or search template, its parameters, and its language.", + "description": "The script or search template, its parameters, and its language.", "name": "script", "required": true, "type": { @@ -35693,7 +35900,7 @@ }, "path": [ { - "description": "Identifier for the stored script or search template.\nMust be unique within the cluster.", + "description": "The identifier for the stored script or search template.\nIt must be unique within the cluster.", "name": "id", "required": true, "type": { @@ -35705,7 +35912,7 @@ } }, { - "description": "Context in which the script or search template should run.\nTo prevent errors, the API immediately compiles the script or template in this context.", + "description": "The context in which the script or search template should run.\nTo prevent errors, the API immediately compiles the script or template in this context.", "name": "context", "required": false, "type": { @@ -35719,7 +35926,19 @@ ], "query": [ { - "description": "Period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.", + "description": "The context in which the script or search template should run.\nTo prevent errors, the API immediately compiles the script or template in this context.\nIf you specify both this and the `` path parameter, the API uses the request path parameter.", + "name": "context", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "Name", + "namespace": "_types" + } + } + }, + { + "description": "The period to wait for a connection to the master node.\nIf no response is received before the timeout expires, the request fails and returns an error.\nIt can also be set to `-1` to indicate that the request should never timeout.", "name": "master_timeout", "required": false, "serverDefault": "30s", @@ -35732,7 +35951,7 @@ } }, { - "description": "Period to wait for a response.\nIf no response is 
received before the timeout expires, the request fails and returns an error.", + "description": "The period to wait for a response.\nIf no response is received before the timeout expires, the request fails and returns an error.\nIt can also be set to `-1` to indicate that the request should never timeout.", "name": "timeout", "required": false, "serverDefault": "30s", @@ -35745,7 +35964,7 @@ } } ], - "specLocation": "_global/put_script/PutScriptRequest.ts#L25-L76" + "specLocation": "_global/put_script/PutScriptRequest.ts#L25-L87" }, { "body": { @@ -36461,7 +36680,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported.\nTo target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.", + "description": "A comma-separated list of data streams, indices, and index aliases used to limit the request.\nWildcard (`*`) expressions are supported.\nTo target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`.", "name": "index", "required": false, "type": { @@ -36525,7 +36744,7 @@ } } ], - "specLocation": "_global/rank_eval/RankEvalRequest.ts#L24-L74" + "specLocation": "_global/rank_eval/RankEvalRequest.ts#L24-L76" }, { "body": { @@ -37036,6 +37255,18 @@ "body": { "kind": "properties", "properties": [ + { + "description": "The ID of the search template to render.\nIf no `source` is specified, this or the `` request path parameter is required.\nIf you specify both this parameter and the `` parameter, the API uses only ``.", + "name": "id", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + }, { "name": "file", "required": false, @@ -37067,7 +37298,7 @@ } }, { - "description": "An inline search template.\nSupports the same parameters as the search API's request body.\nThese parameters also support Mustache variables.\nIf no `id` or `` 
is specified, this parameter is required.", + "description": "An inline search template.\nIt supports the same parameters as the search API's request body.\nThese parameters also support Mustache variables.\nIf no `id` or `` is specified, this parameter is required.", "name": "source", "required": false, "type": { @@ -37094,7 +37325,7 @@ }, "path": [ { - "description": "ID of the search template to render.\nIf no `source` is specified, this or the `id` request body parameter is required.", + "description": "The ID of the search template to render.\nIf no `source` is specified, this or the `id` request body parameter is required.", "name": "id", "required": false, "type": { @@ -37107,7 +37338,7 @@ } ], "query": [], - "specLocation": "_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L68" + "specLocation": "_global/render_search_template/RenderSearchTemplateRequest.ts#L25-L76" }, { "body": { @@ -37148,20 +37379,20 @@ "kind": "properties", "properties": [ { - "description": "The context that the script should run in.", + "description": "The context that the script should run in.\nNOTE: Result ordering in the field contexts is not guaranteed.", "name": "context", "required": false, "serverDefault": "painless_test", "type": { "kind": "instance_of", "type": { - "name": "string", - "namespace": "_builtins" + "name": "PainlessContext", + "namespace": "_global.scripts_painless_execute" } } }, { - "description": "Additional parameters for the `context`.", + "description": "Additional parameters for the `context`.\nNOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`.", "name": "context_setup", "required": false, "type": { @@ -37173,7 +37404,7 @@ } }, { - "description": "The Painless script to execute.", + "description": "The Painless script to run.", "name": "script", "required": false, "type": { @@ -37186,7 +37417,7 @@ } ] }, - "description": "Run a script.\nRuns a script and 
returns a result.", + "description": "Run a script.\n\nRuns a script and returns a result.\nUse this API to build and test scripts, such as when defining a script for a runtime field.\nThis API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.\n\nThe API uses several _contexts_, which control how scripts are run, what variables are available at runtime, and what the return type is.\n\nEach context requires a script, but additional parameters depend on the context you're using for that script.", "inherits": { "type": { "name": "RequestBase", @@ -37200,7 +37431,7 @@ }, "path": [], "query": [], - "specLocation": "_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L54" + "specLocation": "_global/scripts_painless_execute/ExecutePainlessScriptRequest.ts#L24-L64" }, { "body": { @@ -37240,7 +37471,7 @@ "kind": "properties", "properties": [ { - "description": "Period to retain the search context for scrolling.", + "description": "The period to retain the search context for scrolling.", "docId": "scroll-search-results", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", "name": "scroll", @@ -37255,7 +37486,7 @@ } }, { - "description": "Scroll ID of the search.", + "description": "The scroll ID of the search.", "name": "scroll_id", "required": true, "type": { @@ -37300,7 +37531,7 @@ ], "query": [ { - "description": "Period to retain the search context for scrolling.", + "description": "The period to retain the search context for scrolling.", "docId": "scroll-search-results", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", "name": "scroll", @@ -37344,7 +37575,7 @@ } } ], - "specLocation": "_global/scroll/ScrollRequest.ts#L24-L86" + "specLocation": "_global/scroll/ScrollRequest.ts#L24-L88" }, { "body": { @@ -38527,7 +38758,7 @@ } } ], - 
"specLocation": "_global/search/SearchRequest.ts#L54-L589" + "specLocation": "_global/search/SearchRequest.ts#L54-L590" }, { "body": { @@ -38569,7 +38800,7 @@ "body": { "kind": "no_body" }, - "description": "Delete a search application.\nRemove a search application and its associated alias. Indices attached to the search application are not removed.", + "description": "Delete a search application.\n\nRemove a search application and its associated alias. Indices attached to the search application are not removed.", "inherits": { "type": { "name": "RequestBase", @@ -38583,7 +38814,7 @@ }, "path": [ { - "description": "The name of the search application to delete", + "description": "The name of the search application to delete.", "name": "name", "required": true, "type": { @@ -38596,7 +38827,7 @@ } ], "query": [], - "specLocation": "search_application/delete/SearchApplicationsDeleteRequest.ts#L22-L44" + "specLocation": "search_application/delete/SearchApplicationsDeleteRequest.ts#L22-L46" }, { "body": { @@ -38650,7 +38881,7 @@ } ], "query": [], - "specLocation": "search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteRequest.ts#L22-L43" + "specLocation": "search_application/delete_behavioral_analytics/BehavioralAnalyticsDeleteRequest.ts#L22-L44" }, { "body": { @@ -38704,7 +38935,7 @@ } ], "query": [], - "specLocation": "search_application/get/SearchApplicationsGetRequest.ts#L22-L42" + "specLocation": "search_application/get/SearchApplicationsGetRequest.ts#L22-L43" }, { "body": { @@ -38761,7 +38992,7 @@ } ], "query": [], - "specLocation": "search_application/get_behavioral_analytics/BehavioralAnalyticsGetRequest.ts#L22-L46" + "specLocation": "search_application/get_behavioral_analytics/BehavioralAnalyticsGetRequest.ts#L22-L47" }, { "body": { @@ -38851,7 +39082,7 @@ } } ], - "specLocation": "search_application/list/SearchApplicationsListRequest.ts#L22-L52" + "specLocation": "search_application/list/SearchApplicationsListRequest.ts#L22-L53" }, { "body": 
{ @@ -38947,7 +39178,7 @@ } } ], - "specLocation": "search_application/put/SearchApplicationsPutRequest.ts#L23-L56" + "specLocation": "search_application/put/SearchApplicationsPutRequest.ts#L23-L57" }, { "body": { @@ -39007,7 +39238,7 @@ } ], "query": [], - "specLocation": "search_application/put_behavioral_analytics/BehavioralAnalyticsPutRequest.ts#L22-L42" + "specLocation": "search_application/put_behavioral_analytics/BehavioralAnalyticsPutRequest.ts#L22-L43" }, { "body": { @@ -39102,7 +39333,7 @@ } } ], - "specLocation": "search_application/search/SearchApplicationsSearchRequest.ts#L24-L60" + "specLocation": "search_application/search/SearchApplicationsSearchRequest.ts#L24-L61" }, { "body": { @@ -39145,7 +39376,7 @@ "kind": "properties", "properties": [ { - "description": "Sub-aggregations for the geotile_grid.\n\nSupports the following aggregation types:\n- avg\n- cardinality\n- max\n- min\n- sum", + "description": "Sub-aggregations for the geotile_grid.\n\nIt supports the following aggregation types:\n\n- `avg`\n- `boxplot`\n- `cardinality`\n- `extended stats`\n- `max`\n- `median absolute deviation`\n- `min`\n- `percentile`\n- `percentile-rank`\n- `stats`\n- `sum`\n- `value count`\n\nThe aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations.", "name": "aggs", "required": false, "type": { @@ -39168,7 +39399,7 @@ } }, { - "description": "Size, in pixels, of a clipping buffer outside the tile. This allows renderers\nto avoid outline artifacts from geometries that extend past the extent of the tile.", + "description": "The size, in pixels, of a clipping buffer outside the tile. 
This allows renderers\nto avoid outline artifacts from geometries that extend past the extent of the tile.", "name": "buffer", "required": false, "serverDefault": 5, @@ -39181,7 +39412,7 @@ } }, { - "description": "If false, the meta layer’s feature is the bounding box of the tile.\nIf true, the meta layer’s feature is a bounding box resulting from a\ngeo_bounds aggregation. The aggregation runs on values that intersect\nthe // tile with wrap_longitude set to false. The resulting\nbounding box may be larger than the vector tile.", + "description": "If `false`, the meta layer's feature is the bounding box of the tile.\nIf `true`, the meta layer's feature is a bounding box resulting from a\n`geo_bounds` aggregation. The aggregation runs on values that intersect\nthe `//` tile with `wrap_longitude` set to `false`. The resulting\nbounding box may be larger than the vector tile.", "name": "exact_bounds", "required": false, "serverDefault": false, @@ -39194,7 +39425,7 @@ } }, { - "description": "Size, in pixels, of a side of the tile. Vector tiles are square with equal sides.", + "description": "The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.", "name": "extent", "required": false, "serverDefault": 4096, @@ -39207,7 +39438,7 @@ } }, { - "description": "Fields to return in the `hits` layer. Supports wildcards (`*`).\nThis parameter does not support fields with array values. Fields with array\nvalues may return inconsistent results.", + "description": "The fields to return in the `hits` layer.\nIt supports wildcards (`*`).\nThis parameter does not support fields with array values. 
Fields with array\nvalues may return inconsistent results.", "name": "fields", "required": false, "type": { @@ -39219,7 +39450,7 @@ } }, { - "description": "Aggregation used to create a grid for the `field`.", + "description": "The aggregation used to create a grid for the `field`.", "name": "grid_agg", "required": false, "type": { @@ -39231,7 +39462,7 @@ } }, { - "description": "Additional zoom levels available through the aggs layer. For example, if is 7\nand grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results\ndon’t include the aggs layer.", + "description": "Additional zoom levels available through the aggs layer. For example, if `` is `7`\nand `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results\ndon't include the aggs layer.", "name": "grid_precision", "required": false, "serverDefault": 8, @@ -39244,7 +39475,7 @@ } }, { - "description": "Determines the geometry type for features in the aggs layer. In the aggs layer,\neach feature represents a geotile_grid cell. If 'grid' each feature is a Polygon\nof the cells bounding box. If 'point' each feature is a Point that is the centroid\nof the cell.", + "description": "Determines the geometry type for features in the aggs layer. In the aggs layer,\neach feature represents a `geotile_grid` cell. If `grid, each feature is a polygon\nof the cells bounding box. If `point`, each feature is a Point that is the centroid\nof the cell.", "name": "grid_type", "required": false, "serverDefault": "grid", @@ -39257,7 +39488,7 @@ } }, { - "description": "Query DSL used to filter documents for the search.", + "description": "The query DSL used to filter documents for the search.", "name": "query", "required": false, "type": { @@ -39281,7 +39512,7 @@ } }, { - "description": "Maximum number of features to return in the hits layer. Accepts 0-10000.\nIf 0, results don’t include the hits layer.", + "description": "The maximum number of features to return in the hits layer. 
Accepts 0-10000.\nIf 0, results don't include the hits layer.", "name": "size", "required": false, "serverDefault": 10000, @@ -39294,7 +39525,7 @@ } }, { - "description": "Sorts features in the hits layer. By default, the API calculates a bounding\nbox for each feature. It sorts features based on this box’s diagonal length,\nfrom longest to shortest.", + "description": "Sort the features in the hits layer. By default, the API calculates a bounding\nbox for each feature. It sorts features based on this box's diagonal length,\nfrom longest to shortest.", "name": "sort", "required": false, "type": { @@ -39306,7 +39537,7 @@ } }, { - "description": "Number of hits matching the query to count accurately. If `true`, the exact number\nof hits is returned at the cost of some performance. If `false`, the response does\nnot include the total number of hits matching the query.", + "description": "The number of hits matching the query to count accurately. If `true`, the exact number\nof hits is returned at the cost of some performance. 
If `false`, the response does\nnot include the total number of hits matching the query.", "name": "track_total_hits", "required": false, "serverDefault": "10000", @@ -39319,7 +39550,7 @@ } }, { - "description": "If `true`, the hits and aggs layers will contain additional point features representing\nsuggested label positions for the original features.", + "description": "If `true`, the hits and aggs layers will contain additional point features representing\nsuggested label positions for the original features.\n\n* `Point` and `MultiPoint` features will have one of the points selected.\n* `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree.\n* `LineString` features will likewise provide a roughly central point selected from the triangle-tree.\n* The aggregation results will provide one central point for each aggregation bucket.\n\nAll attributes from the original features will also be copied to the new label features.\nIn addition, the new features will be distinguishable using the tag `_mvt_label_position`.", "name": "with_labels", "required": false, "type": { @@ -39332,7 +39563,7 @@ } ] }, - "description": "Search a vector tile.\n\nSearch a vector tile for geospatial values.", + "description": "Search a vector tile.\n\nSearch a vector tile for geospatial values.\nBefore using this API, you should be familiar with the Mapbox vector tile specification.\nThe API returns results as a binary mapbox vector tile.\n\nInternally, Elasticsearch translates a vector tile search API request into a search containing:\n\n* A `geo_bounding_box` query on the ``. The query uses the `//` tile as a bounding box.\n* A `geotile_grid` or `geohex_grid` aggregation on the ``. The `grid_agg` parameter determines the aggregation type. The aggregation uses the `//` tile as a bounding box.\n* Optionally, a `geo_bounds` aggregation on the ``. 
The search only includes this aggregation if the `exact_bounds` parameter is `true`.\n* If the optional parameter `with_labels` is `true`, the internal search will include a dynamic runtime field that calls the `getLabelPosition` function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.\n\nFor example, Elasticsearch may translate a vector tile search API request with a `grid_agg` argument of `geotile` and an `exact_bounds` argument of `true` into the following search\n\n```\nGET my-index/_search\n{\n \"size\": 10000,\n \"query\": {\n \"geo_bounding_box\": {\n \"my-geo-field\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"aggregations\": {\n \"grid\": {\n \"geotile_grid\": {\n \"field\": \"my-geo-field\",\n \"precision\": 11,\n \"size\": 65536,\n \"bounds\": {\n \"top_left\": {\n \"lat\": -40.979898069620134,\n \"lon\": -45\n },\n \"bottom_right\": {\n \"lat\": -66.51326044311186,\n \"lon\": 0\n }\n }\n }\n },\n \"bounds\": {\n \"geo_bounds\": {\n \"field\": \"my-geo-field\",\n \"wrap_longitude\": false\n }\n }\n }\n}\n```\n\nThe API returns results as a binary Mapbox vector tile.\nMapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:\n\n* A `hits` layer containing a feature for each `` value matching the `geo_bounding_box` query.\n* An `aggs` layer containing a feature for each cell of the `geotile_grid` or `geohex_grid`. The layer only contains features for cells with matching data.\n* A meta layer containing:\n * A feature containing a bounding box. 
By default, this is the bounding box of the tile.\n * Value ranges for any sub-aggregations on the `geotile_grid` or `geohex_grid`.\n * Metadata for the search.\n\nThe API only returns features that can display at its zoom level.\nFor example, if a polygon feature has no area at its zoom level, the API omits it.\nThe API returns errors as UTF-8 encoded JSON.\n\nIMPORTANT: You can specify several options for this API as either a query parameter or request body parameter.\nIf you specify both parameters, the query parameter takes precedence.\n\n**Grid precision for geotile**\n\nFor a `grid_agg` of `geotile`, you can use cells in the `aggs` layer as tiles for lower zoom levels.\n`grid_precision` represents the additional zoom levels available through these cells. The final precision is computed by as follows: ` + grid_precision`.\nFor example, if `` is 7 and `grid_precision` is 8, then the `geotile_grid` aggregation will use a precision of 15.\nThe maximum final precision is 29.\nThe `grid_precision` also determines the number of cells for the grid as follows: `(2^grid_precision) x (2^grid_precision)`.\nFor example, a value of 8 divides the tile into a grid of 256 x 256 cells.\nThe `aggs` layer only contains features for cells with matching data.\n\n**Grid precision for geohex**\n\nFor a `grid_agg` of `geohex`, Elasticsearch uses `` and `grid_precision` to calculate a final precision as follows: ` + grid_precision`.\n\nThis precision determines the H3 resolution of the hexagonal cells produced by the `geohex` aggregation.\nThe following table maps the H3 resolution for each precision.\nFor example, if `` is 3 and `grid_precision` is 3, the precision is 6.\nAt a precision of 6, hexagonal cells have an H3 resolution of 2.\nIf `` is 3 and `grid_precision` is 4, the precision is 7.\nAt a precision of 7, hexagonal cells have an H3 resolution of 3.\n\n| Precision | Unique tile bins | H3 resolution | Unique hex bins |\tRatio |\n| --------- | ---------------- | ------------- 
| ----------------| ----- |\n| 1 | 4 | 0 | 122 | 30.5 |\n| 2 | 16 | 0 | 122 | 7.625 |\n| 3 | 64 | 1 | 842 | 13.15625 |\n| 4 | 256 | 1 | 842 | 3.2890625 |\n| 5 | 1024 | 2 | 5882 | 5.744140625 |\n| 6 | 4096 | 2 | 5882 | 1.436035156 |\n| 7 | 16384 | 3 | 41162 | 2.512329102 |\n| 8 | 65536 | 3 | 41162 | 0.6280822754 |\n| 9 | 262144 | 4 | 288122 | 1.099098206 |\n| 10 | 1048576 | 4 | 288122 | 0.2747745514 |\n| 11 | 4194304 | 5 | 2016842 | 0.4808526039 |\n| 12 | 16777216 | 6 | 14117882 | 0.8414913416 |\n| 13 | 67108864 | 6 | 14117882 | 0.2103728354 |\n| 14 | 268435456 | 7 | 98825162 | 0.3681524172 |\n| 15 | 1073741824 | 8 | 691776122 | 0.644266719 |\n| 16 | 4294967296 | 8 | 691776122 | 0.1610666797 |\n| 17 | 17179869184 | 9 | 4842432842 | 0.2818666889 |\n| 18 | 68719476736 | 10 | 33897029882 | 0.4932667053 |\n| 19 | 274877906944 | 11 | 237279209162 | 0.8632167343 |\n| 20 | 1099511627776 | 11 | 237279209162 | 0.2158041836 |\n| 21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213 |\n| 22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122 |\n| 23 | 70368744177664 | 13 | 11626681248842 | 0.165225078 |\n| 24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866 |\n| 25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015 |\n| 26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504 |\n| 27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259 |\n| 28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149 |\n| 29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537 |\n\nHexagonal cells don't align perfectly on a vector tile.\nSome cells may intersect more than one vector tile.\nTo compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level.\nElasticsearch uses the H3 resolution that is closest to the corresponding geotile density.", "inherits": { "type": { "name": "RequestBase", @@ -39408,7 +39639,7 @@ ], "query": [ { - 
"description": "If false, the meta layer’s feature is the bounding box of the tile.\nIf true, the meta layer’s feature is a bounding box resulting from a\ngeo_bounds aggregation. The aggregation runs on values that intersect\nthe // tile with wrap_longitude set to false. The resulting\nbounding box may be larger than the vector tile.", + "description": "If `false`, the meta layer's feature is the bounding box of the tile.\nIf true, the meta layer's feature is a bounding box resulting from a\ngeo_bounds aggregation. The aggregation runs on values that intersect\nthe // tile with wrap_longitude set to false. The resulting\nbounding box may be larger than the vector tile.", "name": "exact_bounds", "required": false, "serverDefault": false, @@ -39421,7 +39652,7 @@ } }, { - "description": "Size, in pixels, of a side of the tile. Vector tiles are square with equal sides.", + "description": "The size, in pixels, of a side of the tile. Vector tiles are square with equal sides.", "name": "extent", "required": false, "serverDefault": 4096, @@ -39446,7 +39677,7 @@ } }, { - "description": "Additional zoom levels available through the aggs layer. For example, if is 7\nand grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results\ndon’t include the aggs layer.", + "description": "Additional zoom levels available through the aggs layer. For example, if is 7\nand grid_precision is 8, you can zoom in up to level 15. Accepts 0-8. If 0, results\ndon't include the aggs layer.", "name": "grid_precision", "required": false, "serverDefault": 8, @@ -39472,7 +39703,7 @@ } }, { - "description": "Maximum number of features to return in the hits layer. Accepts 0-10000.\nIf 0, results don’t include the hits layer.", + "description": "Maximum number of features to return in the hits layer. 
Accepts 0-10000.\nIf 0, results don't include the hits layer.", "name": "size", "required": false, "serverDefault": 10000, @@ -39485,7 +39716,7 @@ } }, { - "description": "If `true`, the hits and aggs layers will contain additional point features representing\nsuggested label positions for the original features.", + "description": "If `true`, the hits and aggs layers will contain additional point features representing\nsuggested label positions for the original features.\n\n* `Point` and `MultiPoint` features will have one of the points selected.\n* `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree.\n* `LineString` features will likewise provide a roughly central point selected from the triangle-tree.\n* The aggregation results will provide one central point for each aggregation bucket.\n\nAll attributes from the original features will also be copied to the new label features.\nIn addition, the new features will be distinguishable using the tag `_mvt_label_position`.", "name": "with_labels", "required": false, "type": { @@ -39497,7 +39728,7 @@ } } ], - "specLocation": "_global/search_mvt/SearchMvtRequest.ts#L33-L199" + "specLocation": "_global/search_mvt/SearchMvtRequest.ts#L33-L373" }, { "body": { @@ -39525,7 +39756,7 @@ "kind": "properties", "properties": [ { - "description": "If `true`, returns detailed information about score calculation as part of each hit.", + "description": "If `true`, returns detailed information about score calculation as part of each hit.\nIf you specify both this and the `explain` query parameter, the API uses only the query parameter.", "name": "explain", "required": false, "serverDefault": false, @@ -39538,7 +39769,7 @@ } }, { - "description": "ID of the search template to use. If no source is specified,\nthis parameter is required.", + "description": "The ID of the search template to use. 
If no `source` is specified,\nthis parameter is required.", "name": "id", "required": false, "type": { @@ -39582,7 +39813,7 @@ } }, { - "description": "An inline search template. Supports the same parameters as the search API's\nrequest body. Also supports Mustache variables. If no id is specified, this\nparameter is required.", + "description": "An inline search template. Supports the same parameters as the search API's\nrequest body. It also supports Mustache variables. If no `id` is specified, this\nparameter is required.", "name": "source", "required": false, "type": { @@ -39609,7 +39840,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices,\nand aliases to search. Supports wildcards (*).", + "description": "A comma-separated list of data streams, indices, and aliases to search.\nIt supports wildcards (`*`).", "name": "index", "required": false, "type": { @@ -39649,7 +39880,7 @@ } }, { - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", "name": "expand_wildcards", "required": false, "type": { @@ -39674,6 +39905,10 @@ } }, { + "deprecation": { + "description": "", + "version": "7.16.0" + }, "description": "If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled.", "name": "ignore_throttled", "required": false, @@ -39700,7 +39935,7 @@ } }, { - "description": "Specifies the node or shard the operation should be performed 
on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nIt is random by default.", "name": "preference", "required": false, "type": { @@ -39725,7 +39960,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -39767,7 +40002,7 @@ "since": "7.0.0" } }, - "description": "If true, hits.total are rendered as an integer in the response.", + "description": "If `true`, `hits.total` is rendered as an integer in the response.\nIf `false`, it is rendered as an object.", "name": "rest_total_hits_as_int", "required": false, "serverDefault": false, @@ -39793,7 +40028,7 @@ } } ], - "specLocation": "_global/search_template/SearchTemplateRequest.ts#L32-L146" + "specLocation": "_global/search_template/SearchTemplateRequest.ts#L32-L153" }, { "body": { @@ -42923,7 +43158,7 @@ } }, { - "description": "How many matching terms to return.", + "description": "The number of matching terms to return.", "name": "size", "required": false, "serverDefault": 10, @@ -42936,7 +43171,7 @@ } }, { - "description": "The maximum length of time to spend collecting results. Defaults to \"1s\" (one second). 
If the timeout is exceeded the complete flag set to false in the response and the results may be partial or empty.", + "description": "The maximum length of time to spend collecting results.\nIf the timeout is exceeded the `complete` flag set to `false` in the response and the results may be partial or empty.", "name": "timeout", "required": false, "serverDefault": "1s", @@ -42949,7 +43184,7 @@ } }, { - "description": "When true the provided search string is matched against index terms without case sensitivity.", + "description": "When `true`, the provided search string is matched against index terms without case sensitivity.", "name": "case_insensitive", "required": false, "serverDefault": false, @@ -42962,7 +43197,7 @@ } }, { - "description": "Allows to filter an index shard if the provided query rewrites to match_none.", + "description": "Filter an index shard if the provided query rewrites to `match_none`.", "docId": "query-dsl", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl.html", "name": "index_filter", @@ -42976,7 +43211,7 @@ } }, { - "description": "The string after which terms in the index should be returned. 
Allows for a form of pagination if the last result from one request is passed as the search_after parameter for a subsequent request.", + "description": "The string to match at the start of indexed terms.\nIf it is not provided, all terms in the field are considered.\n\n> info\n> The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766.", "name": "string", "required": false, "type": { @@ -42988,6 +43223,7 @@ } }, { + "description": "The string after which terms in the index should be returned.\nIt allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request.", "name": "search_after", "required": false, "type": { @@ -43000,7 +43236,7 @@ } ] }, - "description": "Get terms in an index.\n\nDiscover terms that match a partial string in an index.\nThis \"terms enum\" API is designed for low-latency look-ups used in auto-complete scenarios.\n\nIf the `complete` property in the response is false, the returned terms set may be incomplete and should be treated as approximate.\nThis can occur due to a few reasons, such as a request timeout or a node error.\n\nNOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.", + "description": "Get terms in an index.\n\nDiscover terms that match a partial string in an index.\nThis API is designed for low-latency look-ups used in auto-complete scenarios.\n\n> info\n> The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. 
Until that happens, the terms enum API will return terms from these documents.", "inherits": { "type": { "name": "RequestBase", @@ -43014,7 +43250,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported.", + "description": "A comma-separated list of data streams, indices, and index aliases to search.\nWildcard (`*`) expressions are supported.\nTo search all data streams or indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": true, "type": { @@ -43027,7 +43263,7 @@ } ], "query": [], - "specLocation": "_global/terms_enum/TermsEnumRequest.ts#L26-L81" + "specLocation": "_global/terms_enum/TermsEnumRequest.ts#L26-L93" }, { "body": { @@ -43059,6 +43295,7 @@ } }, { + "description": "If `false`, the returned terms set may be incomplete and should be treated as approximate.\nThis can occur due to a few reasons, such as a request timeout or a node error.", "name": "complete", "required": true, "type": { @@ -43076,7 +43313,7 @@ "name": "Response", "namespace": "_global.terms_enum" }, - "specLocation": "_global/terms_enum/TermsEnumResponse.ts#L22-L28" + "specLocation": "_global/terms_enum/TermsEnumResponse.ts#L22-L32" }, { "attachedBehaviors": [ @@ -43098,7 +43335,9 @@ } }, { - "description": "Filter terms based on their tf-idf scores.", + "description": "Filter terms based on their tf-idf scores.\nThis could be useful in order find out a good characteristic vector of a document.\nThis feature works in a similar manner to the second phase of the More Like This Query.", + "extDocId": "query-dsl-mlt-query", + "extDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/query-dsl-mlt-query.html", "name": "filter", "required": false, "type": { @@ -43110,7 +43349,7 @@ } }, { - "description": "Overrides the default per-field analyzer.", + "description": "Override the default per-field analyzer.\nThis is useful in order to generate term 
vectors in any fashion, especially when using artificial documents.\nWhen providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated.", "name": "per_field_analyzer", "required": false, "type": { @@ -43134,7 +43373,7 @@ } ] }, - "description": "Get term vector information.\n\nGet information and statistics about terms in the fields of a particular document.", + "description": "Get term vector information.\n\nGet information and statistics about terms in the fields of a particular document.\n\nYou can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request.\nYou can specify the fields you are interested in through the `fields` parameter or by adding the fields to the request body.\nFor example:\n\n```\nGET /my-index-000001/_termvectors/1?fields=message\n```\n\nFields can be specified using wildcards, similar to the multi match query.\n\nTerm vectors are real-time by default, not near real-time.\nThis can be changed by setting `realtime` parameter to `false`.\n\nYou can request three types of values: _term information_, _term statistics_, and _field statistics_.\nBy default, all term information and field statistics are returned for all fields but term statistics are excluded.\n\n**Term information**\n\n* term frequency in the field (always returned)\n* term positions (`positions: true`)\n* start and end offsets (`offsets: true`)\n* term payloads (`payloads: true`), as base64 encoded bytes\n\nIf the requested information wasn't stored in the index, it will be computed on the fly if possible.\nAdditionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.\n\n> warn\n> Start and end offsets assume UTF-16 encoding is being used. 
If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.\n\n**Behaviour**\n\nThe term and field statistics are not accurate.\nDeleted documents are not taken into account.\nThe information is only retrieved for the shard the requested document resides in.\nThe term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context.\nBy default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected.\nUse `routing` only to hit a particular shard.", "generics": [ { "name": "TDocument", @@ -43154,7 +43393,7 @@ }, "path": [ { - "description": "Name of the index that contains the document.", + "description": "The name of the index that contains the document.", "name": "index", "required": true, "type": { @@ -43166,7 +43405,7 @@ } }, { - "description": "Unique identifier of the document.", + "description": "A unique identifier for the document.", "name": "id", "required": false, "type": { @@ -43180,7 +43419,7 @@ ], "query": [ { - "description": "Comma-separated list or wildcard expressions of fields to include in the statistics.\nUsed as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.", + "description": "A comma-separated list or wildcard expressions of fields to include in the statistics.\nIt is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters.", "name": "fields", "required": false, "type": { @@ -43192,7 +43431,7 @@ } }, { - "description": "If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies.", + "description": "If `true`, the response includes:\n\n* The document count (how many documents contain this field).\n* The 
sum of document frequencies (the sum of document frequencies for all terms in this field).\n* The sum of total term frequencies (the sum of total term frequencies of each term in this field).", "name": "field_statistics", "required": false, "serverDefault": true, @@ -43244,7 +43483,7 @@ } }, { - "description": "Specifies the node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nIt is random by default.", "name": "preference", "required": false, "type": { @@ -43271,7 +43510,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value that is used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -43283,7 +43522,7 @@ } }, { - "description": "If `true`, the response includes term frequency and document frequency.", + "description": "If `true`, the response includes:\n\n* The total term frequency (how often a term occurs in all documents).\n* The document frequency (the number of documents containing the current term).\n\nBy default these values are not returned since term statistics can have a serious performance impact.", "name": "term_statistics", "required": false, "serverDefault": false, @@ -43308,7 +43547,7 @@ } }, { - "description": "Specific version type.", + "description": "The version type.", "name": "version_type", "required": false, "type": { @@ -43320,7 +43559,7 @@ } } ], - "specLocation": "_global/termvectors/TermVectorsRequest.ts#L33-L132" + "specLocation": "_global/termvectors/TermVectorsRequest.ts#L33-L187" }, { "body": { @@ -43419,7 +43658,7 @@ "body": { "kind": "no_body" }, - "description": "Delete a transform.\nDeletes a transform.", + "description": "Delete a transform.", "inherits": { "type": { "name": "RequestBase", @@ -43513,7 +43752,7 @@ "body": { "kind": "no_body" }, - "description": "Get transforms.\nRetrieves configuration information for 
transforms.", + "description": "Get transforms.\nGet configuration information for transforms.", "inherits": { "type": { "name": "RequestBase", @@ -43593,7 +43832,7 @@ } } ], - "specLocation": "transform/get_transform/GetTransformRequest.ts#L24-L83" + "specLocation": "transform/get_transform/GetTransformRequest.ts#L24-L84" }, { "body": { @@ -43640,7 +43879,7 @@ "body": { "kind": "no_body" }, - "description": "Get transform stats.\nRetrieves usage information for transforms.", + "description": "Get transform stats.\nGet usage information for transforms.", "inherits": { "type": { "name": "RequestBase", @@ -43719,7 +43958,7 @@ } } ], - "specLocation": "transform/get_transform_stats/GetTransformStatsRequest.ts#L25-L75" + "specLocation": "transform/get_transform_stats/GetTransformStatsRequest.ts#L25-L76" }, { "body": { @@ -43918,7 +44157,7 @@ } } ], - "specLocation": "transform/preview_transform/PreviewTransformRequest.ts#L33-L118" + "specLocation": "transform/preview_transform/PreviewTransformRequest.ts#L33-L119" }, { "body": { @@ -44148,7 +44387,7 @@ } } ], - "specLocation": "transform/put_transform/PutTransformRequest.ts#L33-L129" + "specLocation": "transform/put_transform/PutTransformRequest.ts#L33-L130" }, { "body": { @@ -44229,7 +44468,7 @@ } } ], - "specLocation": "transform/reset_transform/ResetTransformRequest.ts#L24-L61" + "specLocation": "transform/reset_transform/ResetTransformRequest.ts#L24-L62" }, { "body": { @@ -44297,7 +44536,7 @@ } } ], - "specLocation": "transform/schedule_now_transform/ScheduleNowTransformRequest.ts#L23-L56" + "specLocation": "transform/schedule_now_transform/ScheduleNowTransformRequest.ts#L23-L57" }, { "body": { @@ -44377,7 +44616,7 @@ } } ], - "specLocation": "transform/start_transform/StartTransformRequest.ts#L24-L72" + "specLocation": "transform/start_transform/StartTransformRequest.ts#L24-L73" }, { "body": { @@ -44497,7 +44736,7 @@ } } ], - "specLocation": "transform/stop_transform/StopTransformRequest.ts#L24-L83" + 
"specLocation": "transform/stop_transform/StopTransformRequest.ts#L24-L84" }, { "body": { @@ -44688,7 +44927,7 @@ } } ], - "specLocation": "transform/update_transform/UpdateTransformRequest.ts#L31-L112" + "specLocation": "transform/update_transform/UpdateTransformRequest.ts#L31-L113" }, { "body": { @@ -45214,7 +45453,7 @@ } }, { - "description": "Specifies the documents to update using the Query DSL.", + "description": "The documents to update using the Query DSL.", "name": "query", "required": false, "type": { @@ -45250,7 +45489,7 @@ } }, { - "description": "What to do if update by query hits version conflicts: `abort` or `proceed`.", + "description": "The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.", "name": "conflicts", "required": false, "serverDefault": "abort", @@ -45264,7 +45503,7 @@ } ] }, - "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.", + "description": "Update documents.\nUpdates documents that match the specified query.\nIf no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:\n\n* `read`\n* `index` or `write`\n\nYou can specify the query criteria in the request URI or the request body using the same syntax as the search API.\n\nWhen you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning.\nWhen the versions match, the document is updated and the version number is incremented.\nIf a document changes between the time that 
the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails.\nYou can opt to count version conflicts instead of halting and returning by setting `conflicts` to `proceed`.\nNote that if you opt to count version conflicts, the operation could attempt to update more documents from the source than `max_docs` until it has successfully updated `max_docs` documents or it has gone through every document in the source query.\n\nNOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.\n\nWhile processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents.\nA bulk update request is performed for each batch of matching documents.\nAny query or update failures cause the update by query request to fail and the failures are shown in the response.\nAny update requests that completed successfully still stick, they are not rolled back.\n\n**Throttling update requests**\n\nTo control the rate at which update by query issues batches of update operations, you can set `requests_per_second` to any positive decimal number.\nThis pads each batch with a wait time to throttle the rate.\nSet `requests_per_second` to `-1` to turn off throttling.\n\nThrottling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.\nThe padding time is the difference between the batch size divided by the `requests_per_second` and the time spent writing.\nBy default the batch size is 1000, so if `requests_per_second` is set to `500`:\n\n```\ntarget_time = 1000 / 500 per second = 2 seconds\nwait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds\n```\n\nSince the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before 
starting the next set.\nThis is \"bursty\" instead of \"smooth\".\n\n**Slicing**\n\nUpdate by query supports sliced scroll to parallelize the update process.\nThis can improve efficiency and provide a convenient way to break the request down into smaller parts.\n\nSetting `slices` to `auto` chooses a reasonable number for most data streams and indices.\nThis setting will use one slice per shard, up to a certain limit.\nIf there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.\n\nAdding `slices` to `_update_by_query` just automates the manual process of creating sub-requests, which means it has some quirks:\n\n* You can see these requests in the tasks APIs. These sub-requests are \"child\" tasks of the task for the request with slices.\n* Fetching the status of the task for the request with `slices` only contains the status of completed slices.\n* These sub-requests are individually addressable for things like cancellation and rethrottling.\n* Rethrottling the request with `slices` will rethrottle the unfinished sub-request proportionally.\n* Canceling the request with slices will cancel each sub-request.\n* Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.\n* Parameters like `requests_per_second` and `max_docs` on a request with slices are distributed proportionally to each sub-request. 
Combine that with the point above about distribution being uneven and you should conclude that using `max_docs` with `slices` might not result in exactly `max_docs` documents being updated.\n* Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.\n\nIf you're slicing manually or otherwise tuning automatic slicing, keep in mind that:\n\n* Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.\n* Update performance scales linearly across available resources with the number of slices.\n\nWhether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.\n\n**Update the document source**\n\nUpdate by query supports scripts to update the document source.\nAs with the update API, you can set `ctx.op` to change the operation that is performed.\n\nSet `ctx.op = \"noop\"` if your script decides that it doesn't have to make any changes.\nThe update by query operation skips updating the document and increments the `noop` counter.\n\nSet `ctx.op = \"delete\"` if your script decides that the document should be deleted.\nThe update by query operation deletes the document and increments the `deleted` counter.\n\nUpdate by query supports only `index`, `noop`, and `delete`.\nSetting `ctx.op` to anything else is an error.\nSetting any other field in `ctx` is an error.\nThis API enables you to only modify the source of matching documents; you cannot move them.", "inherits": { "type": { "name": "RequestBase", @@ -45278,7 +45517,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases to search.\nSupports wildcards (`*`).\nTo 
search all data streams or indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases to search.\nIt supports wildcards (`*`).\nTo search all data streams or indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": true, "type": { @@ -45305,7 +45544,7 @@ } }, { - "description": "Analyzer to use for the query string.", + "description": "The analyzer to use for the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyzer", "required": false, "type": { @@ -45317,7 +45556,7 @@ } }, { - "description": "If `true`, wildcard and prefix queries are analyzed.", + "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyze_wildcard", "required": false, "serverDefault": false, @@ -45330,7 +45569,7 @@ } }, { - "description": "What to do if update by query hits version conflicts: `abort` or `proceed`.", + "description": "The preferred behavior when update by query hits version conflicts: `abort` or `proceed`.", "name": "conflicts", "required": false, "serverDefault": "abort", @@ -45343,7 +45582,7 @@ } }, { - "description": "The default operator for query string query: `AND` or `OR`.", + "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "default_operator", "required": false, "serverDefault": "OR", @@ -45356,7 +45595,7 @@ } }, { - "description": "Field to use as default where no field prefix is given in the query string.", + "description": "The field to use as default where no field prefix is given in the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "df", "required": false, "type": { @@ -45368,7 +45607,7 @@ } }, { - "description": "Type of 
index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.\nValid values are: `all`, `open`, `closed`, `hidden`, `none`.", "name": "expand_wildcards", "required": false, "type": { @@ -45405,7 +45644,7 @@ } }, { - "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.", + "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "lenient", "required": false, "serverDefault": false, @@ -45418,7 +45657,7 @@ } }, { - "description": "Maximum number of documents to process.\nDefaults to all documents.", + "description": "The maximum number of documents to process.\nIt defaults to all documents.\nWhen set to a value less than or equal to `scroll_size` then a scroll will not be used to retrieve the results for the operation.", "name": "max_docs", "required": false, "type": { @@ -45430,7 +45669,7 @@ } }, { - "description": "ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", + "description": "The ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, 
then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", "name": "pipeline", "required": false, "type": { @@ -45442,7 +45681,7 @@ } }, { - "description": "Specifies the node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nIt is random by default.", "name": "preference", "required": false, "type": { @@ -45454,7 +45693,7 @@ } }, { - "description": "Query in the Lucene query string syntax.", + "description": "A query in the Lucene query string syntax.", "name": "q", "required": false, "type": { @@ -45466,7 +45705,7 @@ } }, { - "description": "If `true`, Elasticsearch refreshes affected shards to make the operation visible to search.", + "description": "If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes.\nThis is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed.", "name": "refresh", "required": false, "serverDefault": false, @@ -45479,7 +45718,7 @@ } }, { - "description": "If `true`, the request cache is used for this request.", + "description": "If `true`, the request cache is used for this request.\nIt defaults to the index-level setting.", "name": "request_cache", "required": false, "type": { @@ -45504,7 +45743,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -45516,9 +45755,11 @@ } }, { - "description": "Period to retain the search context for scrolling.", + "description": "The period to retain the search context for scrolling.", + "extDocId": "search-scroll-results", "name": "scroll", "required": false, + "serverDefault": "5m", 
"type": { "kind": "instance_of", "type": { @@ -45528,7 +45769,7 @@ } }, { - "description": "Size of the scroll request that powers the operation.", + "description": "The size of the scroll request that powers the operation.", "name": "scroll_size", "required": false, "serverDefault": 1000, @@ -45541,7 +45782,7 @@ } }, { - "description": "Explicit timeout for each search request.", + "description": "An explicit timeout for each search request.\nBy default, there is no timeout.", "name": "search_timeout", "required": false, "type": { @@ -45553,7 +45794,7 @@ } }, { - "description": "The type of the search operation. Available options: `query_then_fetch`, `dfs_query_then_fetch`.", + "description": "The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.", "name": "search_type", "required": false, "type": { @@ -45593,7 +45834,7 @@ } }, { - "description": "Specific `tag` of the request for logging and statistical purposes.", + "description": "The specific `tag` of the request for logging and statistical purposes.", "name": "stats", "required": false, "type": { @@ -45608,7 +45849,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\nUse with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.", + "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early 
termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.", "name": "terminate_after", "required": false, "type": { @@ -45620,7 +45861,7 @@ } }, { - "description": "Period each update request waits for the following operations: dynamic mapping updates, waiting for active shards.", + "description": "The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards.\nBy default, it is one minute.\nThis guarantees Elasticsearch waits for at least the timeout before failing.\nThe actual wait time could be longer, particularly when multiple waits occur.", "name": "timeout", "required": false, "serverDefault": "1m", @@ -45657,7 +45898,7 @@ } }, { - "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).", + "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).\nThe `timeout` parameter controls how long each write request waits for unavailable shards to become available.\nBoth work exactly the way they work in the bulk API.", "name": "wait_for_active_shards", "required": false, "serverDefault": "1", @@ -45670,7 +45911,7 @@ } }, { - "description": "If `true`, the request blocks until the operation is complete.", + "description": "If `true`, the request blocks until the operation is complete.\nIf `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task.\nElasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`.", "name": "wait_for_completion", "required": false, "serverDefault": true, @@ -45683,13 
+45924,14 @@ } } ], - "specLocation": "_global/update_by_query/UpdateByQueryRequest.ts#L37-L233" + "specLocation": "_global/update_by_query/UpdateByQueryRequest.ts#L37-L336" }, { "body": { "kind": "properties", "properties": [ { + "description": "The number of scroll responses pulled back by the update by query.", "name": "batches", "required": false, "type": { @@ -45701,6 +45943,7 @@ } }, { + "description": "Array of failures if there were any unrecoverable errors during the process.\nIf this is non-empty then the request ended because of those failures.\nUpdate by query is implemented using batches.\nAny failure causes the entire process to end, but all failures in the current batch are collected into the array.\nYou can use the `conflicts` option to prevent reindex from ending when version conflicts occur.", "name": "failures", "required": false, "type": { @@ -45715,6 +45958,7 @@ } }, { + "description": "The number of documents that were ignored because the script used for the update by query returned a noop value for `ctx.op`.", "name": "noops", "required": false, "type": { @@ -45726,6 +45970,7 @@ } }, { + "description": "The number of documents that were successfully deleted.", "name": "deleted", "required": false, "type": { @@ -45737,6 +45982,7 @@ } }, { + "description": "The number of requests per second effectively run during the update by query.", "name": "requests_per_second", "required": false, "type": { @@ -45748,6 +45994,7 @@ } }, { + "description": "The number of retries attempted by update by query.\n`bulk` is the number of bulk actions retried.\n`search` is the number of search actions retried.", "name": "retries", "required": false, "type": { @@ -45770,6 +46017,7 @@ } }, { + "description": "If true, some requests timed out during the update by query.", "name": "timed_out", "required": false, "type": { @@ -45781,6 +46029,7 @@ } }, { + "description": "The number of milliseconds from start to end of the whole operation.", "name": "took", "required": 
false, "type": { @@ -45801,6 +46050,7 @@ } }, { + "description": "The number of documents that were successfully processed.", "name": "total", "required": false, "type": { @@ -45812,6 +46062,7 @@ } }, { + "description": "The number of documents that were successfully updated.", "name": "updated", "required": false, "type": { @@ -45823,6 +46074,7 @@ } }, { + "description": "The number of version conflicts that the update by query hit.", "name": "version_conflicts", "required": false, "type": { @@ -45845,6 +46097,7 @@ } }, { + "description": "The number of milliseconds the request slept to conform to `requests_per_second`.", "name": "throttled_millis", "required": false, "type": { @@ -45876,6 +46129,7 @@ } }, { + "description": "This field should always be equal to zero in an _update_by_query response.\nIt only has meaning when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`.", "name": "throttled_until_millis", "required": false, "type": { @@ -45902,7 +46156,7 @@ "name": "Response", "namespace": "_global.update_by_query" }, - "specLocation": "_global/update_by_query/UpdateByQueryResponse.ts#L26-L45" + "specLocation": "_global/update_by_query/UpdateByQueryResponse.ts#L26-L67" }, { "kind": "enum", @@ -51232,7 +51486,7 @@ } ], "shortcutProperty": "source", - "specLocation": "_types/Scripting.ts#L73-L97" + "specLocation": "_types/Scripting.ts#L75-L99" }, { "isOpen": true, @@ -53317,7 +53571,7 @@ } } ], - "specLocation": "_types/Scripting.ts#L99-L102" + "specLocation": "_types/Scripting.ts#L101-L104" }, { "kind": "type_alias", @@ -77345,7 +77599,7 @@ } }, { - "description": "ID of the search template to use. If no source is specified,\nthis parameter is required.", + "description": "The ID of the search template to use. 
If no `source` is specified,\nthis parameter is required.", "name": "id", "required": false, "type": { @@ -77389,7 +77643,7 @@ } }, { - "description": "An inline search template. Supports the same parameters as the search API's\nrequest body. Also supports Mustache variables. If no id is specified, this\nparameter is required.", + "description": "An inline search template. Supports the same parameters as the search API's\nrequest body. It also supports Mustache variables. If no `id` is specified, this\nparameter is required.", "name": "source", "required": false, "type": { @@ -77403,6 +77657,60 @@ ], "specLocation": "_global/msearch_template/types.ts#L28-L54" }, + { + "kind": "enum", + "members": [ + { + "description": "The default context if no other context is specified.", + "name": "painless_test" + }, + { + "description": "Treats scripts as if they were run inside a script query.", + "name": "filter" + }, + { + "description": "Treats scripts as if they were run inside a `script_score` function in a `function_score` query.", + "name": "score" + }, + { + "description": "The context for boolean fields. The script returns a `true` or `false` response.", + "name": "boolean_field" + }, + { + "description": "The context for date fields. `emit` takes a long value and the script returns a sorted list of dates.", + "name": "date_field" + }, + { + "description": "The context for double numeric fields. The script returns a sorted list of double values.", + "name": "double_field" + }, + { + "description": "The context for geo-point fields. `emit` takes two double parameters, the latitude and longitude values, and the script returns an object in GeoJSON format containing the coordinates for the geo point.", + "name": "geo_point_field" + }, + { + "description": "The context for `ip` fields. The script returns a sorted list of IP addresses.", + "name": "ip_field" + }, + { + "description": "The context for keyword fields. 
The script returns a sorted list of string values.", + "name": "keyword_field" + }, + { + "description": "The context for long numeric fields. The script returns a sorted list of long values.", + "name": "long_field" + }, + { + "description": "The context for composite runtime fields. The script returns a map of values.", + "name": "composite_field" + } + ], + "name": { + "name": "PainlessContext", + "namespace": "_global.scripts_painless_execute" + }, + "specLocation": "_global/scripts_painless_execute/types.ts#L57-L80" + }, { "codegenNames": [ "fetch", @@ -94518,40 +94826,6 @@ }, "specLocation": "eql/search/types.ts#L20-L32" }, - { - "kind": "enum", - "members": [ - { - "name": "csv" - }, - { - "name": "json" - }, - { - "name": "tsv" - }, - { - "name": "txt" - }, - { - "name": "yaml" - }, - { - "name": "cbor" - }, - { - "name": "smile" - }, - { - "name": "arrow" - } - ], - "name": { - "name": "EsqlFormat", - "namespace": "esql._types" - }, - "specLocation": "esql/_types/QueryParameters.ts#L20-L29" - }, { "kind": "type_alias", "name": { @@ -94672,6 +94946,40 @@ "kind": "union_of" } }, + { + "kind": "enum", + "members": [ + { + "name": "csv" + }, + { + "name": "json" + }, + { + "name": "tsv" + }, + { + "name": "txt" + }, + { + "name": "yaml" + }, + { + "name": "cbor" + }, + { + "name": "smile" + }, + { + "name": "arrow" + } + ], + "name": { + "name": "EsqlFormat", + "namespace": "esql.query" + }, + "specLocation": "esql/query/QueryParameters.ts#L20-L29" + }, { "kind": "type_alias", "name": { @@ -95704,15 +96012,19 @@ "kind": "enum", "members": [ { + "description": "Disable metadata changes, such as closing the index.", "name": "metadata" }, { + "description": "Disable read operations.", "name": "read" }, { + "description": "Disable write operations and metadata changes.", "name": "read_only" }, { + "description": "Disable write operations. 
However, metadata changes are still allowed.", "name": "write" } ], @@ -95720,7 +96032,7 @@ "name": "IndicesBlockOptions", "namespace": "indices.add_block" }, - "specLocation": "indices/add_block/IndicesAddBlockRequest.ts#L51-L56" + "specLocation": "indices/add_block/IndicesAddBlockRequest.ts#L91-L100" }, { "kind": "type_alias", @@ -95812,7 +96124,7 @@ "name": "ModeEnum", "namespace": "indices.migrate_reindex" }, - "specLocation": "indices/migrate_reindex/MigrateReindexRequest.ts#L51-L53" + "specLocation": "indices/migrate_reindex/MigrateReindexRequest.ts#L54-L56" }, { "kind": "enum", @@ -119859,7 +120171,7 @@ }, "properties": [ { - "description": "The language the script is written in.", + "description": "The language the script is written in.\nFor search templates, use `mustache`.", "name": "lang", "required": true, "type": { @@ -119893,7 +120205,7 @@ } }, { - "description": "The script source.", + "description": "The script source.\nFor search templates, an object containing the search template.", "name": "source", "required": true, "type": { @@ -119905,7 +120217,7 @@ } } ], - "specLocation": "_types/Scripting.ts#L47-L57" + "specLocation": "_types/Scripting.ts#L47-L59" }, { "kind": "interface", @@ -140154,7 +140466,7 @@ } }, { - "description": "Maximum number of terms that must be returned per field.", + "description": "The maximum number of terms that must be returned per field.", "name": "max_num_terms", "required": false, "serverDefault": 25, @@ -140167,7 +140479,7 @@ } }, { - "description": "Ignore words with more than this frequency in the source doc.\nDefaults to unbounded.", + "description": "Ignore words with more than this frequency in the source doc.\nIt defaults to unbounded.", "name": "max_term_freq", "required": false, "type": { @@ -141755,7 +142067,7 @@ }, "properties": [ { - "description": "Document that’s temporarily indexed in-memory and accessible 
from the script.", "name": "document", "required": true, "type": { @@ -141763,7 +142075,7 @@ } }, { - "description": "Index containing a mapping that’s compatible with the indexed document.\nYou may specify a remote index by prefixing the index with the remote cluster alias.", + "description": "Index containing a mapping that's compatible with the indexed document.\nYou may specify a remote index by prefixing the index with the remote cluster alias.\nFor example, `remote1:my_index` indicates that you want to run the painless script against the \"my_index\" index on the \"remote1\" cluster.\nThis request will be forwarded to the \"remote1\" cluster if you have configured a connection to that remote cluster.\n\nNOTE: Wildcards are not accepted in the index expression for this endpoint.\nThe expression `*:myindex` will return the error \"No such remote cluster\" and the expression `logs*` or `remote1:logs*` will return the error \"index not found\".", "name": "index", "required": true, "type": { @@ -141787,7 +142099,7 @@ } } ], - "specLocation": "_global/scripts_painless_execute/types.ts#L25-L39" + "specLocation": "_global/scripts_painless_execute/types.ts#L27-L46" }, { "kind": "interface", diff --git a/output/schema/schema.json b/output/schema/schema.json index ee3c12c5f3..7987e3b2b1 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -121662,40 +121662,6 @@ }, "specLocation": "eql/search/types.ts#L20-L32" }, - { - "kind": "enum", - "members": [ - { - "name": "csv" - }, - { - "name": "json" - }, - { - "name": "tsv" - }, - { - "name": "txt" - }, - { - "name": "yaml" - }, - { - "name": "cbor" - }, - { - "name": "smile" - }, - { - "name": "arrow" - } - ], - "name": { - "name": "EsqlFormat", - "namespace": "esql._types" - }, - "specLocation": "esql/_types/QueryParameters.ts#L20-L29" - }, { "kind": "interface", "name": { @@ -122051,7 +122017,7 @@ "kind": "instance_of", "type": { "name": "EsqlFormat", - "namespace": "esql._types" + "namespace": 
"esql.query" } } }, @@ -122261,6 +122227,40 @@ }, "specLocation": "esql/async_query_get/AsyncQueryGetResponse.ts#L22-L24" }, + { + "kind": "enum", + "members": [ + { + "name": "csv" + }, + { + "name": "json" + }, + { + "name": "tsv" + }, + { + "name": "txt" + }, + { + "name": "yaml" + }, + { + "name": "cbor" + }, + { + "name": "smile" + }, + { + "name": "arrow" + } + ], + "name": { + "name": "EsqlFormat", + "namespace": "esql.query" + }, + "specLocation": "esql/query/QueryParameters.ts#L20-L29" + }, { "kind": "request", "attachedBehaviors": [ @@ -122402,7 +122402,7 @@ "kind": "instance_of", "type": { "name": "EsqlFormat", - "namespace": "esql._types" + "namespace": "esql.query" } } }, diff --git a/output/typescript/types.ts b/output/typescript/types.ts index f466905465..45f6365f47 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -10503,8 +10503,6 @@ export type EqlSearchResponse = EqlEqlSearchResponseBase