diff --git a/docs/reference.asciidoc b/docs/reference.asciidoc index 72f5286..f5079f9 100644 --- a/docs/reference.asciidoc +++ b/docs/reference.asciidoc @@ -570,6 +570,12 @@ client.msearch({ ... }) ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. ** *`ignore_throttled` (Optional, boolean)*: If true, concrete, expanded or aliased indices are ignored when frozen. ** *`ignore_unavailable` (Optional, boolean)*: If true, missing or closed indices are not included in the response. +** *`include_named_queries_score` (Optional, boolean)*: Indicates whether `hit.matched_queries` should be rendered as a map that includes +the name of the matched query associated with its score (true) +or as an array containing the names of the matched queries (false). +This functionality reruns each named query on every hit in a search response. +Typically, this adds a small overhead to a request. +However, using computationally expensive named queries on a large number of hits may add significant overhead. ** *`max_concurrent_searches` (Optional, number)*: Maximum number of concurrent searches the multi search API can execute. ** *`max_concurrent_shard_requests` (Optional, number)*: Maximum number of concurrent shard requests that each sub-search request executes per node. ** *`pre_filter_shard_size` (Optional, number)*: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. @@ -911,6 +917,12 @@ If the request can target data streams, this argument determines whether wildcar Supports a list of values, such as `open,hidden`. ** *`ignore_throttled` (Optional, boolean)*: If `true`, concrete, expanded or aliased indices will be ignored when frozen. ** *`ignore_unavailable` (Optional, boolean)*: If `false`, the request returns an error if it targets a missing or closed index. +** *`include_named_queries_score` (Optional, boolean)*: Indicates whether `hit.matched_queries` should be rendered as a map that includes +the name of the matched query associated with its score (true) +or as an array containing the names of the matched queries (false). +This functionality reruns each named query on every hit in a search response. +Typically, this adds a small overhead to a request. +However, using computationally expensive named queries on a large number of hits may add significant overhead. ** *`lenient` (Optional, boolean)*: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can only be used when the `q` query string parameter is specified. ** *`max_concurrent_shard_requests` (Optional, number)*: Defines the number of concurrent shard requests per node this search executes concurrently. @@ -1202,6 +1214,7 @@ If the index has a default ingest pipeline specified, then setting the value to If a final pipeline is configured it will always run, regardless of the value of this parameter.
** *`preference` (Optional, string)*: Specifies the node or shard the operation should be performed on. Random by default. +** *`q` (Optional, string)*: Query in the Lucene query string syntax. ** *`refresh` (Optional, boolean)*: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search. ** *`request_cache` (Optional, boolean)*: If `true`, the request cache is used for this request. ** *`requests_per_second` (Optional, float)*: The throttle for this request in sub-requests per second. @@ -1409,8 +1422,8 @@ A partial reduction is performed every time the coordinating node has received a Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. -> info -> CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use [the /_alias endpoints](#endpoint-alias). + +CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. {ref}/cat-alias.html[Endpoint documentation] [source,ts] @@ -1430,9 +1443,9 @@ client.cat.aliases({ ... }) Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. -> info -> CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use [the /_component_template endpoints](#endpoint-component-template). + +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the get component template API. {ref}/cat-component-templates.html[Endpoint documentation] [source,ts] @@ -1449,11 +1462,11 @@ client.cat.componentTemplates({ ... }) [discrete] ==== count Get a document count. -Provides quick access to a document count for a data stream, an index, or an entire cluster.n/ +Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. -> info -> CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use [the /_count endpoints](#endpoint-count). + +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use the count API. {ref}/cat-count.html[Endpoint documentation] [source,ts] @@ -1484,9 +1497,6 @@ client.cat.help() ==== indices Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. -> info -> CAT APIs are only intended for human consumption using the command line or Kibana console. -They are not intended for use by applications. For application consumption, use an index endpoint. 
Use this request to get the following information for each index in a cluster: - shard count @@ -1496,7 +1506,10 @@ Use this request to get the following information for each index in a cluster: - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. -To get an accurate count of Elasticsearch documents, use the [/_cat/count](#operation-cat-count) or [count](#endpoint-count) endpoints. +To get an accurate count of Elasticsearch documents, use the cat count or count APIs. + +CAT APIs are only intended for human consumption using the command line or Kibana console. +They are not intended for use by applications. For application consumption, use an index endpoint. {ref}/cat-indices.html[Endpoint documentation] [source,ts] @@ -1522,10 +1535,9 @@ Supports wildcards (`*`). To target all data streams and indices, omit this para Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_ml/data_frame/analytics endpoints](#endpoint-ml). +application consumption, use the get data frame analytics jobs statistics API. {ref}/cat-dfanalytics.html[Endpoint documentation] [source,ts] @@ -1553,10 +1565,9 @@ This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_ml/datafeeds endpoints](#endpoint-ml). +application consumption, use the get datafeed statistics API. {ref}/cat-datafeeds.html[Endpoint documentation] [source,ts] @@ -1590,10 +1601,9 @@ This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_ml/anomaly_detectors endpoints](#endpoint-ml). +application consumption, use the get anomaly detection job statistics API. {ref}/cat-anomaly-detectors.html[Endpoint documentation] [source,ts] @@ -1625,10 +1635,9 @@ matches. Get trained models. Returns configuration and usage information about inference trained models. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_ml/trained_models endpoints](#endpoint-ml). +application consumption, use the get trained models statistics API. 
{ref}/cat-trained-model.html[Endpoint documentation] [source,ts] @@ -1655,10 +1664,9 @@ If `false`, the API returns a 404 status code when there are no matches or only Get transforms. Returns configuration and usage information about transforms. -> info -> CAT APIs are only intended for human consumption using the Kibana +CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For -application consumption, use [the /_transform endpoints](#endpoint-transform). +application consumption, use the get transform statistics API. {ref}/cat-transforms.html[Endpoint documentation] [source,ts] @@ -1799,7 +1807,7 @@ client.cluster.putComponentTemplate({ name, template }) * *Request (object):* ** *`name` (string)*: Name of the component template to create. -Elasticsearch includes the following built-in component templates: `logs-mappings`; 'logs-settings`; `metrics-mappings`; `metrics-settings`;`synthetics-mapping`; `synthetics-settings`. +Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. @@ -1817,6 +1825,428 @@ that uses deprecated components, Elasticsearch will emit a deprecation warning. ** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +[discrete] +=== connector +[discrete] +==== check_in +Updates the last_seen field in the connector and sets it to the current timestamp. + +{ref}/check-in-connector-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.checkIn({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be checked in + +[discrete] +==== delete +Deletes a connector. + +{ref}/delete-connector-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.delete({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be deleted +** *`delete_sync_jobs` (Optional, boolean)*: A flag indicating if associated sync jobs should also be removed. Defaults to false. + +[discrete] +==== get +Retrieves a connector. + +{ref}/get-connector-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.get({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector + +[discrete] +==== list +Returns existing connectors. + +{ref}/list-connector-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.list({ ...
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`size` (Optional, number)*: Specifies a max number of results to get +** *`index_name` (Optional, string | string[])*: A list of connector index names to fetch connector documents for +** *`connector_name` (Optional, string | string[])*: A list of connector names to fetch connector documents for +** *`service_type` (Optional, string | string[])*: A list of connector service types to fetch connector documents for +** *`query` (Optional, string)*: A wildcard query string that filters connectors with matching name, description or index name + +[discrete] +==== post +Creates a connector. + +{ref}/create-connector-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.post({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`description` (Optional, string)* +** *`index_name` (Optional, string)* +** *`is_native` (Optional, boolean)* +** *`language` (Optional, string)* +** *`name` (Optional, string)* +** *`service_type` (Optional, string)* + +[discrete] +==== put +Creates or updates a connector. + +{ref}/create-connector-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.put({ ... }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (Optional, string)*: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. +** *`description` (Optional, string)* +** *`index_name` (Optional, string)* +** *`is_native` (Optional, boolean)* +** *`language` (Optional, string)* +** *`name` (Optional, string)* +** *`service_type` (Optional, string)* + +[discrete] +==== sync_job_cancel +Cancels a connector sync job. + +{ref}/cancel-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobCancel({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job + +[discrete] +==== sync_job_delete +Deletes a connector sync job. + +{ref}/delete-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobDelete({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job to be deleted + +[discrete] +==== sync_job_get +Retrieves a connector sync job. + +{ref}/get-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobGet({ connector_sync_job_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_sync_job_id` (string)*: The unique identifier of the connector sync job + +[discrete] +==== sync_job_list +Lists connector sync jobs. + +{ref}/list-connector-sync-jobs-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobList({ ... 
}) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`from` (Optional, number)*: Starting offset (default: 0) +** *`size` (Optional, number)*: Specifies a max number of results to get +** *`status` (Optional, Enum("canceling" | "canceled" | "completed" | "error" | "in_progress" | "pending" | "suspended"))*: A sync job status to fetch connector sync jobs for +** *`connector_id` (Optional, string)*: A connector id to fetch connector sync jobs for +** *`job_type` (Optional, Enum("full" | "incremental" | "access_control") | Enum("full" | "incremental" | "access_control")[])*: A list of job types to fetch the sync jobs for + +[discrete] +==== sync_job_post +Creates a connector sync job. + +{ref}/create-connector-sync-job-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.syncJobPost({ id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`id` (string)*: The id of the associated connector +** *`job_type` (Optional, Enum("full" | "incremental" | "access_control"))* +** *`trigger_method` (Optional, Enum("on_demand" | "scheduled"))* + +[discrete] +==== update_active_filtering +Activates the valid draft filtering for a connector. + +{ref}/update-connector-filtering-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateActiveFiltering({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated + +[discrete] +==== update_api_key_id +Updates the API key id in the connector document + +{ref}/update-connector-api-key-id-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateApiKeyId({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`api_key_id` (Optional, string)* +** *`api_key_secret_id` (Optional, string)* + +[discrete] +==== update_configuration +Updates the configuration field in the connector document + +{ref}/update-connector-configuration-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateConfiguration({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`configuration` (Optional, Record)* +** *`values` (Optional, Record)* + +[discrete] +==== update_error +Updates the error field in the connector document + +{ref}/update-connector-error-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateError({ connector_id, error }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`error` (T | null)* + +[discrete] +==== update_filtering +Updates the filtering field in the connector document + +{ref}/update-connector-filtering-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateFiltering({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`filtering` (Optional, { active, domain, draft }[])* +** *`rules` (Optional, { created_at, field, id, order, policy, rule, updated_at, value }[])* +** *`advanced_snippet` (Optional, { created_at, updated_at, value })* + +[discrete] +==== update_filtering_validation +Updates the draft filtering validation info for a connector.
+[source,ts] +---- +client.connector.updateFilteringValidation({ connector_id, validation }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`validation` ({ errors, state })* + +[discrete] +==== update_index_name +Updates the index_name in the connector document + +{ref}/update-connector-index-name-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateIndexName({ connector_id, index_name }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`index_name` (T | null)* + +[discrete] +==== update_name +Updates the name and description fields in the connector document + +{ref}/update-connector-name-description-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateName({ connector_id }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`name` (Optional, string)* +** *`description` (Optional, string)* + +[discrete] +==== update_native +Updates the is_native flag in the connector document +[source,ts] +---- +client.connector.updateNative({ connector_id, is_native }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`is_native` (boolean)* + +[discrete] +==== update_pipeline +Updates the pipeline field in the connector document + +{ref}/update-connector-pipeline-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updatePipeline({ connector_id, pipeline }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`pipeline` ({ extract_binary_content, name, reduce_whitespace, run_ml_inference })* + +[discrete] +==== update_scheduling +Updates the scheduling field in the connector document + +{ref}/update-connector-scheduling-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateScheduling({ connector_id, scheduling }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`scheduling` ({ access_control, full, incremental })* + +[discrete] +==== update_service_type +Updates the service type of the connector + +{ref}/update-connector-service-type-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateServiceType({ connector_id, service_type }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`service_type` (string)* + +[discrete] +==== update_status +Updates the status of the connector + +{ref}/update-connector-status-api.html[Endpoint documentation] +[source,ts] +---- +client.connector.updateStatus({ connector_id, status }) +---- + +[discrete] +==== Arguments + +* *Request (object):* +** *`connector_id` (string)*: The unique identifier of the connector to be updated +** *`status` (Enum("created" | "needs_configuration" | "configured" | "connected" | "error"))* + [discrete] === enrich [discrete] @@ -2158,6 +2588,8 @@ Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. 
Multi-byte characters count towards this limit faster. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== data_streams_stats @@ -2271,6 +2703,7 @@ client.indices.deleteDataStream({ name }) * *Request (object):* ** *`name` (string | string[])*: List of data streams to delete. Wildcard (`*`) expressions are supported. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values,such as `open,hidden`. [discrete] @@ -2467,6 +2900,7 @@ To target all data streams, omit this parameter or use `*` or `_all`. Supports a list of values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. ** *`include_defaults` (Optional, boolean)*: If `true`, return all default settings in the response. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_data_stream @@ -2488,6 +2922,7 @@ Wildcard (`*`) expressions are supported. If omitted, all data streams are retur ** *`expand_wildcards` (Optional, Enum("all" | "open" | "closed" | "hidden" | "none") | Enum("all" | "open" | "closed" | "hidden" | "none")[])*: Type of data stream that wildcard patterns can match. Supports a list of values, such as `open,hidden`. ** *`include_defaults` (Optional, boolean)*: If true, returns all relevant default configurations for the index template. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== get_index_template @@ -2602,6 +3037,8 @@ client.indices.migrateToDataStream({ name }) * *Request (object):* ** *`name` (string)*: Name of the index alias to convert to a data stream. +** *`master_timeout` (Optional, string | -1 | 0)*: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. +** *`timeout` (Optional, string | -1 | 0)*: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. [discrete] ==== modify_data_stream @@ -3351,6 +3788,7 @@ client.ml.closeJob({ job_id }) [discrete] ==== delete_calendar +Delete a calendar. Removes all scheduled events from a calendar, then deletes it. {ref}/ml-delete-calendar.html[Endpoint documentation] @@ -3367,7 +3805,7 @@ client.ml.deleteCalendar({ calendar_id }) [discrete] ==== delete_calendar_event -Deletes scheduled events from a calendar. +Delete events from a calendar. {ref}/ml-delete-calendar-event.html[Endpoint documentation] [source,ts] @@ -3385,7 +3823,7 @@ You can obtain this identifier by using the get calendar events API. 
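+
+As a hedged sketch (the calendar id is illustrative), the following lists the scheduled events in a calendar and deletes each one:
+
+[source,ts]
+----
+const { events } = await client.ml.getCalendarEvents({ calendar_id: 'planned-outages' })
+for (const event of events) {
+  // event_id is optional in the response type, so guard before deleting
+  if (event.event_id != null) {
+    await client.ml.deleteCalendarEvent({ calendar_id: 'planned-outages', event_id: event.event_id })
+  }
+}
+----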
[discrete] ==== delete_calendar_job -Deletes anomaly detection jobs from a calendar. +Delete anomaly jobs from a calendar. {ref}/ml-delete-calendar-job.html[Endpoint documentation] [source,ts] @@ -3403,7 +3841,7 @@ list of jobs or groups. [discrete] ==== delete_data_frame_analytics -Deletes a data frame analytics job. +Delete a data frame analytics job. {ref}/delete-dfanalytics.html[Endpoint documentation] [source,ts] @@ -3421,7 +3859,7 @@ client.ml.deleteDataFrameAnalytics({ id }) [discrete] ==== delete_datafeed -Deletes an existing datafeed. +Delete a datafeed. {ref}/ml-delete-datafeed.html[Endpoint documentation] [source,ts] @@ -3442,7 +3880,7 @@ stopping and deleting the datafeed. [discrete] ==== delete_filter -Deletes a filter. +Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. @@ -3489,8 +3927,8 @@ job deletion completes. [discrete] ==== delete_trained_model -Deletes an existing trained inference model that is currently not referenced -by an ingest pipeline. +Delete an unreferenced trained model. +The request deletes a trained inference model that is not referenced by an ingest pipeline. {ref}/delete-trained-models.html[Endpoint documentation] [source,ts] @@ -3507,7 +3945,7 @@ client.ml.deleteTrainedModel({ model_id }) [discrete] ==== delete_trained_model_alias -Deletes a trained model alias. +Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. @@ -3527,6 +3965,7 @@ client.ml.deleteTrainedModelAlias({ model_alias, model_id }) [discrete] ==== estimate_model_memory +Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. @@ -3558,7 +3997,7 @@ omitted from the request if no detectors have a `by_field_name`, [discrete] ==== evaluate_data_frame -Evaluates the data frame analytics for an annotated index. +Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth @@ -3580,7 +4019,7 @@ client.ml.evaluateDataFrame({ evaluation, index }) [discrete] ==== flush_job -Forces any buffered data to be processed by the job. +Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are @@ -3609,7 +4048,7 @@ client.ml.flushJob({ job_id }) [discrete] ==== get_calendar_events -Retrieves information about the scheduled events in calendars. +Get info about events in calendars. {ref}/ml-get-calendar-event.html[Endpoint documentation] [source,ts] @@ -3630,7 +4069,7 @@ client.ml.getCalendarEvents({ calendar_id }) [discrete] ==== get_calendars -Retrieves configuration information for calendars. +Get calendar configuration info. {ref}/ml-get-calendar.html[Endpoint documentation] [source,ts] @@ -3649,7 +4088,7 @@ client.ml.getCalendars({ ... }) [discrete] ==== get_data_frame_analytics -Retrieves configuration information for data frame analytics jobs. 
+Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. @@ -3686,7 +4125,7 @@ be retrieved and then added to another cluster. [discrete] ==== get_data_frame_analytics_stats -Retrieves usage information for data frame analytics jobs. +Get data frame analytics jobs usage info. {ref}/get-dfanalytics-stats.html[Endpoint documentation] [source,ts] @@ -3718,7 +4157,7 @@ there are no matches or only partial matches. [discrete] ==== get_datafeed_stats -Retrieves usage information for datafeeds. +Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the @@ -3752,7 +4191,7 @@ partial matches. If this parameter is `false`, the request returns a [discrete] ==== get_datafeeds -Retrieves configuration information for datafeeds. +Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the @@ -3788,7 +4227,7 @@ be retrieved and then added to another cluster. [discrete] ==== get_filters -Retrieves filters. +Get filters. You can get a single filter or all filters. {ref}/ml-get-filter.html[Endpoint documentation] [source,ts] @@ -3807,7 +4246,7 @@ client.ml.getFilters({ ... }) [discrete] ==== get_job_stats -Retrieves usage information for anomaly detection jobs. +Get anomaly detection jobs usage info. {ref}/ml-get-job-stats.html[Endpoint documentation] [source,ts] @@ -3836,7 +4275,7 @@ code when there are no matches or only partial matches. [discrete] ==== get_jobs -Retrieves configuration information for anomaly detection jobs. +Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using @@ -3871,7 +4310,9 @@ be retrieved and then added to another cluster. [discrete] ==== get_overall_buckets -Retrieves overall bucket results that summarize the bucket results of +Get overall bucket results. + +Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the @@ -3915,7 +4356,7 @@ using `_all` or by specifying `*` as the ``. [discrete] ==== get_trained_models -Retrieves configuration information for a trained model. +Get trained model configuration info. {ref}/get-trained-models.html[Endpoint documentation] [source,ts] @@ -3955,7 +4396,8 @@ tags are returned. [discrete] ==== get_trained_models_stats -Retrieves usage information for trained models. You can get usage information for multiple trained +Get trained models usage info. +You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. {ref}/get-trained-models-stats.html[Endpoint documentation] [source,ts] @@ -3983,7 +4425,7 @@ subset of results when there are partial matches. [discrete] ==== infer_trained_model -Evaluates a trained model. +Evaluate a trained model.
{ref}/infer-trained-model.html[Endpoint documentation] [source,ts] @@ -4005,9 +4447,8 @@ Currently, for NLP models, only a single value is allowed. [discrete] ==== open_job Open anomaly detection jobs. -An anomaly detection job must be opened in order for it to be ready to -receive and analyze data. It can be opened and closed multiple times -throughout its lifecycle. +An anomaly detection job must be opened to be ready to receive and analyze +data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once @@ -4028,7 +4469,7 @@ client.ml.openJob({ job_id }) [discrete] ==== post_calendar_events -Adds scheduled events to a calendar. +Add scheduled events to the calendar. {ref}/ml-post-calendar-event.html[Endpoint documentation] [source,ts] @@ -4045,6 +4486,7 @@ client.ml.postCalendarEvents({ calendar_id, events }) [discrete] ==== preview_data_frame_analytics +Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. {ref}/preview-dfanalytics.html[Endpoint documentation] @@ -4064,7 +4506,7 @@ this API. [discrete] ==== preview_datafeed -Previews a datafeed. +Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data @@ -4098,7 +4540,7 @@ used. You cannot specify a `job_config` object unless you also supply a `datafee [discrete] ==== put_calendar -Creates a calendar. +Create a calendar. {ref}/ml-put-calendar.html[Endpoint documentation] [source,ts] @@ -4116,7 +4558,7 @@ client.ml.putCalendar({ calendar_id }) [discrete] ==== put_calendar_job -Adds an anomaly detection job to a calendar. +Add anomaly detection job to calendar. {ref}/ml-put-calendar-job.html[Endpoint documentation] [source,ts] @@ -4133,7 +4575,7 @@ client.ml.putCalendarJob({ calendar_id, job_id }) [discrete] ==== put_data_frame_analytics -Instantiates a data frame analytics job. +Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. @@ -4206,7 +4648,7 @@ greater than that setting. [discrete] ==== put_datafeed -Instantiates a datafeed. +Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). @@ -4276,7 +4718,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value [discrete] ==== put_filter -Instantiates a filter. +Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. @@ -4329,7 +4771,8 @@ client.ml.putJob({ job_id, analysis_config, data_description }) [discrete] ==== put_trained_model -Enables you to supply a trained model that is not created by data frame analytics. +Create a trained model. +Enables you to supply a trained model that is not created by data frame analytics. {ref}/put-trained-models.html[Endpoint documentation] [source,ts] @@ -4375,8 +4818,9 @@ to complete.
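+
+As a sketch, supplying an externally trained regression model might look like the following; the model id, feature names, and definition payload are illustrative placeholders:
+
+[source,ts]
+----
+// Placeholder for a base64-encoded model definition produced outside Elasticsearch.
+const compressedDefinition = '...'
+await client.ml.putTrainedModel({
+  model_id: 'my-regression-model',
+  inference_config: { regression: {} },
+  input: { field_names: ['feature_1', 'feature_2'] },
+  compressed_definition: compressedDefinition
+})
+----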
[discrete] ==== put_trained_model_alias -Creates or updates a trained model alias. A trained model alias is a logical -name used to reference a single trained model. +Create or update a trained model alias. +A trained model alias is a logical name used to reference a single trained +model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. @@ -4410,7 +4854,7 @@ already assigned and this parameter is false, the API returns an error. [discrete] ==== put_trained_model_definition_part -Creates part of a trained model definition. +Create part of a trained model definition. {ref}/put-trained-model-definition-part.html[Endpoint documentation] [source,ts] @@ -4431,7 +4875,7 @@ order of their part number. The first part must be `0` and the final part must b [discrete] ==== put_trained_model_vocabulary -Creates a trained model vocabulary. +Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. @@ -4452,7 +4896,7 @@ client.ml.putTrainedModelVocabulary({ model_id, vocabulary }) [discrete] ==== reset_job -Resets an anomaly detection job. +Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a @@ -4477,7 +4921,7 @@ reset. [discrete] ==== start_data_frame_analytics -Starts a data frame analytics job. +Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the @@ -4508,7 +4952,7 @@ starts. [discrete] ==== start_datafeed -Starts one or more datafeeds. +Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. @@ -4541,7 +4985,8 @@ characters. [discrete] ==== start_trained_model_deployment -Starts a trained model deployment, which allocates the model to every machine learning node. +Start a trained model deployment. +It allocates the model to every machine learning node. {ref}/start-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -4576,7 +5021,7 @@ it will automatically be changed to a value less than the number of hardware thr [discrete] ==== stop_data_frame_analytics -Stops one or more data frame analytics jobs. +Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. @@ -4610,7 +5055,7 @@ stops. Defaults to 20 seconds. [discrete] ==== stop_datafeed -Stops one or more datafeeds. +Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. @@ -4633,7 +5078,7 @@ the identifier. [discrete] ==== stop_trained_model_deployment -Stops a trained model deployment. +Stop a trained model deployment. {ref}/stop-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -4655,7 +5100,7 @@ restart the model deployment. [discrete] ==== update_data_frame_analytics -Updates an existing data frame analytics job. +Update a data frame analytics job. 
{ref}/update-dfanalytics.html[Endpoint documentation] [source,ts] @@ -4685,7 +5130,7 @@ learning node capacity for it to be immediately assigned to a node. [discrete] ==== update_datafeed -Updates the properties of a datafeed. +Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, @@ -4758,6 +5203,7 @@ whether wildcard expressions match hidden data streams. Supports a list of value [discrete] ==== update_filter +Update a filter. Updates the description of a filter, adds items, or removes items from the list. {ref}/ml-update-filter.html[Endpoint documentation] @@ -4777,6 +5223,7 @@ client.ml.updateFilter({ filter_id }) [discrete] ==== update_job +Update an anomaly detection job. Updates certain properties of an anomaly detection job. {ref}/ml-update-job.html[Endpoint documentation] @@ -4842,7 +5289,7 @@ value is null, which means all results are retained. [discrete] ==== update_trained_model_deployment -Starts a trained model deployment, which allocates the model to every machine learning node. +Update a trained model deployment. {ref}/update-trained-model-deployment.html[Endpoint documentation] [source,ts] @@ -4963,7 +5410,7 @@ client.queryRules.putRule({ ruleset_id, rule_id, type, criteria, actions }) * *Request (object):* ** *`ruleset_id` (string)*: The unique identifier of the query ruleset containing the rule to be created or updated ** *`rule_id` (string)*: The unique identifier of the query rule within the specified ruleset to be created or updated -** *`type` (Enum("pinned"))* +** *`type` (Enum("pinned" | "exclude"))* ** *`criteria` ({ type, metadata, values } | { type, metadata, values }[])* ** *`actions` ({ ids, docs })* ** *`priority` (Optional, number)* @@ -5551,7 +5998,7 @@ client.synonyms.putSynonym({ id, synonyms_set }) * *Request (object):* ** *`id` (string)*: The id of the synonyms set to be created or updated -** *`synonyms_set` ({ id, synonyms }[])*: The synonym set information to update +** *`synonyms_set` ({ id, synonyms } | { id, synonyms }[])*: The synonym set information to update [discrete] ==== put_synonym_rule diff --git a/src/api/api/cat.ts b/src/api/api/cat.ts index 118001f..1451bd3 100644 --- a/src/api/api/cat.ts +++ b/src/api/api/cat.ts @@ -45,7 +45,7 @@ export default class Cat { } /** - * Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. > info > CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use [the /_alias endpoints](#endpoint-alias). + * Get aliases. Retrieves the cluster’s index aliases, including filter and routing information. The API does not return data stream aliases. CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. 
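+   * @example
+   * // A quick interactive sketch; the requested columns are illustrative. Applications should use the aliases API instead.
+   * const rows = await client.cat.aliases({ format: 'json', h: ['alias', 'index'] })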
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html | Elasticsearch API documentation} */ async aliases (this: That, params?: T.CatAliasesRequest | TB.CatAliasesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatAliasesResponse> @@ -85,7 +85,7 @@ export default class Cat { } /** - * Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use [the /_component_template endpoints](#endpoint-component-template). + * Get component templates. Returns information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-component-templates.html | Elasticsearch API documentation} */ async componentTemplates (this: That, params?: T.CatComponentTemplatesRequest | TB.CatComponentTemplatesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatComponentTemplatesResponse> @@ -125,7 +125,7 @@ export default class Cat { } /** - * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster.n/ The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use [the /_count endpoints](#endpoint-count). + * Get a document count. Provides quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents which have not yet been removed by the merge process. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html | Elasticsearch API documentation} */ async count (this: That, params?: T.CatCountRequest | TB.CatCountRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatCountResponse> @@ -195,7 +195,7 @@ export default class Cat { } /** - * Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. > info > CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents.
To get an accurate count of Elasticsearch documents, use the [/_cat/count](#operation-cat-count) or [count](#endpoint-count) endpoints. + * Get index information. Returns high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs. CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html | Elasticsearch API documentation} */ async indices (this: That, params?: T.CatIndicesRequest | TB.CatIndicesRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatIndicesResponse> @@ -235,7 +235,7 @@ export default class Cat { } /** - * Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/data_frame/analytics endpoints](#endpoint-ml). + * Get data frame analytics jobs. Returns configuration and usage information about data frame analytics jobs. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-dfanalytics.html | Elasticsearch API documentation} */ async mlDataFrameAnalytics (this: That, params?: T.CatMlDataFrameAnalyticsRequest | TB.CatMlDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlDataFrameAnalyticsResponse> @@ -275,7 +275,7 @@ export default class Cat { } /** - * Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/datafeeds endpoints](#endpoint-ml). + * Get datafeeds. Returns configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API.
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-datafeeds.html | Elasticsearch API documentation} */ async mlDatafeeds (this: That, params?: T.CatMlDatafeedsRequest | TB.CatMlDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlDatafeedsResponse> @@ -315,7 +315,7 @@ export default class Cat { } /** - * Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/anomaly_detectors endpoints](#endpoint-ml). + * Get anomaly detection jobs. Returns configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-anomaly-detectors.html | Elasticsearch API documentation} */ async mlJobs (this: That, params?: T.CatMlJobsRequest | TB.CatMlJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlJobsResponse> @@ -355,7 +355,7 @@ export default class Cat { } /** - * Get trained models. Returns configuration and usage information about inference trained models. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_ml/trained_models endpoints](#endpoint-ml). + * Get trained models. Returns configuration and usage information about inference trained models. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-trained-model.html | Elasticsearch API documentation} */ async mlTrainedModels (this: That, params?: T.CatMlTrainedModelsRequest | TB.CatMlTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatMlTrainedModelsResponse> @@ -395,7 +395,7 @@ export default class Cat { } /** - * Get transforms. Returns configuration and usage information about transforms. > info > CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use [the /_transform endpoints](#endpoint-transform). + * Get transforms. Returns configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API.
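+   * @example
+   * // A quick interactive sketch; the requested columns are illustrative. Applications should use the get transform statistics API instead.
+   * const transforms = await client.cat.transforms({ format: 'json', h: ['id', 'state'] })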
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-transforms.html | Elasticsearch API documentation} */ async transforms (this: That, params?: T.CatTransformsRequest | TB.CatTransformsRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.CatTransformsResponse> diff --git a/src/api/api/connector.ts b/src/api/api/connector.ts new file mode 100644 index 0000000..d9ec7c4 --- /dev/null +++ b/src/api/api/connector.ts @@ -0,0 +1,993 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* eslint-disable import/export */ +/* eslint-disable @typescript-eslint/no-misused-new */ +/* eslint-disable @typescript-eslint/no-extraneous-class */ +/* eslint-disable @typescript-eslint/no-unused-vars */ + +// This file was automatically generated by elastic/elastic-client-generator-js +// DO NOT MODIFY IT BY HAND. Instead, modify the source open api file, +// and elastic/elastic-client-generator-js to regenerate this file again. + +import { + Transport, + TransportRequestMetadata, + TransportRequestOptions, + TransportRequestOptionsWithMeta, + TransportRequestOptionsWithOutMeta, + TransportResult +} from '@elastic/transport' +import * as T from '../types' +import * as TB from '../typesWithBodyKey' +interface That { transport: Transport } + +export default class Connector { + transport: Transport + constructor (transport: Transport) { + this.transport = transport + } + + /** + * Updates the last_seen field in the connector and sets it to the current timestamp. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html | Elasticsearch API documentation} + */ + async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithOutMeta): Promise<T.ConnectorCheckInResponse> + async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptionsWithMeta): Promise<TransportResult<T.ConnectorCheckInResponse, unknown>> + async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<T.ConnectorCheckInResponse> + async checkIn (this: That, params: T.ConnectorCheckInRequest | TB.ConnectorCheckInRequest, options?: TransportRequestOptions): Promise<any> { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record<string, any> = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_check_in` + const meta: TransportRequestMetadata = { + name: 'connector.check_in', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method,
querystring, body, meta }, options) + } + + /** + * Deletes a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html | Elasticsearch API documentation} + */ + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise + async delete (this: That, params: T.ConnectorDeleteRequest | TB.ConnectorDeleteRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.delete', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html | Elasticsearch API documentation} + */ + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise + async get (this: That, params: T.ConnectorGetRequest | TB.ConnectorGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.get', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Returns existing connectors. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html | Elasticsearch API documentation} + */ + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise + async list (this: That, params?: T.ConnectorListRequest | TB.ConnectorListRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? 
{} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_connector' + const meta: TransportRequestMetadata = { + name: 'connector.list' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} + */ + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise + async post (this: That, params?: T.ConnectorPostRequest | TB.ConnectorPostRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_connector' + const meta: TransportRequestMetadata = { + name: 'connector.post' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates or updates a connector. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html | Elasticsearch API documentation} + */ + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptionsWithMeta): Promise> + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise + async put (this: That, params?: T.ConnectorPutRequest | TB.ConnectorPutRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['description', 'index_name', 'is_native', 'language', 'name', 'service_type'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + params = params ?? {} + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + let method = '' + let path = '' + if (params.connector_id != null) { + method = 'PUT' + path = `/_connector/${encodeURIComponent(params.connector_id.toString())}` + } else { + method = 'PUT' + path = '/_connector' + } + const meta: TransportRequestMetadata = { + name: 'connector.put', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Cancels a connector sync job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise + async syncJobCancel (this: That, params: T.ConnectorSyncJobCancelRequest | TB.ConnectorSyncJobCancelRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}/_cancel` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_cancel', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Deletes a connector sync job. 
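+ *
+ * @example
+ * // A hedged usage sketch, not part of the generated file: assumes a
+ * // configured `client` instance; the sync job ID is hypothetical.
+ * await client.connector.syncJobDelete({ connector_sync_job_id: 'my-sync-job-id' })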
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise + async syncJobDelete (this: That, params: T.ConnectorSyncJobDeleteRequest | TB.ConnectorSyncJobDeleteRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'DELETE' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_delete', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Retrieves a connector sync job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise + async syncJobGet (this: That, params: T.ConnectorSyncJobGetRequest | TB.ConnectorSyncJobGetRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_sync_job_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = `/_connector/_sync_job/${encodeURIComponent(params.connector_sync_job_id.toString())}` + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_get', + pathParts: { + connector_sync_job_id: params.connector_sync_job_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Lists connector sync jobs. 
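+ *
+ * @example
+ * // A hedged usage sketch: the optional `from`/`size` paging parameters shown
+ * // here are assumptions about the request type, not guaranteed by this diff.
+ * const jobs = await client.connector.syncJobList({ from: 0, size: 20 })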
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html | Elasticsearch API documentation} + */ + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise + async syncJobList (this: That, params?: T.ConnectorSyncJobListRequest | TB.ConnectorSyncJobListRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const querystring: Record = {} + const body = undefined + + params = params ?? {} + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'GET' + const path = '/_connector/_sync_job' + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_list' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Creates a connector sync job. + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html | Elasticsearch API documentation} + */ + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptionsWithMeta): Promise> + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise + async syncJobPost (this: That, params: T.ConnectorSyncJobPostRequest | TB.ConnectorSyncJobPostRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = [] + const acceptedBody: string[] = ['id', 'job_type', 'trigger_method'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'POST' + const path = '/_connector/_sync_job' + const meta: TransportRequestMetadata = { + name: 'connector.sync_job_post' + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Activates the valid draft filtering for a connector. 
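+ *
+ * @example
+ * // A hedged usage sketch: activates the previously validated draft filtering
+ * // (assumes a configured `client`; the connector ID is hypothetical).
+ * await client.connector.updateActiveFiltering({ connector_id: 'my-connector' })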
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} + */ + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise + async updateActiveFiltering (this: That, params: T.ConnectorUpdateActiveFilteringRequest | TB.ConnectorUpdateActiveFilteringRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const querystring: Record = {} + const body = undefined + + for (const key in params) { + if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering/_activate` + const meta: TransportRequestMetadata = { + name: 'connector.update_active_filtering', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the API key id in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-api-key-id-api.html | Elasticsearch API documentation} + */ + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise + async updateApiKeyId (this: That, params: T.ConnectorUpdateApiKeyIdRequest | TB.ConnectorUpdateApiKeyIdRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['api_key_id', 'api_key_secret_id'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_api_key_id` + const meta: TransportRequestMetadata = { + name: 'connector.update_api_key_id', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the configuration field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html | Elasticsearch API documentation} + */ + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise + async updateConfiguration (this: That, params: T.ConnectorUpdateConfigurationRequest | TB.ConnectorUpdateConfigurationRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['configuration', 'values'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_configuration` + const meta: TransportRequestMetadata = { + name: 'connector.update_configuration', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the error field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html | Elasticsearch API documentation} + */ + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise + async updateError (this: That, params: T.ConnectorUpdateErrorRequest | TB.ConnectorUpdateErrorRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['error'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ??
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_error` + const meta: TransportRequestMetadata = { + name: 'connector.update_error', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the filtering field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html | Elasticsearch API documentation} + */ + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise + async updateFiltering (this: That, params: T.ConnectorUpdateFilteringRequest | TB.ConnectorUpdateFilteringRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['filtering', 'rules', 'advanced_snippet'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering` + const meta: TransportRequestMetadata = { + name: 'connector.update_filtering', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the draft filtering validation info for a connector. 
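+ *
+ * @example
+ * // A hedged usage sketch: the shape of the `validation` payload below is an
+ * // assumption for illustration; consult the request type for the exact schema.
+ * await client.connector.updateFilteringValidation({
+ *   connector_id: 'my-connector',
+ *   validation: { state: 'valid', errors: [] }
+ * })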
+ * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-validation-api.html | Elasticsearch API documentation} + */ + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise + async updateFilteringValidation (this: That, params: T.ConnectorUpdateFilteringValidationRequest | TB.ConnectorUpdateFilteringValidationRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['validation'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_filtering/_validation` + const meta: TransportRequestMetadata = { + name: 'connector.update_filtering_validation', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the index_name in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-index-name-api.html | Elasticsearch API documentation} + */ + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise + async updateIndexName (this: That, params: T.ConnectorUpdateIndexNameRequest | TB.ConnectorUpdateIndexNameRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['index_name'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_index_name` + const meta: TransportRequestMetadata = { + name: 'connector.update_index_name', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the name and description fields in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html | Elasticsearch API documentation} + */ + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise + async updateName (this: That, params: T.ConnectorUpdateNameRequest | TB.ConnectorUpdateNameRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['name', 'description'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_name` + const meta: TransportRequestMetadata = { + name: 'connector.update_name', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the is_native flag in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-native-api.html | Elasticsearch API documentation} + */ + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise + async updateNative (this: That, params: T.ConnectorUpdateNativeRequest | TB.ConnectorUpdateNativeRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['is_native'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? 
{ ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_native` + const meta: TransportRequestMetadata = { + name: 'connector.update_native', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the pipeline field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html | Elasticsearch API documentation} + */ + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise + async updatePipeline (this: That, params: T.ConnectorUpdatePipelineRequest | TB.ConnectorUpdatePipelineRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['pipeline'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_pipeline` + const meta: TransportRequestMetadata = { + name: 'connector.update_pipeline', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the scheduling field in the connector document + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html | Elasticsearch API documentation} + */ + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise + async updateScheduling (this: That, params: T.ConnectorUpdateSchedulingRequest | TB.ConnectorUpdateSchedulingRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['scheduling'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_scheduling` + const meta: TransportRequestMetadata = { + name: 'connector.update_scheduling', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the service type of the connector + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-service-type-api.html | Elasticsearch API documentation} + */ + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise + async updateServiceType (this: That, params: T.ConnectorUpdateServiceTypeRequest | TB.ConnectorUpdateServiceTypeRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['service_type'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? 
{} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_service_type` + const meta: TransportRequestMetadata = { + name: 'connector.update_service_type', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } + + /** + * Updates the status of the connector + * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-status-api.html | Elasticsearch API documentation} + */ + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithOutMeta): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptionsWithMeta): Promise> + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise + async updateStatus (this: That, params: T.ConnectorUpdateStatusRequest | TB.ConnectorUpdateStatusRequest, options?: TransportRequestOptions): Promise { + const acceptedPath: string[] = ['connector_id'] + const acceptedBody: string[] = ['status'] + const querystring: Record = {} + // @ts-expect-error + const userBody: any = params?.body + let body: Record | string + if (typeof userBody === 'string') { + body = userBody + } else { + body = userBody != null ? { ...userBody } : undefined + } + + for (const key in params) { + if (acceptedBody.includes(key)) { + body = body ?? {} + // @ts-expect-error + body[key] = params[key] + } else if (acceptedPath.includes(key)) { + continue + } else if (key !== 'body') { + // @ts-expect-error + querystring[key] = params[key] + } + } + + const method = 'PUT' + const path = `/_connector/${encodeURIComponent(params.connector_id.toString())}/_status` + const meta: TransportRequestMetadata = { + name: 'connector.update_status', + pathParts: { + connector_id: params.connector_id + } + } + return await this.transport.request({ path, method, querystring, body, meta }, options) + } +} diff --git a/src/api/api/ml.ts b/src/api/api/ml.ts index 638fe26..a0bc3f3 100644 --- a/src/api/api/ml.ts +++ b/src/api/api/ml.ts @@ -89,7 +89,7 @@ export default class Ml { } /** - * Removes all scheduled events from a calendar, then deletes it. + * Delete a calendar. Removes all scheduled events from a calendar, then deletes it. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar.html | Elasticsearch API documentation} */ async deleteCalendar (this: That, params: T.MlDeleteCalendarRequest | TB.MlDeleteCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -121,7 +121,7 @@ export default class Ml { } /** - * Deletes scheduled events from a calendar. + * Delete events from a calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-event.html | Elasticsearch API documentation} */ async deleteCalendarEvent (this: That, params: T.MlDeleteCalendarEventRequest | TB.MlDeleteCalendarEventRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -154,7 +154,7 @@ export default class Ml { } /** - * Deletes anomaly detection jobs from a calendar. 
+ * Delete anomaly jobs from a calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-calendar-job.html | Elasticsearch API documentation} */ async deleteCalendarJob (this: That, params: T.MlDeleteCalendarJobRequest | TB.MlDeleteCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -187,7 +187,7 @@ export default class Ml { } /** - * Deletes a data frame analytics job. + * Delete a data frame analytics job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-dfanalytics.html | Elasticsearch API documentation} */ async deleteDataFrameAnalytics (this: That, params: T.MlDeleteDataFrameAnalyticsRequest | TB.MlDeleteDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -219,7 +219,7 @@ export default class Ml { } /** - * Deletes an existing datafeed. + * Delete a datafeed. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-datafeed.html | Elasticsearch API documentation} */ async deleteDatafeed (this: That, params: T.MlDeleteDatafeedRequest | TB.MlDeleteDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -251,7 +251,7 @@ export default class Ml { } /** - * Deletes a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. + * Delete a filter. If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-delete-filter.html | Elasticsearch API documentation} */ async deleteFilter (this: That, params: T.MlDeleteFilterRequest | TB.MlDeleteFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -315,7 +315,7 @@ export default class Ml { } /** - * Deletes an existing trained inference model that is currently not referenced by an ingest pipeline. + * Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models.html | Elasticsearch API documentation} */ async deleteTrainedModel (this: That, params: T.MlDeleteTrainedModelRequest | TB.MlDeleteTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -347,7 +347,7 @@ export default class Ml { } /** - * Deletes a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. + * Delete a trained model alias. This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-trained-models-aliases.html | Elasticsearch API documentation} */ async deleteTrainedModelAlias (this: That, params: T.MlDeleteTrainedModelAliasRequest | TB.MlDeleteTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -380,7 +380,7 @@ export default class Ml { } /** - * Makes an estimation of the memory usage for an anomaly detection job model. 
It is based on analysis configuration details for the job and cardinality estimates for the fields it references. + * Estimate job model memory usage. Makes an estimation of the memory usage for an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-apis.html | Elasticsearch API documentation} */ async estimateModelMemory (this: That, params?: T.MlEstimateModelMemoryRequest | TB.MlEstimateModelMemoryRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -422,7 +422,7 @@ export default class Ml { } /** - * Evaluates the data frame analytics for an annotated index. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. + * Evaluate data frame analytics. The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/evaluate-dfanalytics.html | Elasticsearch API documentation} */ async evaluateDataFrame (this: That, params: T.MlEvaluateDataFrameRequest | TB.MlEvaluateDataFrameRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -463,7 +463,7 @@ export default class Ml { } /** - * Forces any buffered data to be processed by the job. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. + * Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, then it might additionally calculate new results. Both flush and close operations are similar, however the flush is more efficient if you are expecting to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk and the job must be opened again before analyzing further data. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-flush-job.html | Elasticsearch API documentation} */ async flushJob (this: That, params: T.MlFlushJobRequest | TB.MlFlushJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -507,7 +507,7 @@ export default class Ml { } /** - * Retrieves information about the scheduled events in calendars. + * Get info about events in calendars. 
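+ *
+ * @example
+ * // A hedged usage sketch (assumes a configured `client`; the calendar ID is
+ * // hypothetical).
+ * const events = await client.ml.getCalendarEvents({ calendar_id: 'planned-outages' })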
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar-event.html | Elasticsearch API documentation} */ async getCalendarEvents (this: That, params: T.MlGetCalendarEventsRequest | TB.MlGetCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -539,7 +539,7 @@ export default class Ml { } /** - * Retrieves configuration information for calendars. + * Get calendar configuration info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-calendar.html | Elasticsearch API documentation} */ async getCalendars (this: That, params?: T.MlGetCalendarsRequest | TB.MlGetCalendarsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -591,7 +591,7 @@ export default class Ml { } /** - * Retrieves configuration information for data frame analytics jobs. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. + * Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics.html | Elasticsearch API documentation} */ async getDataFrameAnalytics (this: That, params?: T.MlGetDataFrameAnalyticsRequest | TB.MlGetDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -631,7 +631,7 @@ export default class Ml { } /** - * Retrieves usage information for data frame analytics jobs. + * Get data frame analytics jobs usage info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-dfanalytics-stats.html | Elasticsearch API documentation} */ async getDataFrameAnalyticsStats (this: That, params?: T.MlGetDataFrameAnalyticsStatsRequest | TB.MlGetDataFrameAnalyticsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -671,7 +671,7 @@ export default class Ml { } /** - * Retrieves usage information for datafeeds. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. + * Get datafeeds usage info. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. If the datafeed is stopped, the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed-stats.html | Elasticsearch API documentation} */ async getDatafeedStats (this: That, params?: T.MlGetDatafeedStatsRequest | TB.MlGetDatafeedStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -711,7 +711,7 @@ export default class Ml { } /** - * Retrieves configuration information for datafeeds. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. 
You can get information for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. + * Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using `_all`, by specifying `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-datafeed.html | Elasticsearch API documentation} */ async getDatafeeds (this: That, params?: T.MlGetDatafeedsRequest | TB.MlGetDatafeedsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -751,7 +751,7 @@ export default class Ml { } /** - * Retrieves filters. You can get a single filter or all filters. + * Get filters. You can get a single filter or all filters. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-filter.html | Elasticsearch API documentation} */ async getFilters (this: That, params?: T.MlGetFiltersRequest | TB.MlGetFiltersRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -791,7 +791,7 @@ export default class Ml { } /** - * Retrieves usage information for anomaly detection jobs. + * Get anomaly detection jobs usage info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job-stats.html | Elasticsearch API documentation} */ async getJobStats (this: That, params?: T.MlGetJobStatsRequest | TB.MlGetJobStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -831,7 +831,7 @@ export default class Ml { } /** - * Retrieves configuration information for anomaly detection jobs. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. + * Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-job.html | Elasticsearch API documentation} */ async getJobs (this: That, params?: T.MlGetJobsRequest | TB.MlGetJobsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -871,7 +871,7 @@ export default class Ml { } /** - * Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. 
If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. + * Get overall bucket results. Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs. The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-get-overall-buckets.html | Elasticsearch API documentation} */ async getOverallBuckets (this: That, params: T.MlGetOverallBucketsRequest | TB.MlGetOverallBucketsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -915,7 +915,7 @@ export default class Ml { } /** - * Retrieves configuration information for a trained model. + * Get trained model configuration info. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models.html | Elasticsearch API documentation} */ async getTrainedModels (this: That, params?: T.MlGetTrainedModelsRequest | TB.MlGetTrainedModelsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -955,7 +955,7 @@ export default class Ml { } /** - * Retrieves usage information for trained models. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. + * Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/get-trained-models-stats.html | Elasticsearch API documentation} */ async getTrainedModelsStats (this: That, params?: T.MlGetTrainedModelsStatsRequest | TB.MlGetTrainedModelsStatsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -995,7 +995,7 @@ export default class Ml { } /** - * Evaluates a trained model. + * Evaluate a trained model. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html | Elasticsearch API documentation} */ async inferTrainedModel (this: That, params: T.MlInferTrainedModelRequest | TB.MlInferTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1039,7 +1039,7 @@ export default class Ml { } /** - * Open anomaly detection jobs. An anomaly detection job must be opened in order for it to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model.
When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. + * Open anomaly detection jobs. An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-open-job.html | Elasticsearch API documentation} */ async openJob (this: That, params: T.MlOpenJobRequest | TB.MlOpenJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1083,7 +1083,7 @@ export default class Ml { } /** - * Adds scheduled events to a calendar. + * Add scheduled events to the calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-post-calendar-event.html | Elasticsearch API documentation} */ async postCalendarEvents (this: That, params: T.MlPostCalendarEventsRequest | TB.MlPostCalendarEventsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1127,7 +1127,7 @@ export default class Ml { } /** - * Previews the extracted features used by a data frame analytics config. + * Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. * @see {@link http://www.elastic.co/guide/en/elasticsearch/reference/master/preview-dfanalytics.html | Elasticsearch API documentation} */ async previewDataFrameAnalytics (this: That, params?: T.MlPreviewDataFrameAnalyticsRequest | TB.MlPreviewDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1179,7 +1179,7 @@ export default class Ml { } /** - * Previews a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. + * Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. 
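To make the preview flow above concrete, here is a minimal sketch using the JavaScript client; the connection details and the `datafeed-weblogs` id are hypothetical:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

// Hypothetical connection details; adjust for your cluster.
const client = new Client({ node: 'http://localhost:9200' })

// Fetch the first "page" of documents exactly as the datafeed would
// pass them to the anomaly detection engine, without starting it.
const preview = await client.ml.previewDatafeed({
  datafeed_id: 'datafeed-weblogs'
})
console.log(preview)
----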
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-preview-datafeed.html | Elasticsearch API documentation} */ async previewDatafeed (this: That, params?: T.MlPreviewDatafeedRequest | TB.MlPreviewDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise> @@ -1231,7 +1231,7 @@ export default class Ml { } /** - * Creates a calendar. + * Create a calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar.html | Elasticsearch API documentation} */ async putCalendar (this: That, params: T.MlPutCalendarRequest | TB.MlPutCalendarRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1275,7 +1275,7 @@ export default class Ml { } /** - * Adds an anomaly detection job to a calendar. + * Add anomaly detection job to calendar. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-calendar-job.html | Elasticsearch API documentation} */ async putCalendarJob (this: That, params: T.MlPutCalendarJobRequest | TB.MlPutCalendarJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1308,7 +1308,7 @@ export default class Ml { } /** - * Instantiates a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. + * Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-dfanalytics.html | Elasticsearch API documentation} */ async putDataFrameAnalytics (this: That, params: T.MlPutDataFrameAnalyticsRequest | TB.MlPutDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1352,7 +1352,7 @@ export default class Ml { } /** - * Instantiates a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. + * Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index.
Do not give users `write` privileges on the `.ml-config` index. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-datafeed.html | Elasticsearch API documentation} */ async putDatafeed (this: That, params: T.MlPutDatafeedRequest | TB.MlPutDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1396,7 +1396,7 @@ export default class Ml { } /** - * Instantiates a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. + * Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-put-filter.html | Elasticsearch API documentation} */ async putFilter (this: That, params: T.MlPutFilterRequest | TB.MlPutFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1484,7 +1484,7 @@ export default class Ml { } /** - * Enables you to supply a trained model that is not created by data frame analytics. + * Create a trained model. Enables you to supply a trained model that is not created by data frame analytics. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models.html | Elasticsearch API documentation} */ async putTrainedModel (this: That, params: T.MlPutTrainedModelRequest | TB.MlPutTrainedModelRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1528,7 +1528,7 @@ export default class Ml { } /** - * Creates or updates a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. + * Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another.
If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-models-aliases.html | Elasticsearch API documentation} */ async putTrainedModelAlias (this: That, params: T.MlPutTrainedModelAliasRequest | TB.MlPutTrainedModelAliasRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1561,7 +1561,7 @@ export default class Ml { } /** - * Creates part of a trained model definition. + * Create part of a trained model definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-definition-part.html | Elasticsearch API documentation} */ async putTrainedModelDefinitionPart (this: That, params: T.MlPutTrainedModelDefinitionPartRequest | TB.MlPutTrainedModelDefinitionPartRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1606,7 +1606,7 @@ export default class Ml { } /** - * Creates a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. + * Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/put-trained-model-vocabulary.html | Elasticsearch API documentation} */ async putTrainedModelVocabulary (this: That, params: T.MlPutTrainedModelVocabularyRequest | TB.MlPutTrainedModelVocabularyRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1650,7 +1650,7 @@ export default class Ml { } /** - * Resets an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. + * Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-reset-job.html | Elasticsearch API documentation} */ async resetJob (this: That, params: T.MlResetJobRequest | TB.MlResetJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1682,7 +1682,7 @@ export default class Ml { } /** - * Starts a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. + * Start a data frame analytics job.
A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-dfanalytics.html | Elasticsearch API documentation} */ async startDataFrameAnalytics (this: That, params: T.MlStartDataFrameAnalyticsRequest | TB.MlStartDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1714,7 +1714,7 @@ export default class Ml { } /** - * Starts one or more datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. + * Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs. If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored. When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-start-datafeed.html | Elasticsearch API documentation} */ async startDatafeed (this: That, params: T.MlStartDatafeedRequest | TB.MlStartDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1758,7 +1758,7 @@ export default class Ml { } /** - * Starts a trained model deployment, which allocates the model to every machine learning node. + * Start a trained model deployment. It allocates the model to every machine learning node. 
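A minimal sketch of starting a deployment, assuming the `client` instance from the earlier example and a hypothetical model id:

[source,ts]
----
// Allocate an already-imported trained model to the machine learning
// nodes so it can serve inference requests. 'my-nlp-model' is a
// hypothetical model id.
await client.ml.startTrainedModelDeployment({
  model_id: 'my-nlp-model',
  wait_for: 'started' // resolve once the model is started on at least one node
})
----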
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html | Elasticsearch API documentation} */ async startTrainedModelDeployment (this: That, params: T.MlStartTrainedModelDeploymentRequest | TB.MlStartTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1790,7 +1790,7 @@ export default class Ml { } /** - * Stops one or more data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. + * Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-dfanalytics.html | Elasticsearch API documentation} */ async stopDataFrameAnalytics (this: That, params: T.MlStopDataFrameAnalyticsRequest | TB.MlStopDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1822,7 +1822,7 @@ export default class Ml { } /** - * Stops one or more datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. + * Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-stop-datafeed.html | Elasticsearch API documentation} */ async stopDatafeed (this: That, params: T.MlStopDatafeedRequest | TB.MlStopDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1866,7 +1866,7 @@ export default class Ml { } /** - * Stops a trained model deployment. + * Stop a trained model deployment. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html | Elasticsearch API documentation} */ async stopTrainedModelDeployment (this: That, params: T.MlStopTrainedModelDeploymentRequest | TB.MlStopTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1898,7 +1898,7 @@ export default class Ml { } /** - * Updates an existing data frame analytics job. + * Update a data frame analytics job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-dfanalytics.html | Elasticsearch API documentation} */ async updateDataFrameAnalytics (this: That, params: T.MlUpdateDataFrameAnalyticsRequest | TB.MlUpdateDataFrameAnalyticsRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1942,7 +1942,7 @@ export default class Ml { } /** - * Updates the properties of a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. + * Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. 
* @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-datafeed.html | Elasticsearch API documentation} */ async updateDatafeed (this: That, params: T.MlUpdateDatafeedRequest | TB.MlUpdateDatafeedRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -1986,7 +1986,7 @@ export default class Ml { } /** - * Updates the description of a filter, adds items, or removes items from the list. + * Update a filter. Updates the description of a filter, adds items, or removes items from the list. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-filter.html | Elasticsearch API documentation} */ async updateFilter (this: That, params: T.MlUpdateFilterRequest | TB.MlUpdateFilterRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2030,7 +2030,7 @@ export default class Ml { } /** - * Updates certain properties of an anomaly detection job. + * Update an anomaly detection job. Updates certain properties of an anomaly detection job. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-update-job.html | Elasticsearch API documentation} */ async updateJob (this: That, params: T.MlUpdateJobRequest | TB.MlUpdateJobRequest, options?: TransportRequestOptionsWithOutMeta): Promise @@ -2074,7 +2074,7 @@ export default class Ml { } /** - * Starts a trained model deployment, which allocates the model to every machine learning node. + * Update a trained model deployment. * @see {@link https://www.elastic.co/guide/en/elasticsearch/reference/master/update-trained-model-deployment.html | Elasticsearch API documentation} */ async updateTrainedModelDeployment (this: That, params: T.MlUpdateTrainedModelDeploymentRequest | TB.MlUpdateTrainedModelDeploymentRequest, options?: TransportRequestOptionsWithOutMeta): Promise diff --git a/src/api/index.ts b/src/api/index.ts index 8f7d03c..a1d6689 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -32,6 +32,7 @@ import CatApi from './api/cat' import clearScrollApi from './api/clear_scroll' import closePointInTimeApi from './api/close_point_in_time' import ClusterApi from './api/cluster' +import ConnectorApi from './api/connector' import countApi from './api/count' import createApi from './api/create' import deleteApi from './api/delete' @@ -91,6 +92,7 @@ export default interface API { clearScroll: typeof clearScrollApi closePointInTime: typeof closePointInTimeApi cluster: ClusterApi + connector: ConnectorApi count: typeof countApi create: typeof createApi delete: typeof deleteApi @@ -146,6 +148,7 @@ export default interface API { const kAsyncSearch = Symbol('AsyncSearch') const kCat = Symbol('Cat') const kCluster = Symbol('Cluster') +const kConnector = Symbol('Connector') const kEnrich = Symbol('Enrich') const kEql = Symbol('Eql') const kEsql = Symbol('Esql') @@ -168,6 +171,7 @@ export default class API { [kAsyncSearch]: symbol | null [kCat]: symbol | null [kCluster]: symbol | null + [kConnector]: symbol | null [kEnrich]: symbol | null [kEql]: symbol | null [kEsql]: symbol | null @@ -189,6 +193,7 @@ export default class API { this[kAsyncSearch] = null this[kCat] = null this[kCluster] = null + this[kConnector] = null this[kEnrich] = null this[kEql] = null this[kEsql] = null @@ -256,6 +261,9 @@ Object.defineProperties(API.prototype, { cluster: { get () { return this[kCluster] === null ? (this[kCluster] = new ClusterApi(this.transport)) : this[kCluster] } }, + connector: { + get () { return this[kConnector] === null ? 
(this[kConnector] = new ConnectorApi(this.transport)) : this[kConnector] } + }, enrich: { get () { return this[kEnrich] === null ? (this[kEnrich] = new EnrichApi(this.transport)) : this[kEnrich] } }, diff --git a/src/api/types.ts b/src/api/types.ts index bb095d6..6b777ef 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -361,6 +361,7 @@ export interface FieldCapsResponse { export interface GetGetResult { _index: IndexName fields?: Record + _ignored?: string[] found: boolean _id: Id _primary_term?: long @@ -458,6 +459,16 @@ export interface HealthReportBaseIndicator { diagnosis?: HealthReportDiagnosis[] } +export interface HealthReportDataStreamLifecycleDetails { + stagnating_backing_indices_count: integer + total_backing_indices_in_error: integer + stagnating_backing_indices?: HealthReportStagnatingBackingIndices[] +} + +export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator { + details?: HealthReportDataStreamLifecycleDetails +} + export interface HealthReportDiagnosis { id: string action: string @@ -517,6 +528,7 @@ export interface HealthReportIndicators { shards_availability?: HealthReportShardsAvailabilityIndicator disk?: HealthReportDiskIndicator repository_integrity?: HealthReportRepositoryIntegrityIndicator + data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator shards_capacity?: HealthReportShardsCapacityIndicator @@ -613,6 +625,12 @@ export interface HealthReportSlmIndicatorUnhealthyPolicies { invocations_since_last_success?: Record } +export interface HealthReportStagnatingBackingIndices { + index_name: IndexName + first_occurrence_timestamp: long + retry_count: integer +} + export interface IndexRequest extends RequestBase { id?: Id index: IndexName @@ -771,6 +789,7 @@ export interface MsearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean max_concurrent_searches?: long max_concurrent_shard_requests?: long pre_filter_shard_size?: long @@ -1096,7 +1115,7 @@ export interface RenderSearchTemplateResponse { export interface ScriptsPainlessExecutePainlessContextSetup { document: any index: IndexName - query: QueryDslQueryContainer + query?: QueryDslQueryContainer } export interface ScriptsPainlessExecuteRequest extends RequestBase { @@ -1130,6 +1149,7 @@ export interface SearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean lenient?: boolean max_concurrent_shard_requests?: long min_compatible_shard_node?: VersionString @@ -1397,7 +1417,6 @@ export interface SearchHighlightBase { export interface SearchHighlightField extends SearchHighlightBase { fragment_offset?: integer matched_fields?: Fields - analyzer?: AnalysisAnalyzer } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1418,7 +1437,7 @@ export interface SearchHit { fields?: Record highlight?: Record inner_hits?: Record - matched_queries?: string[] + matched_queries?: string[] | Record _nested?: SearchNestedIdentity _ignored?: string[] ignored_field_values?: Record @@ -1899,6 +1918,7 @@ export interface UpdateByQueryRequest extends RequestBase { lenient?: boolean pipeline?: string preference?: string + q?: string refresh?: boolean request_cache?: boolean requests_per_second?: float @@ -2291,6 +2311,7 @@ export interface KnnQuery extends QueryDslQueryBase { query_vector?: 
QueryVector query_vector_builder?: QueryVectorBuilder num_candidates?: integer + k?: integer filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float } @@ -4204,13 +4225,61 @@ export interface AggregationsWeightedAverageValue { export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { } -export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer + +export interface AnalysisArabicAnalyzer { + type: 'arabic' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisArmenianAnalyzer { + type: 'armenian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' preserve_original?: SpecUtilsStringified } +export interface AnalysisBasqueAnalyzer { + type: 'basque' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBengaliAnalyzer { + type: 'bengali' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBrazilianAnalyzer { + type: 'brazilian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisBulgarianAnalyzer { + type: 'bulgarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisCatalanAnalyzer { + type: 'catalan' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export type AnalysisCharFilter = string | AnalysisCharFilterDefinition export interface AnalysisCharFilterBase { @@ -4225,6 +4294,18 @@ export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { max_token_length?: integer } +export interface AnalysisChineseAnalyzer { + 
type: 'chinese' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisCjkAnalyzer { + type: 'cjk' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams' common_words?: string[] @@ -4251,8 +4332,8 @@ export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisCustomAnalyzer { type: 'custom' - char_filter?: string[] - filter?: string[] + char_filter?: string | string[] + filter?: string | string[] position_increment_gap?: integer position_offset_gap?: integer tokenizer: string @@ -4264,6 +4345,19 @@ export interface AnalysisCustomNormalizer { filter?: string[] } +export interface AnalysisCzechAnalyzer { + type: 'czech' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisDanishAnalyzer { + type: 'danish' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { @@ -4279,6 +4373,8 @@ export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompo export interface AnalysisDutchAnalyzer { type: 'dutch' stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] } export type AnalysisEdgeNGramSide = 'front' | 'back' @@ -4306,6 +4402,19 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { articles_case?: SpecUtilsStringified } +export interface AnalysisEnglishAnalyzer { + type: 'english' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisEstonianAnalyzer { + type: 'estonian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisFingerprintAnalyzer { type: 'fingerprint' version?: VersionString @@ -4322,11 +4431,59 @@ export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase separator?: string } +export interface AnalysisFinnishAnalyzer { + type: 'finnish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisFrenchAnalyzer { + type: 'french' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGalicianAnalyzer { + type: 'galician' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGermanAnalyzer { + type: 'german' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGreekAnalyzer { + type: 'greek' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisHindiAnalyzer { + type: 'hindi' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' escaped_tags?: string[] } +export interface AnalysisHungarianAnalyzer { + type: 'hungarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell' dedup?: boolean @@ -4402,6 +4559,27 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase id: string } +export interface 
AnalysisIndonesianAnalyzer { + type: 'indonesian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisIrishAnalyzer { + type: 'irish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisItalianAnalyzer { + type: 'italian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem' } @@ -4490,6 +4668,13 @@ export interface AnalysisLanguageAnalyzer { stopwords_path?: string } +export interface AnalysisLatvianAnalyzer { + type: 'latvian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { type: 'length' max?: integer @@ -4506,6 +4691,13 @@ export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterB max_token_count?: SpecUtilsStringified } +export interface AnalysisLithuanianAnalyzer { + type: 'lithuanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLowercaseNormalizer { type: 'lowercase' } @@ -4571,6 +4763,13 @@ export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer +export interface AnalysisNorwegianAnalyzer { + type: 'norwegian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisPathHierarchyTokenizer extends AnalysisTokenizerBase { type: 'path_hierarchy' buffer_size?: SpecUtilsStringified @@ -4617,6 +4816,12 @@ export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase { pattern?: string } +export interface AnalysisPersianAnalyzer { + type: 'persian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff' export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish' @@ -4639,6 +4844,13 @@ export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase { type: 'porter_stem' } +export interface AnalysisPortugueseAnalyzer { + type: 'portuguese' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase { type: 'predicate_token_filter' script: Script | string @@ -4652,6 +4864,27 @@ export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase { type: 'reverse' } +export interface AnalysisRomanianAnalyzer { + type: 'romanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisRussianAnalyzer { + type: 'russian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSerbianAnalyzer { + type: 'serbian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase { type: 'shingle' filler_token?: string @@ -4681,6 +4914,20 @@ export interface 
AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase { language?: AnalysisSnowballLanguage } +export interface AnalysisSoraniAnalyzer { + type: 'sorani' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisSpanishAnalyzer { + type: 'spanish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisStandardAnalyzer { type: 'standard' max_token_length?: integer @@ -4721,6 +4968,13 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase { export type AnalysisStopWords = string | string[] +export interface AnalysisSwedishAnalyzer { + type: 'swedish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export type AnalysisSynonymFormat = 'solr' | 'wordnet' export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase { @@ -4747,6 +5001,12 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase { updateable?: boolean } +export interface AnalysisThaiAnalyzer { + type: 'thai' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom' export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition @@ -4774,6 +5034,13 @@ export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase { length?: integer } +export interface AnalysisTurkishAnalyzer { + type: 'turkish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase { type: 'uax_url_email' max_token_length?: integer @@ -9122,9 +9389,9 @@ export interface ConnectorConnectorConfigProperties { required: boolean sensitive: boolean tooltip?: string | null - type: ConnectorConnectorFieldType - ui_restrictions: string[] - validations: ConnectorValidation[] + type?: ConnectorConnectorFieldType + ui_restrictions?: string[] + validations?: ConnectorValidation[] value: any } @@ -9986,22 +10253,51 @@ export interface GraphExploreResponse { vertices: GraphVertex[] } -export type IlmActions = any +export interface IlmActions { + allocate?: IlmAllocateAction + delete?: IlmDeleteAction + downsample?: IlmDownsampleAction + freeze?: EmptyObject + forcemerge?: IlmForceMergeAction + migrate?: IlmMigrateAction + readonly?: EmptyObject + rollover?: IlmRolloverAction + set_priority?: IlmSetPriorityAction + searchable_snapshot?: IlmSearchableSnapshotAction + shrink?: IlmShrinkAction + unfollow?: EmptyObject + wait_for_snapshot?: IlmWaitForSnapshotAction +} -export interface IlmConfigurations { - rollover?: IndicesRolloverRolloverConditions - forcemerge?: IlmForceMergeConfiguration - shrink?: IlmShrinkConfiguration +export interface IlmAllocateAction { + number_of_replicas?: integer + total_shards_per_node?: integer + include?: Record + exclude?: Record + require?: Record } -export interface IlmForceMergeConfiguration { +export interface IlmDeleteAction { + delete_searchable_snapshot?: boolean +} + +export interface IlmDownsampleAction { + fixed_interval: DurationLarge + wait_timeout?: Duration +} + +export interface IlmForceMergeAction { max_num_segments: integer + index_codec?: string +} + +export interface IlmMigrateAction { + enabled?: boolean } export interface IlmPhase { actions?: IlmActions min_age?: Duration | long - configurations?: IlmConfigurations } export interface IlmPhases { @@ -10017,8 
+10313,36 @@ export interface IlmPolicy { _meta?: Metadata } -export interface IlmShrinkConfiguration { - number_of_shards: integer +export interface IlmRolloverAction { + max_size?: ByteSize + max_primary_shard_size?: ByteSize + max_age?: Duration + max_docs?: long + max_primary_shard_docs?: long + min_size?: ByteSize + min_primary_shard_size?: ByteSize + min_age?: Duration + min_docs?: long + min_primary_shard_docs?: long +} + +export interface IlmSearchableSnapshotAction { + snapshot_repository: string + force_merge_index?: boolean +} + +export interface IlmSetPriorityAction { + priority?: integer +} + +export interface IlmShrinkAction { + number_of_shards?: integer + max_primary_shard_size?: ByteSize + allow_write_after_shrink?: boolean +} + +export interface IlmWaitForSnapshotAction { + policy: string } export interface IlmDeleteLifecycleRequest extends RequestBase { @@ -10194,6 +10518,7 @@ export interface IndicesCacheQueries { export interface IndicesDataStream { _meta?: Metadata allow_custom_routing?: boolean + failure_store?: IndicesFailureStore generation: integer hidden: boolean ilm_policy?: Name @@ -10203,6 +10528,7 @@ export interface IndicesDataStream { lifecycle?: IndicesDataStreamLifecycleWithRollover name: DataStreamName replicated?: boolean + rollover_on_write: boolean status: HealthStatus system?: boolean template: Name @@ -10213,8 +10539,8 @@ export interface IndicesDataStreamIndex { index_name: IndexName index_uuid: Uuid ilm_policy?: Name - managed_by: IndicesManagedBy - prefer_ilm: boolean + managed_by?: IndicesManagedBy + prefer_ilm?: boolean } export interface IndicesDataStreamLifecycle { @@ -10262,6 +10588,12 @@ export interface IndicesDownsamplingRound { config: IndicesDownsampleConfig } +export interface IndicesFailureStore { + enabled: boolean + indices: IndicesDataStreamIndex[] + rollover_on_write: boolean +} + export interface IndicesFielddataFrequencyFilter { max: double min: double @@ -10812,6 +11144,8 @@ export interface IndicesCreateResponse { export interface IndicesCreateDataStreamRequest extends RequestBase { name: DataStreamName + master_timeout?: Duration + timeout?: Duration } export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase @@ -10869,6 +11203,7 @@ export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase export interface IndicesDeleteDataStreamRequest extends RequestBase { name: DataStreamNames + master_timeout?: Duration expand_wildcards?: ExpandWildcards } @@ -11096,6 +11431,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase { name: DataStreamNames expand_wildcards?: ExpandWildcards include_defaults?: boolean + master_timeout?: Duration } export interface IndicesGetDataLifecycleResponse { @@ -11106,6 +11442,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase { name?: DataStreamNames expand_wildcards?: ExpandWildcards include_defaults?: boolean + master_timeout?: Duration } export interface IndicesGetDataStreamResponse { @@ -11186,6 +11523,8 @@ export type IndicesGetTemplateResponse = Record export interface IndicesMigrateToDataStreamRequest extends RequestBase { name: IndexName + master_timeout?: Duration + timeout?: Duration } export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase @@ -11223,6 +11562,7 @@ export interface IndicesOpenResponse { export interface IndicesPromoteDataStreamRequest extends RequestBase { name: IndexName + master_timeout?: Duration } export type IndicesPromoteDataStreamResponse = any @@ -13614,7 +13954,7 @@ export interface 
MlPerPartitionCategorization { stop_on_warn?: boolean } -export type MlPredictedValue = string | double | boolean | integer +export type MlPredictedValue = ScalarValue | ScalarValue[] export interface MlQuestionAnsweringInferenceOptions { num_top_classes?: integer @@ -13812,7 +14152,7 @@ export interface MlTrainedModelDeploymentStats { error_count: integer inference_count: integer model_id: Id - nodes: MlTrainedModelDeploymentNodesStats + nodes: MlTrainedModelDeploymentNodesStats[] number_of_allocations: integer queue_capacity: integer rejected_execution_count: integer @@ -13847,7 +14187,7 @@ export interface MlTrainedModelInferenceStats { failure_count: integer inference_count: integer missing_all_fields_count: integer - timestamp: DateTime + timestamp: EpochTime } export interface MlTrainedModelLocation { @@ -15623,13 +15963,6 @@ export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodes nodes: Record } -export interface NodesHotThreadsHotThread { - hosts: Host[] - node_id: Id - node_name: Name - threads: string[] -} - export interface NodesHotThreadsRequest extends RequestBase { node_id?: NodeIds ignore_idle_threads?: boolean @@ -15643,7 +15976,6 @@ export interface NodesHotThreadsRequest extends RequestBase { } export interface NodesHotThreadsResponse { - hot_threads: NodesHotThreadsHotThread[] } export interface NodesInfoDeprecationIndexing { @@ -16081,7 +16413,7 @@ export interface QueryRulesQueryRuleCriteria { export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always' -export type QueryRulesQueryRuleType = 'pinned' +export type QueryRulesQueryRuleType = 'pinned' | 'exclude' export interface QueryRulesQueryRuleset { ruleset_id: Id @@ -18229,7 +18561,7 @@ export interface SynonymsGetSynonymsSetsSynonymsSetItem { export interface SynonymsPutSynonymRequest extends RequestBase { id: Id - synonyms_set: SynonymsSynonymRule[] + synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[] } export interface SynonymsPutSynonymResponse { diff --git a/src/api/typesWithBodyKey.ts b/src/api/typesWithBodyKey.ts index f9c0b80..cf01dc9 100644 --- a/src/api/typesWithBodyKey.ts +++ b/src/api/typesWithBodyKey.ts @@ -382,6 +382,7 @@ export interface FieldCapsResponse { export interface GetGetResult { _index: IndexName fields?: Record + _ignored?: string[] found: boolean _id: Id _primary_term?: long @@ -479,6 +480,16 @@ export interface HealthReportBaseIndicator { diagnosis?: HealthReportDiagnosis[] } +export interface HealthReportDataStreamLifecycleDetails { + stagnating_backing_indices_count: integer + total_backing_indices_in_error: integer + stagnating_backing_indices?: HealthReportStagnatingBackingIndices[] +} + +export interface HealthReportDataStreamLifecycleIndicator extends HealthReportBaseIndicator { + details?: HealthReportDataStreamLifecycleDetails +} + export interface HealthReportDiagnosis { id: string action: string @@ -538,6 +549,7 @@ export interface HealthReportIndicators { shards_availability?: HealthReportShardsAvailabilityIndicator disk?: HealthReportDiskIndicator repository_integrity?: HealthReportRepositoryIntegrityIndicator + data_stream_lifecycle?: HealthReportDataStreamLifecycleIndicator ilm?: HealthReportIlmIndicator slm?: HealthReportSlmIndicator shards_capacity?: HealthReportShardsCapacityIndicator @@ -634,6 +646,12 @@ export interface HealthReportSlmIndicatorUnhealthyPolicies { invocations_since_last_success?: Record } +export interface 
HealthReportStagnatingBackingIndices { + index_name: IndexName + first_occurrence_timestamp: long + retry_count: integer +} + export interface IndexRequest extends RequestBase { id?: Id index: IndexName @@ -799,6 +817,7 @@ export interface MsearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean max_concurrent_searches?: long max_concurrent_shard_requests?: long pre_filter_shard_size?: long @@ -1141,7 +1160,7 @@ export interface RenderSearchTemplateResponse { export interface ScriptsPainlessExecutePainlessContextSetup { document: any index: IndexName - query: QueryDslQueryContainer + query?: QueryDslQueryContainer } export interface ScriptsPainlessExecuteRequest extends RequestBase { @@ -1182,6 +1201,7 @@ export interface SearchRequest extends RequestBase { expand_wildcards?: ExpandWildcards ignore_throttled?: boolean ignore_unavailable?: boolean + include_named_queries_score?: boolean lenient?: boolean max_concurrent_shard_requests?: long min_compatible_shard_node?: VersionString @@ -1452,7 +1472,6 @@ export interface SearchHighlightBase { export interface SearchHighlightField extends SearchHighlightBase { fragment_offset?: integer matched_fields?: Fields - analyzer?: AnalysisAnalyzer } export type SearchHighlighterEncoder = 'default' | 'html' @@ -1473,7 +1492,7 @@ export interface SearchHit { fields?: Record highlight?: Record inner_hits?: Record - matched_queries?: string[] + matched_queries?: string[] | Record _nested?: SearchNestedIdentity _ignored?: string[] ignored_field_values?: Record @@ -1969,6 +1988,7 @@ export interface UpdateByQueryRequest extends RequestBase { lenient?: boolean pipeline?: string preference?: string + q?: string refresh?: boolean request_cache?: boolean requests_per_second?: float @@ -2364,6 +2384,7 @@ export interface KnnQuery extends QueryDslQueryBase { query_vector?: QueryVector query_vector_builder?: QueryVectorBuilder num_candidates?: integer + k?: integer filter?: QueryDslQueryContainer | QueryDslQueryContainer[] similarity?: float } @@ -4277,13 +4298,61 @@ export interface AggregationsWeightedAverageValue { export interface AggregationsWeightedAvgAggregate extends AggregationsSingleMetricAggregateBase { } -export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisDutchAnalyzer +export type AnalysisAnalyzer = AnalysisCustomAnalyzer | AnalysisFingerprintAnalyzer | AnalysisKeywordAnalyzer | AnalysisLanguageAnalyzer | AnalysisNoriAnalyzer | AnalysisPatternAnalyzer | AnalysisSimpleAnalyzer | AnalysisStandardAnalyzer | AnalysisStopAnalyzer | AnalysisWhitespaceAnalyzer | AnalysisIcuAnalyzer | AnalysisKuromojiAnalyzer | AnalysisSnowballAnalyzer | AnalysisArabicAnalyzer | AnalysisArmenianAnalyzer | AnalysisBasqueAnalyzer | AnalysisBengaliAnalyzer | AnalysisBrazilianAnalyzer | AnalysisBulgarianAnalyzer | AnalysisCatalanAnalyzer | AnalysisChineseAnalyzer | AnalysisCjkAnalyzer | AnalysisCzechAnalyzer | AnalysisDanishAnalyzer | AnalysisDutchAnalyzer | AnalysisEnglishAnalyzer | AnalysisEstonianAnalyzer | AnalysisFinnishAnalyzer | AnalysisFrenchAnalyzer | AnalysisGalicianAnalyzer | AnalysisGermanAnalyzer | AnalysisGreekAnalyzer | 
AnalysisHindiAnalyzer | AnalysisHungarianAnalyzer | AnalysisIndonesianAnalyzer | AnalysisIrishAnalyzer | AnalysisItalianAnalyzer | AnalysisLatvianAnalyzer | AnalysisLithuanianAnalyzer | AnalysisNorwegianAnalyzer | AnalysisPersianAnalyzer | AnalysisPortugueseAnalyzer | AnalysisRomanianAnalyzer | AnalysisRussianAnalyzer | AnalysisSerbianAnalyzer | AnalysisSoraniAnalyzer | AnalysisSpanishAnalyzer | AnalysisSwedishAnalyzer | AnalysisTurkishAnalyzer | AnalysisThaiAnalyzer + +export interface AnalysisArabicAnalyzer { + type: 'arabic' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisArmenianAnalyzer { + type: 'armenian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} export interface AnalysisAsciiFoldingTokenFilter extends AnalysisTokenFilterBase { type: 'asciifolding' preserve_original?: SpecUtilsStringified } +export interface AnalysisBasqueAnalyzer { + type: 'basque' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBengaliAnalyzer { + type: 'bengali' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisBrazilianAnalyzer { + type: 'brazilian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisBulgarianAnalyzer { + type: 'bulgarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisCatalanAnalyzer { + type: 'catalan' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export type AnalysisCharFilter = string | AnalysisCharFilterDefinition export interface AnalysisCharFilterBase { @@ -4298,6 +4367,18 @@ export interface AnalysisCharGroupTokenizer extends AnalysisTokenizerBase { max_token_length?: integer } +export interface AnalysisChineseAnalyzer { + type: 'chinese' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisCjkAnalyzer { + type: 'cjk' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisCommonGramsTokenFilter extends AnalysisTokenFilterBase { type: 'common_grams' common_words?: string[] @@ -4324,8 +4405,8 @@ export interface AnalysisConditionTokenFilter extends AnalysisTokenFilterBase { export interface AnalysisCustomAnalyzer { type: 'custom' - char_filter?: string[] - filter?: string[] + char_filter?: string | string[] + filter?: string | string[] position_increment_gap?: integer position_offset_gap?: integer tokenizer: string @@ -4337,6 +4418,19 @@ export interface AnalysisCustomNormalizer { filter?: string[] } +export interface AnalysisCzechAnalyzer { + type: 'czech' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisDanishAnalyzer { + type: 'danish' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export type AnalysisDelimitedPayloadEncoding = 'int' | 'float' | 'identity' export interface AnalysisDelimitedPayloadTokenFilter extends AnalysisTokenFilterBase { @@ -4352,6 +4446,8 @@ export interface AnalysisDictionaryDecompounderTokenFilter extends AnalysisCompo export interface AnalysisDutchAnalyzer { type: 'dutch' stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] } export type AnalysisEdgeNGramSide = 'front' | 'back' @@ -4379,6 +4475,19 @@ export interface AnalysisElisionTokenFilter extends AnalysisTokenFilterBase { 
articles_case?: SpecUtilsStringified } +export interface AnalysisEnglishAnalyzer { + type: 'english' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisEstonianAnalyzer { + type: 'estonian' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + export interface AnalysisFingerprintAnalyzer { type: 'fingerprint' version?: VersionString @@ -4395,11 +4504,59 @@ export interface AnalysisFingerprintTokenFilter extends AnalysisTokenFilterBase separator?: string } +export interface AnalysisFinnishAnalyzer { + type: 'finnish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisFrenchAnalyzer { + type: 'french' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGalicianAnalyzer { + type: 'galician' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGermanAnalyzer { + type: 'german' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisGreekAnalyzer { + type: 'greek' + stopwords?: AnalysisStopWords + stopwords_path?: string +} + +export interface AnalysisHindiAnalyzer { + type: 'hindi' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHtmlStripCharFilter extends AnalysisCharFilterBase { type: 'html_strip' escaped_tags?: string[] } +export interface AnalysisHungarianAnalyzer { + type: 'hungarian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisHunspellTokenFilter extends AnalysisTokenFilterBase { type: 'hunspell' dedup?: boolean @@ -4475,6 +4632,27 @@ export interface AnalysisIcuTransformTokenFilter extends AnalysisTokenFilterBase id: string } +export interface AnalysisIndonesianAnalyzer { + type: 'indonesian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisIrishAnalyzer { + type: 'irish' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + +export interface AnalysisItalianAnalyzer { + type: 'italian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisKStemTokenFilter extends AnalysisTokenFilterBase { type: 'kstem' } @@ -4563,6 +4741,13 @@ export interface AnalysisLanguageAnalyzer { stopwords_path?: string } +export interface AnalysisLatvianAnalyzer { + type: 'latvian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLengthTokenFilter extends AnalysisTokenFilterBase { type: 'length' max?: integer @@ -4579,6 +4764,13 @@ export interface AnalysisLimitTokenCountTokenFilter extends AnalysisTokenFilterB max_token_count?: SpecUtilsStringified } +export interface AnalysisLithuanianAnalyzer { + type: 'lithuanian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export interface AnalysisLowercaseNormalizer { type: 'lowercase' } @@ -4644,6 +4836,13 @@ export interface AnalysisNoriTokenizer extends AnalysisTokenizerBase { export type AnalysisNormalizer = AnalysisLowercaseNormalizer | AnalysisCustomNormalizer +export interface AnalysisNorwegianAnalyzer { + type: 'norwegian' + stopwords?: AnalysisStopWords + stopwords_path?: string + stem_exclusion?: string[] +} + export 
@@ -4690,6 +4889,12 @@ export interface AnalysisPatternTokenizer extends AnalysisTokenizerBase {
   pattern?: string
 }

+export interface AnalysisPersianAnalyzer {
+  type: 'persian'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+}
+
 export type AnalysisPhoneticEncoder = 'metaphone' | 'double_metaphone' | 'soundex' | 'refined_soundex' | 'caverphone1' | 'caverphone2' | 'cologne' | 'nysiis' | 'koelnerphonetik' | 'haasephonetik' | 'beider_morse' | 'daitch_mokotoff'

 export type AnalysisPhoneticLanguage = 'any' | 'common' | 'cyrillic' | 'english' | 'french' | 'german' | 'hebrew' | 'hungarian' | 'polish' | 'romanian' | 'russian' | 'spanish'
@@ -4712,6 +4917,13 @@ export interface AnalysisPorterStemTokenFilter extends AnalysisTokenFilterBase {
   type: 'porter_stem'
 }

+export interface AnalysisPortugueseAnalyzer {
+  type: 'portuguese'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
 export interface AnalysisPredicateTokenFilter extends AnalysisTokenFilterBase {
   type: 'predicate_token_filter'
   script: Script | string
@@ -4725,6 +4937,27 @@ export interface AnalysisReverseTokenFilter extends AnalysisTokenFilterBase {
   type: 'reverse'
 }

+export interface AnalysisRomanianAnalyzer {
+  type: 'romanian'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
+export interface AnalysisRussianAnalyzer {
+  type: 'russian'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
+export interface AnalysisSerbianAnalyzer {
+  type: 'serbian'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
 export interface AnalysisShingleTokenFilter extends AnalysisTokenFilterBase {
   type: 'shingle'
   filler_token?: string
@@ -4754,6 +4987,20 @@ export interface AnalysisSnowballTokenFilter extends AnalysisTokenFilterBase {
   language?: AnalysisSnowballLanguage
 }

+export interface AnalysisSoraniAnalyzer {
+  type: 'sorani'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
+export interface AnalysisSpanishAnalyzer {
+  type: 'spanish'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
 export interface AnalysisStandardAnalyzer {
   type: 'standard'
   max_token_length?: integer
@@ -4794,6 +5041,13 @@ export interface AnalysisStopTokenFilter extends AnalysisTokenFilterBase {

 export type AnalysisStopWords = string | string[]

+export interface AnalysisSwedishAnalyzer {
+  type: 'swedish'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
 export type AnalysisSynonymFormat = 'solr' | 'wordnet'

 export interface AnalysisSynonymGraphTokenFilter extends AnalysisTokenFilterBase {
@@ -4820,6 +5074,12 @@ export interface AnalysisSynonymTokenFilter extends AnalysisTokenFilterBase {
   updateable?: boolean
 }

+export interface AnalysisThaiAnalyzer {
+  type: 'thai'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+}
+
 export type AnalysisTokenChar = 'letter' | 'digit' | 'whitespace' | 'punctuation' | 'symbol' | 'custom'

 export type AnalysisTokenFilter = string | AnalysisTokenFilterDefinition
@@ -4847,6 +5107,13 @@ export interface AnalysisTruncateTokenFilter extends AnalysisTokenFilterBase {
   length?: integer
 }

+export interface AnalysisTurkishAnalyzer {
+  type: 'turkish'
+  stopwords?: AnalysisStopWords
+  stopwords_path?: string
+  stem_exclusion?: string[]
+}
+
 export interface AnalysisUaxEmailUrlTokenizer extends AnalysisTokenizerBase {
   type: 'uax_url_email'
   max_token_length?: integer
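These analyzers are all built in, so they can be exercised with the analyze API without creating an index first. A sketch, assuming a local node; the sample text is illustrative:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// Run the built-in Turkish analyzer against sample text and print the tokens.
const response = await client.indices.analyze({
  analyzer: 'turkish',
  text: 'Kitapları okudum'
})
console.log(response.tokens?.map(t => t.token))
----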
@@ -9223,9 +9490,9 @@ export interface ConnectorConnectorConfigProperties {
   required: boolean
   sensitive: boolean
   tooltip?: string | null
-  type: ConnectorConnectorFieldType
-  ui_restrictions: string[]
-  validations: ConnectorValidation[]
+  type?: ConnectorConnectorFieldType
+  ui_restrictions?: string[]
+  validations?: ConnectorValidation[]
   value: any
 }
@@ -10151,22 +10418,51 @@ export interface GraphExploreResponse {
   vertices: GraphVertex[]
 }

-export type IlmActions = any
+export interface IlmActions {
+  allocate?: IlmAllocateAction
+  delete?: IlmDeleteAction
+  downsample?: IlmDownsampleAction
+  freeze?: EmptyObject
+  forcemerge?: IlmForceMergeAction
+  migrate?: IlmMigrateAction
+  readonly?: EmptyObject
+  rollover?: IlmRolloverAction
+  set_priority?: IlmSetPriorityAction
+  searchable_snapshot?: IlmSearchableSnapshotAction
+  shrink?: IlmShrinkAction
+  unfollow?: EmptyObject
+  wait_for_snapshot?: IlmWaitForSnapshotAction
+}

-export interface IlmConfigurations {
-  rollover?: IndicesRolloverRolloverConditions
-  forcemerge?: IlmForceMergeConfiguration
-  shrink?: IlmShrinkConfiguration
+export interface IlmAllocateAction {
+  number_of_replicas?: integer
+  total_shards_per_node?: integer
+  include?: Record<string, string>
+  exclude?: Record<string, string>
+  require?: Record<string, string>
 }

-export interface IlmForceMergeConfiguration {
+export interface IlmDeleteAction {
+  delete_searchable_snapshot?: boolean
+}
+
+export interface IlmDownsampleAction {
+  fixed_interval: DurationLarge
+  wait_timeout?: Duration
+}
+
+export interface IlmForceMergeAction {
   max_num_segments: integer
+  index_codec?: string
+}
+
+export interface IlmMigrateAction {
+  enabled?: boolean
 }

 export interface IlmPhase {
   actions?: IlmActions
   min_age?: Duration | long
-  configurations?: IlmConfigurations
 }

 export interface IlmPhases {
@@ -10182,8 +10478,36 @@ export interface IlmPolicy {
   _meta?: Metadata
 }

-export interface IlmShrinkConfiguration {
-  number_of_shards: integer
+export interface IlmRolloverAction {
+  max_size?: ByteSize
+  max_primary_shard_size?: ByteSize
+  max_age?: Duration
+  max_docs?: long
+  max_primary_shard_docs?: long
+  min_size?: ByteSize
+  min_primary_shard_size?: ByteSize
+  min_age?: Duration
+  min_docs?: long
+  min_primary_shard_docs?: long
+}
+
+export interface IlmSearchableSnapshotAction {
+  snapshot_repository: string
+  force_merge_index?: boolean
+}
+
+export interface IlmSetPriorityAction {
+  priority?: integer
+}
+
+export interface IlmShrinkAction {
+  number_of_shards?: integer
+  max_primary_shard_size?: ByteSize
+  allow_write_after_shrink?: boolean
+}
+
+export interface IlmWaitForSnapshotAction {
+  policy: string
 }

 export interface IlmDeleteLifecycleRequest extends RequestBase {
@@ -10368,6 +10692,7 @@ export interface IndicesCacheQueries {
 export interface IndicesDataStream {
   _meta?: Metadata
   allow_custom_routing?: boolean
+  failure_store?: IndicesFailureStore
   generation: integer
   hidden: boolean
   ilm_policy?: Name
@@ -10377,6 +10702,7 @@
   lifecycle?: IndicesDataStreamLifecycleWithRollover
   name: DataStreamName
   replicated?: boolean
+  rollover_on_write: boolean
   status: HealthStatus
   system?: boolean
   template: Name
@@ -10387,8 +10713,8 @@ export interface IndicesDataStreamIndex {
   index_name: IndexName
   index_uuid: Uuid
   ilm_policy?: Name
-  managed_by: IndicesManagedBy
-  prefer_ilm: boolean
+  managed_by?: IndicesManagedBy
+  prefer_ilm?: boolean
 }

 export interface IndicesDataStreamLifecycle {
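With `IlmActions` no longer typed as `any`, lifecycle policies written through the client are checked action by action against the new interfaces. A sketch of a hot/delete policy, assuming a local node and a hypothetical policy name:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// Each action below is validated against its new interface
// (IlmRolloverAction, IlmSetPriorityAction, IlmDeleteAction).
await client.ilm.putLifecycle({
  name: 'logs-policy', // hypothetical policy name
  policy: {
    phases: {
      hot: {
        actions: {
          rollover: { max_primary_shard_size: '50gb', max_age: '30d' },
          set_priority: { priority: 100 }
        }
      },
      delete: {
        min_age: '90d',
        actions: {
          delete: { delete_searchable_snapshot: true }
        }
      }
    }
  }
})
----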
@@ -10436,6 +10762,12 @@ export interface IndicesDownsamplingRound {
   config: IndicesDownsampleConfig
 }

+export interface IndicesFailureStore {
+  enabled: boolean
+  indices: IndicesDataStreamIndex[]
+  rollover_on_write: boolean
+}
+
 export interface IndicesFielddataFrequencyFilter {
   max: double
   min: double
@@ -10995,6 +11327,8 @@ export interface IndicesCreateResponse {

 export interface IndicesCreateDataStreamRequest extends RequestBase {
   name: DataStreamName
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IndicesCreateDataStreamResponse = AcknowledgedResponseBase
@@ -11052,6 +11386,7 @@ export type IndicesDeleteDataLifecycleResponse = AcknowledgedResponseBase

 export interface IndicesDeleteDataStreamRequest extends RequestBase {
   name: DataStreamNames
+  master_timeout?: Duration
   expand_wildcards?: ExpandWildcards
 }
@@ -11280,6 +11615,7 @@ export interface IndicesGetDataLifecycleRequest extends RequestBase {
   name: DataStreamNames
   expand_wildcards?: ExpandWildcards
   include_defaults?: boolean
+  master_timeout?: Duration
 }

 export interface IndicesGetDataLifecycleResponse {
@@ -11290,6 +11626,7 @@ export interface IndicesGetDataStreamRequest extends RequestBase {
   name?: DataStreamNames
   expand_wildcards?: ExpandWildcards
   include_defaults?: boolean
+  master_timeout?: Duration
 }

 export interface IndicesGetDataStreamResponse {
@@ -11370,6 +11707,8 @@ export type IndicesGetTemplateResponse = Record<Name, IndicesTemplateMapping>

 export interface IndicesMigrateToDataStreamRequest extends RequestBase {
   name: IndexName
+  master_timeout?: Duration
+  timeout?: Duration
 }

 export type IndicesMigrateToDataStreamResponse = AcknowledgedResponseBase
@@ -11410,6 +11749,7 @@ export interface IndicesOpenResponse {

 export interface IndicesPromoteDataStreamRequest extends RequestBase {
   name: IndexName
+  master_timeout?: Duration
 }

 export type IndicesPromoteDataStreamResponse = any
@@ -13849,7 +14189,7 @@ export interface MlPerPartitionCategorization {
   stop_on_warn?: boolean
 }

-export type MlPredictedValue = string | double | boolean | integer
+export type MlPredictedValue = ScalarValue | ScalarValue[]

 export interface MlQuestionAnsweringInferenceOptions {
   num_top_classes?: integer
@@ -14047,7 +14387,7 @@ export interface MlTrainedModelDeploymentStats {
   error_count: integer
   inference_count: integer
   model_id: Id
-  nodes: MlTrainedModelDeploymentNodesStats
+  nodes: MlTrainedModelDeploymentNodesStats[]
   number_of_allocations: integer
   queue_capacity: integer
   rejected_execution_count: integer
@@ -14082,7 +14422,7 @@ export interface MlTrainedModelInferenceStats {
   failure_count: integer
   inference_count: integer
   missing_all_fields_count: integer
-  timestamp: DateTime
+  timestamp: EpochTime<UnitMillis>
 }

 export interface MlTrainedModelLocation {
@@ -15972,13 +16312,6 @@ export interface NodesGetRepositoriesMeteringInfoResponseBase extends NodesNodes
   nodes: Record<string, NodesRepositoryMeteringInformation>
 }

-export interface NodesHotThreadsHotThread {
-  hosts: Host[]
-  node_id: Id
-  node_name: Name
-  threads: string[]
-}
-
 export interface NodesHotThreadsRequest extends RequestBase {
   node_id?: NodeIds
   ignore_idle_threads?: boolean
@@ -15992,7 +16325,6 @@
 }

 export interface NodesHotThreadsResponse {
-  hot_threads: NodesHotThreadsHotThread[]
 }

 export interface NodesInfoDeprecationIndexing {
@@ -16433,7 +16765,7 @@ export interface QueryRulesQueryRuleCriteria {
 export type QueryRulesQueryRuleCriteriaType = 'global' | 'exact' | 'exact_fuzzy' | 'fuzzy' | 'prefix' | 'suffix' | 'contains' | 'lt' | 'lte' | 'gt' | 'gte' | 'always'

-export type QueryRulesQueryRuleType = 'pinned'
+export type QueryRulesQueryRuleType = 'pinned' | 'exclude'

 export interface QueryRulesQueryRuleset {
   ruleset_id: Id
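Several data stream requests above gain an optional `master_timeout` (and in some cases `timeout`). A sketch, assuming a local node and a hypothetical data stream name:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// Both timeouts are optional; '30s' is illustrative.
await client.indices.createDataStream({
  name: 'logs-myapp-default', // hypothetical data stream name
  master_timeout: '30s',
  timeout: '30s'
})
----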
@@ -18704,7 +19036,7 @@ export interface SynonymsPutSynonymRequest extends RequestBase {
   id: Id
   /** @deprecated The use of the 'body' key has been deprecated, move the nested keys to the top level object. */
   body?: {
-    synonyms_set: SynonymsSynonymRule[]
+    synonyms_set: SynonymsSynonymRule | SynonymsSynonymRule[]
   }
 }
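Since `synonyms_set` now accepts a single rule as well as an array, a one-rule set no longer needs wrapping. A sketch, assuming a local node and a hypothetical set id:

[source,ts]
----
import { Client } from '@elastic/elasticsearch'

const client = new Client({ node: 'http://localhost:9200' }) // placeholder node

// A single SynonymsSynonymRule is now accepted in place of an array.
await client.synonyms.putSynonym({
  id: 'my-synonyms-set', // hypothetical synonyms set id
  synonyms_set: { id: 'ipod-rule', synonyms: 'ipod, i-pod, i pod' }
})
----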