diff --git a/output/schema/schema.json b/output/schema/schema.json index 6a4a721221..01c531d9b6 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -1131,6 +1131,11 @@ "docId": "cat-trained-model", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-trained-model.html", "name": "cat.ml_trained_models", + "privileges": { + "cluster": [ + "monitor_ml" + ] + }, "request": { "name": "Request", "namespace": "cat.ml_trained_models" @@ -1448,6 +1453,14 @@ "docId": "cat-segments", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-segments.html", "name": "cat.segments", + "privileges": { + "cluster": [ + "monitor" + ], + "index": [ + "monitor" + ] + }, "request": { "name": "Request", "namespace": "cat.segments" @@ -1494,6 +1507,14 @@ "docId": "cat-shards", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-shards.html", "name": "cat.shards", + "privileges": { + "cluster": [ + "monitor" + ], + "index": [ + "monitor" + ] + }, "request": { "name": "Request", "namespace": "cat.shards" @@ -1540,6 +1561,11 @@ "docId": "cat-snapshots", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-snapshots.html", "name": "cat.snapshots", + "privileges": { + "cluster": [ + "monitor_snapshot" + ] + }, "request": { "name": "Request", "namespace": "cat.snapshots" @@ -1586,6 +1612,11 @@ "docId": "tasks", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/tasks.html", "name": "cat.tasks", + "privileges": { + "cluster": [ + "monitor" + ] + }, "request": { "name": "Request", "namespace": "cat.tasks" @@ -1626,6 +1657,11 @@ "docId": "cat-templates", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-templates.html", "name": "cat.templates", + "privileges": { + "cluster": [ + "monitor" + ] + }, "request": { "name": "Request", "namespace": "cat.templates" @@ -1672,6 +1708,11 @@ "docId": "cat-thread-pool", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-thread-pool.html", "name": "cat.thread_pool", + "privileges": { + "cluster": [ + "monitor" + ] + }, "request": { "name": "Request", "namespace": "cat.thread_pool" @@ -1718,6 +1759,11 @@ "docId": "cat-transforms", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/cat-transforms.html", "name": "cat.transforms", + "privileges": { + "cluster": [ + "monitor_transform" + ] + }, "request": { "name": "Request", "namespace": "cat.transforms" @@ -83403,7 +83449,7 @@ }, "properties": [ { - "description": "the id", + "description": "The identifier for the job.", "name": "id", "required": false, "type": { @@ -83418,7 +83464,7 @@ "aliases": [ "t" ], - "description": "analysis type", + "description": "The type of analysis that the job performs.", "name": "type", "required": false, "type": { @@ -83434,7 +83480,7 @@ "ct", "createTime" ], - "description": "job creation time", + "description": "The time when the job was created.", "name": "create_time", "required": false, "type": { @@ -83449,7 +83495,7 @@ "aliases": [ "v" ], - "description": "the version of Elasticsearch when the analytics was created", + "description": "The version of Elasticsearch when the job was created.", "name": "version", "required": false, "type": { @@ -83465,7 +83511,7 @@ "si", "sourceIndex" ], - "description": "source index", + "description": "The name of the source index.", "name": "source_index", "required": false, "type": { @@ -83481,7 +83527,7 @@ "di", "destIndex" ], - 
"description": "destination index", + "description": "The name of the destination index.", "name": "dest_index", "required": false, "type": { @@ -83496,7 +83542,7 @@ "aliases": [ "d" ], - "description": "description", + "description": "A description of the job.", "name": "description", "required": false, "type": { @@ -83512,7 +83558,7 @@ "mml", "modelMemoryLimit" ], - "description": "model memory limit", + "description": "The approximate maximum amount of memory resources that are permitted for the job.", "name": "model_memory_limit", "required": false, "type": { @@ -83527,7 +83573,7 @@ "aliases": [ "s" ], - "description": "job state", + "description": "The current status of the job.", "name": "state", "required": false, "type": { @@ -83543,7 +83589,7 @@ "fr", "failureReason" ], - "description": "failure reason", + "description": "Messages about the reason why the job failed.", "name": "failure_reason", "required": false, "type": { @@ -83558,7 +83604,7 @@ "aliases": [ "p" ], - "description": "progress", + "description": "The progress report for the job by phase.", "name": "progress", "required": false, "type": { @@ -83574,7 +83620,7 @@ "ae", "assignmentExplanation" ], - "description": "why the job is or is not assigned to a node", + "description": "Messages related to the selection of a node.", "name": "assignment_explanation", "required": false, "type": { @@ -83590,7 +83636,7 @@ "ni", "nodeId" ], - "description": "id of the assigned node", + "description": "The unique identifier of the assigned node.", "name": "node.id", "required": false, "type": { @@ -83606,7 +83652,7 @@ "nn", "nodeName" ], - "description": "name of the assigned node", + "description": "The name of the assigned node.", "name": "node.name", "required": false, "type": { @@ -83622,7 +83668,7 @@ "ne", "nodeEphemeralId" ], - "description": "ephemeral id of the assigned node", + "description": "The ephemeral identifier of the assigned node.", "name": "node.ephemeral_id", "required": false, "type": { @@ -83638,7 +83684,7 @@ "na", "nodeAddress" ], - "description": "network address of the assigned node", + "description": "The network address of the assigned node.", "name": "node.address", "required": false, "type": { @@ -83780,7 +83826,7 @@ }, "properties": [ { - "description": "the datafeed_id", + "description": "The datafeed identifier.", "name": "id", "required": false, "type": { @@ -83795,7 +83841,7 @@ "aliases": [ "s" ], - "description": "the datafeed state", + "description": "The status of the datafeed.", "name": "state", "required": false, "type": { @@ -83810,7 +83856,7 @@ "aliases": [ "ae" ], - "description": "why the datafeed is or is not assigned to a node", + "description": "For started datafeeds only, contains messages relating to the selection of a node.", "name": "assignment_explanation", "required": false, "type": { @@ -83826,7 +83872,7 @@ "bc", "bucketsCount" ], - "description": "bucket count", + "description": "The number of buckets processed.", "name": "buckets.count", "required": false, "type": { @@ -83842,7 +83888,7 @@ "sc", "searchCount" ], - "description": "number of searches ran by the datafeed", + "description": "The number of searches run by the datafeed.", "name": "search.count", "required": false, "type": { @@ -83858,7 +83904,7 @@ "st", "searchTime" ], - "description": "the total search time", + "description": "The total time the datafeed spent searching, in milliseconds.", "name": "search.time", "required": false, "type": { @@ -83874,7 +83920,7 @@ "sba", "searchBucketAvg" ], - "description": "the 
average search time per bucket (millisecond)", + "description": "The average search time per bucket, in milliseconds.", "name": "search.bucket_avg", "required": false, "type": { @@ -83890,7 +83936,7 @@ "seah", "searchExpAvgHour" ], - "description": "the exponential average search time per hour (millisecond)", + "description": "The exponential average search time per hour, in milliseconds.", "name": "search.exp_avg_hour", "required": false, "type": { @@ -83906,7 +83952,7 @@ "ni", "nodeId" ], - "description": "id of the assigned node", + "description": "The unique identifier of the assigned node.\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.id", "required": false, "type": { @@ -83922,7 +83968,7 @@ "nn", "nodeName" ], - "description": "name of the assigned node", + "description": "The name of the assigned node.\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.name", "required": false, "type": { @@ -83938,7 +83984,7 @@ "ne", "nodeEphemeralId" ], - "description": "ephemeral id of the assigned node", + "description": "The ephemeral identifier of the assigned node.\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.ephemeral_id", "required": false, "type": { @@ -83954,7 +84000,7 @@ "na", "nodeAddress" ], - "description": "network address of the assigned node", + "description": "The network address of the assigned node.\nFor started datafeeds only, this information pertains to the node upon which the datafeed is started.", "name": "node.address", "required": false, "type": { @@ -83966,7 +84012,7 @@ } } ], - "specLocation": "cat/ml_datafeeds/types.ts#L22-L83" + "specLocation": "cat/ml_datafeeds/types.ts#L22-L87" }, { "attachedBehaviors": [ @@ -84085,7 +84131,7 @@ }, "properties": [ { - "description": "the job_id", + "description": "The anomaly detection job identifier.", "name": "id", "required": false, "type": { @@ -84100,7 +84146,7 @@ "aliases": [ "s" ], - "description": "the job state", + "description": "The status of the anomaly detection job.", "name": "state", "required": false, "type": { @@ -84115,7 +84161,7 @@ "aliases": [ "ot" ], - "description": "the amount of time the job has been opened", + "description": "For open jobs only, the amount of time the job has been opened.", "name": "opened_time", "required": false, "type": { @@ -84130,7 +84176,7 @@ "aliases": [ "ae" ], - "description": "why the job is or is not assigned to a node", + "description": "For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job.", "name": "assignment_explanation", "required": false, "type": { @@ -84146,7 +84192,7 @@ "dpr", "dataProcessedRecords" ], - "description": "number of processed records", + "description": "The number of input documents that have been processed by the anomaly detection job.\nThis value includes documents with missing fields, since they are nonetheless analyzed.\nIf you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents.", "name": "data.processed_records", "required": false, "type": { @@ -84162,7 +84208,7 @@ "dpf", "dataProcessedFields" ], - "description": "number of processed fields", + "description": "The total number of fields in all the documents that have been processed by the anomaly detection job.\nOnly 
fields that are specified in the detector configuration object contribute to this count.\nThe timestamp is not included in this count.", "name": "data.processed_fields", "required": false, "type": { @@ -84178,7 +84224,7 @@ "dib", "dataInputBytes" ], - "description": "total input bytes", + "description": "The number of bytes of input data posted to the anomaly detection job.", "name": "data.input_bytes", "required": false, "type": { @@ -84194,7 +84240,7 @@ "dir", "dataInputRecords" ], - "description": "total record count", + "description": "The number of input documents posted to the anomaly detection job.", "name": "data.input_records", "required": false, "type": { @@ -84210,7 +84256,7 @@ "dif", "dataInputFields" ], - "description": "total field count", + "description": "The total number of fields in input documents posted to the anomaly detection job.\nThis count includes fields that are not used in the analysis.\nHowever, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job.", "name": "data.input_fields", "required": false, "type": { @@ -84226,7 +84272,7 @@ "did", "dataInvalidDates" ], - "description": "number of records with invalid dates", + "description": "The number of input documents with either a missing date field or a date that could not be parsed.", "name": "data.invalid_dates", "required": false, "type": { @@ -84242,7 +84288,7 @@ "dmf", "dataMissingFields" ], - "description": "number of records with missing fields", + "description": "The number of input documents that are missing a field that the anomaly detection job is configured to analyze.\nInput documents with missing fields are still processed because it is possible that not all fields are missing.\nIf you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues.\nIt is not necessarily a cause for concern.", "name": "data.missing_fields", "required": false, "type": { @@ -84258,7 +84304,7 @@ "doot", "dataOutOfOrderTimestamps" ], - "description": "number of records handled out of order", + "description": "The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window.\nThis information is applicable only when you provide data to the anomaly detection job by using the post data API.\nThese out of order documents are discarded, since jobs require time series data to be in ascending chronological order.", "name": "data.out_of_order_timestamps", "required": false, "type": { @@ -84274,7 +84320,7 @@ "deb", "dataEmptyBuckets" ], - "description": "number of empty buckets", + "description": "The number of buckets which did not contain any data.\nIf your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`.", "name": "data.empty_buckets", "required": false, "type": { @@ -84290,7 +84336,7 @@ "dsb", "dataSparseBuckets" ], - "description": "number of sparse buckets", + "description": "The number of buckets that contained few data points compared to the expected number of data points.\nIf your data contains many sparse buckets, consider using a longer `bucket_span`.", "name": "data.sparse_buckets", "required": false, "type": { @@ -84306,7 +84352,7 @@ "db", "dataBuckets" ], - "description": "total bucket count", + "description": "The total number of buckets 
processed.", "name": "data.buckets", "required": false, "type": { @@ -84322,7 +84368,7 @@ "der", "dataEarliestRecord" ], - "description": "earliest record time", + "description": "The timestamp of the earliest chronologically input document.", "name": "data.earliest_record", "required": false, "type": { @@ -84338,7 +84384,7 @@ "dlr", "dataLatestRecord" ], - "description": "latest record time", + "description": "The timestamp of the latest chronologically input document.", "name": "data.latest_record", "required": false, "type": { @@ -84354,7 +84400,7 @@ "dl", "dataLast" ], - "description": "last time data was seen", + "description": "The timestamp at which data was last analyzed, according to server time.", "name": "data.last", "required": false, "type": { @@ -84370,7 +84416,7 @@ "dleb", "dataLastEmptyBucket" ], - "description": "last time an empty bucket occurred", + "description": "The timestamp of the last bucket that did not contain any data.", "name": "data.last_empty_bucket", "required": false, "type": { @@ -84386,7 +84432,7 @@ "dlsb", "dataLastSparseBucket" ], - "description": "last time a sparse bucket occurred", + "description": "The timestamp of the last bucket that was considered sparse.", "name": "data.last_sparse_bucket", "required": false, "type": { @@ -84402,7 +84448,7 @@ "mb", "modelBytes" ], - "description": "model size", + "description": "The number of bytes of memory used by the models.\nThis is the maximum value since the last time the model was persisted.\nIf the job is closed, this value indicates the latest size.", "name": "model.bytes", "required": false, "type": { @@ -84418,7 +84464,7 @@ "mms", "modelMemoryStatus" ], - "description": "current memory status", + "description": "The status of the mathematical models.", "name": "model.memory_status", "required": false, "type": { @@ -84434,7 +84480,7 @@ "mbe", "modelBytesExceeded" ], - "description": "how much the model has exceeded the limit", + "description": "The number of bytes over the high limit for memory usage at the last allocation failure.", "name": "model.bytes_exceeded", "required": false, "type": { @@ -84450,7 +84496,7 @@ "mml", "modelMemoryLimit" ], - "description": "model memory limit", + "description": "The upper limit for model memory usage, checked on increasing values.", "name": "model.memory_limit", "required": false, "type": { @@ -84466,7 +84512,7 @@ "mbf", "modelByFields" ], - "description": "count of 'by' fields", + "description": "The number of `by` field values that were analyzed by the models.\nThis value is cumulative for all detectors in the job.", "name": "model.by_fields", "required": false, "type": { @@ -84482,7 +84528,7 @@ "mof", "modelOverFields" ], - "description": "count of 'over' fields", + "description": "The number of `over` field values that were analyzed by the models.\nThis value is cumulative for all detectors in the job.", "name": "model.over_fields", "required": false, "type": { @@ -84498,7 +84544,7 @@ "mpf", "modelPartitionFields" ], - "description": "count of 'partition' fields", + "description": "The number of `partition` field values that were analyzed by the models.\nThis value is cumulative for all detectors in the job.", "name": "model.partition_fields", "required": false, "type": { @@ -84514,7 +84560,7 @@ "mbaf", "modelBucketAllocationFailures" ], - "description": "number of bucket allocation failures", + "description": "The number of buckets for which new entities in incoming data were not processed due to insufficient model memory.\nThis situation is also signified 
by a `hard_limit: memory_status` property value.", "name": "model.bucket_allocation_failures", "required": false, "type": { @@ -84530,7 +84576,7 @@ "mcs", "modelCategorizationStatus" ], - "description": "current categorization status", + "description": "The status of categorization for the job.", "name": "model.categorization_status", "required": false, "type": { @@ -84546,7 +84592,7 @@ "mcdc", "modelCategorizedDocCount" ], - "description": "count of categorized documents", + "description": "The number of documents that have had a field categorized.", "name": "model.categorized_doc_count", "required": false, "type": { @@ -84562,7 +84608,7 @@ "mtcc", "modelTotalCategoryCount" ], - "description": "count of categories", + "description": "The number of categories created by categorization.", "name": "model.total_category_count", "required": false, "type": { @@ -84577,7 +84623,7 @@ "aliases": [ "modelFrequentCategoryCount" ], - "description": "count of frequent categories", + "description": "The number of categories that match more than 1% of categorized documents.", "name": "model.frequent_category_count", "required": false, "type": { @@ -84593,7 +84639,7 @@ "mrcc", "modelRareCategoryCount" ], - "description": "count of rare categories", + "description": "The number of categories that match just one categorized document.", "name": "model.rare_category_count", "required": false, "type": { @@ -84609,7 +84655,7 @@ "mdcc", "modelDeadCategoryCount" ], - "description": "count of dead categories", + "description": "The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category.\nDead categories are a side effect of the way categorization has no prior training.", "name": "model.dead_category_count", "required": false, "type": { @@ -84625,7 +84671,7 @@ "mfcc", "modelFailedCategoryCount" ], - "description": "count of failed categories", + "description": "The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`.\nThis count does not track which specific categories failed to be created.\nTherefore you cannot use this value to determine the number of unique categories that were missed.", "name": "model.failed_category_count", "required": false, "type": { @@ -84641,7 +84687,7 @@ "mlt", "modelLogTime" ], - "description": "when the model stats were gathered", + "description": "The timestamp when the model stats were gathered, according to server time.", "name": "model.log_time", "required": false, "type": { @@ -84657,7 +84703,7 @@ "mt", "modelTimestamp" ], - "description": "the time of the last record when the model stats were gathered", + "description": "The timestamp of the last record when the model stats were gathered.", "name": "model.timestamp", "required": false, "type": { @@ -84673,7 +84719,7 @@ "ft", "forecastsTotal" ], - "description": "total number of forecasts", + "description": "The number of individual forecasts currently available for the job.\nA value of one or more indicates that forecasts exist.", "name": "forecasts.total", "required": false, "type": { @@ -84689,7 +84735,7 @@ "fmmin", "forecastsMemoryMin" ], - "description": "minimum memory used by forecasts", + "description": "The minimum memory usage in bytes for forecasts related to the anomaly detection job.", "name": "forecasts.memory.min", "required": false, "type": { @@ -84705,7 +84751,7 @@ "fmmax", "forecastsMemoryMax" ], - "description": "maximum memory used 
by forecasts", + "description": "The maximum memory usage in bytes for forecasts related to the anomaly detection job.", "name": "forecasts.memory.max", "required": false, "type": { @@ -84721,7 +84767,7 @@ "fmavg", "forecastsMemoryAvg" ], - "description": "average memory used by forecasts", + "description": "The average memory usage in bytes for forecasts related to the anomaly detection job.", "name": "forecasts.memory.avg", "required": false, "type": { @@ -84737,7 +84783,7 @@ "fmt", "forecastsMemoryTotal" ], - "description": "total memory used by all forecasts", + "description": "The total memory usage in bytes for forecasts related to the anomaly detection job.", "name": "forecasts.memory.total", "required": false, "type": { @@ -84753,7 +84799,7 @@ "frmin", "forecastsRecordsMin" ], - "description": "minimum record count for forecasts", + "description": "The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job.", "name": "forecasts.records.min", "required": false, "type": { @@ -84769,7 +84815,7 @@ "frmax", "forecastsRecordsMax" ], - "description": "maximum record count for forecasts", + "description": "The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job.", "name": "forecasts.records.max", "required": false, "type": { @@ -84785,7 +84831,7 @@ "fravg", "forecastsRecordsAvg" ], - "description": "average record count for forecasts", + "description": "The average number of `model_forecast` documents written for forecasts related to the anomaly detection job.", "name": "forecasts.records.avg", "required": false, "type": { @@ -84801,7 +84847,7 @@ "frt", "forecastsRecordsTotal" ], - "description": "total record count for all forecasts", + "description": "The total number of `model_forecast` documents written for forecasts related to the anomaly detection job.", "name": "forecasts.records.total", "required": false, "type": { @@ -84817,7 +84863,7 @@ "ftmin", "forecastsTimeMin" ], - "description": "minimum runtime for forecasts", + "description": "The minimum runtime in milliseconds for forecasts related to the anomaly detection job.", "name": "forecasts.time.min", "required": false, "type": { @@ -84833,7 +84879,7 @@ "ftmax", "forecastsTimeMax" ], - "description": "maximum run time for forecasts", + "description": "The maximum runtime in milliseconds for forecasts related to the anomaly detection job.", "name": "forecasts.time.max", "required": false, "type": { @@ -84849,7 +84895,7 @@ "ftavg", "forecastsTimeAvg" ], - "description": "average runtime for all forecasts (milliseconds)", + "description": "The average runtime in milliseconds for forecasts related to the anomaly detection job.", "name": "forecasts.time.avg", "required": false, "type": { @@ -84865,7 +84911,7 @@ "ftt", "forecastsTimeTotal" ], - "description": "total runtime for all forecasts", + "description": "The total runtime in milliseconds for forecasts related to the anomaly detection job.", "name": "forecasts.time.total", "required": false, "type": { @@ -84881,7 +84927,7 @@ "ni", "nodeId" ], - "description": "id of the assigned node", + "description": "The uniqe identifier of the assigned node.", "name": "node.id", "required": false, "type": { @@ -84897,7 +84943,7 @@ "nn", "nodeName" ], - "description": "name of the assigned node", + "description": "The name of the assigned node.", "name": "node.name", "required": false, "type": { @@ -84913,7 +84959,7 @@ "ne", "nodeEphemeralId" ], - "description": "ephemeral id of the assigned 
node", + "description": "The ephemeral identifier of the assigned node.", "name": "node.ephemeral_id", "required": false, "type": { @@ -84929,7 +84975,7 @@ "na", "nodeAddress" ], - "description": "network address of the assigned node", + "description": "The network address of the assigned node.", "name": "node.address", "required": false, "type": { @@ -84945,7 +84991,7 @@ "bc", "bucketsCount" ], - "description": "bucket count", + "description": "The number of bucket results produced by the job.", "name": "buckets.count", "required": false, "type": { @@ -84961,7 +85007,7 @@ "btt", "bucketsTimeTotal" ], - "description": "total bucket processing time", + "description": "The sum of all bucket processing times, in milliseconds.", "name": "buckets.time.total", "required": false, "type": { @@ -84977,7 +85023,7 @@ "btmin", "bucketsTimeMin" ], - "description": "minimum bucket processing time", + "description": "The minimum of all bucket processing times, in milliseconds.", "name": "buckets.time.min", "required": false, "type": { @@ -84993,7 +85039,7 @@ "btmax", "bucketsTimeMax" ], - "description": "maximum bucket processing time", + "description": "The maximum of all bucket processing times, in milliseconds.", "name": "buckets.time.max", "required": false, "type": { @@ -85009,7 +85055,7 @@ "btea", "bucketsTimeExpAvg" ], - "description": "exponential average bucket processing time (milliseconds)", + "description": "The exponential moving average of all bucket processing times, in milliseconds.", "name": "buckets.time.exp_avg", "required": false, "type": { @@ -85025,7 +85071,7 @@ "bteah", "bucketsTimeExpAvgHour" ], - "description": "exponential average bucket processing time by hour (milliseconds)", + "description": "The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds.", "name": "buckets.time.exp_avg_hour", "required": false, "type": { @@ -85037,7 +85083,7 @@ } } ], - "specLocation": "cat/ml_jobs/types.ts#L24-L325" + "specLocation": "cat/ml_jobs/types.ts#L24-L347" }, { "attachedBehaviors": [ @@ -85182,7 +85228,7 @@ }, "path": [ { - "description": "The ID of the trained models stats to fetch", + "description": "A unique identifier for the trained model.", "name": "model_id", "required": false, "type": { @@ -85196,9 +85242,10 @@ ], "query": [ { - "description": "Whether to ignore if a wildcard expression matches no trained models. 
(This includes `_all` string or when no trained models have been specified)", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.\nIf `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the API returns a 404 status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, + "serverDefault": true, "type": { "kind": "instance_of", "type": { @@ -85208,7 +85255,7 @@ } }, { - "description": "The unit in which to display byte values", + "description": "The unit used to display byte values.", "name": "bytes", "required": false, "type": { @@ -85220,7 +85267,7 @@ } }, { - "description": "Comma-separated list of column names to display", + "description": "A comma-separated list of column names to display.", "name": "h", "required": false, "type": { @@ -85232,7 +85279,7 @@ } }, { - "description": "Comma-separated list of column names or column aliases to sort by", + "description": "A comma-separated list of column names or aliases used to sort the response.", "name": "s", "required": false, "type": { @@ -85244,7 +85291,7 @@ } }, { - "description": "skips a number of trained models", + "description": "Skips the specified number of trained models.", "name": "from", "required": false, "type": { @@ -85256,7 +85303,7 @@ } }, { - "description": "specifies a max number of trained models to get", + "description": "The maximum number of trained models to display.", "name": "size", "required": false, "type": { @@ -85268,7 +85315,7 @@ } } ], - "specLocation": "cat/ml_trained_models/CatTrainedModelsRequest.ts#L24-L48" + "specLocation": "cat/ml_trained_models/CatTrainedModelsRequest.ts#L24-L63" }, { "body": { @@ -85299,7 +85346,7 @@ }, "properties": [ { - "description": "the trained model id", + "description": "The model identifier.", "name": "id", "required": false, "type": { @@ -85315,7 +85362,7 @@ "c", "createdBy" ], - "description": "who created the model", + "description": "Information about the creator of the model.", "name": "created_by", "required": false, "type": { @@ -85331,7 +85378,7 @@ "hs", "modelHeapSize" ], - "description": "the estimated heap size to keep the model in memory", + "description": "The estimated heap size to keep the model in memory.", "name": "heap_size", "required": false, "type": { @@ -85347,7 +85394,7 @@ "o", "modelOperations" ], - "description": "the estimated number of operations to use the model", + "description": "The estimated number of operations to use the model.\nThis number helps to measure the computational complexity of the model.", "name": "operations", "required": false, "type": { @@ -85362,7 +85409,7 @@ "aliases": [ "l" ], - "description": "The license level of the model", + "description": "The license level of the model.", "name": "license", "required": false, "type": { @@ -85377,7 +85424,7 @@ "aliases": [ "ct" ], - "description": "The time the model was created", + "description": "The time the model was created.", "name": "create_time", "required": false, "type": { @@ -85392,7 +85439,7 @@ "aliases": [ "v" ], - "description": "The version of Elasticsearch when the model was created", + "description": "The version of Elasticsearch when the model was created.", "name": "version", "required": false, "type": { @@ -85407,7 +85454,7 @@ "aliases": [ 
"d" ], - "description": "The model description", + "description": "A description of the model.", "name": "description", "required": false, "type": { @@ -85423,7 +85470,7 @@ "ip", "ingestPipelines" ], - "description": "The number of pipelines referencing the model", + "description": "The number of pipelines that are referencing the model.", "name": "ingest.pipelines", "required": false, "type": { @@ -85439,7 +85486,7 @@ "ic", "ingestCount" ], - "description": "The total number of docs processed by the model", + "description": "The total number of documents that are processed by the model.", "name": "ingest.count", "required": false, "type": { @@ -85455,7 +85502,7 @@ "it", "ingestTime" ], - "description": "The total time spent processing docs with this model", + "description": "The total time spent processing documents with thie model.", "name": "ingest.time", "required": false, "type": { @@ -85471,7 +85518,7 @@ "icurr", "ingestCurrent" ], - "description": "The total documents currently being handled by the model", + "description": "The total number of documents that are currently being handled by the model.", "name": "ingest.current", "required": false, "type": { @@ -85487,7 +85534,7 @@ "if", "ingestFailed" ], - "description": "The total count of failed ingest attempts with this model", + "description": "The total number of failed ingest attempts with the model.", "name": "ingest.failed", "required": false, "type": { @@ -85503,7 +85550,7 @@ "dfid", "dataFrameAnalytics" ], - "description": "The data frame analytics config id that created the model (if still available)", + "description": "The identifier for the data frame analytics job that created the model.\nOnly displayed if the job is still available.", "name": "data_frame.id", "required": false, "type": { @@ -85519,7 +85566,7 @@ "dft", "dataFrameAnalyticsTime" ], - "description": "The time the data frame analytics config was created", + "description": "The time the data frame analytics job was created.", "name": "data_frame.create_time", "required": false, "type": { @@ -85535,7 +85582,7 @@ "dfsi", "dataFrameAnalyticsSrcIndex" ], - "description": "The source index used to train in the data frame analysis", + "description": "The source index used to train in the data frame analysis.", "name": "data_frame.source_index", "required": false, "type": { @@ -85551,7 +85598,7 @@ "dfa", "dataFrameAnalyticsAnalysis" ], - "description": "The analysis used by the data frame to build the model", + "description": "The analysis used by the data frame to build the model.", "name": "data_frame.analysis", "required": false, "type": { @@ -85581,7 +85628,7 @@ } } ], - "specLocation": "cat/ml_trained_models/types.ts#L23-L114" + "specLocation": "cat/ml_trained_models/types.ts#L23-L115" }, { "kind": "interface", @@ -87253,7 +87300,7 @@ } } ], - "specLocation": "cat/nodes/types.ts#L23-L543" + "specLocation": "cat/nodes/types.ts#L23-L542" }, { "attachedBehaviors": [ @@ -88219,7 +88266,7 @@ "body": { "kind": "no_body" }, - "description": "Provides low-level information about the segments in the shards of an index.", + "description": "Returns low-level information about the Lucene segments in index shards.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. 
For application consumption, use the index segments API.", "inherits": { "type": { "name": "CatRequestBase", @@ -88233,7 +88280,7 @@ }, "path": [ { - "description": "A comma-separated list of index names to limit the returned information", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`).\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -88247,7 +88294,7 @@ ], "query": [ { - "description": "The unit in which to display byte values", + "description": "The unit used to display byte values.", "name": "bytes", "required": false, "type": { @@ -88259,7 +88306,7 @@ } } ], - "specLocation": "cat/segments/CatSegmentsRequest.ts#L23-L36" + "specLocation": "cat/segments/CatSegmentsRequest.ts#L23-L49" }, { "body": { @@ -88294,7 +88341,7 @@ "i", "idx" ], - "description": "index name", + "description": "The index name.", "name": "index", "required": false, "type": { @@ -88310,7 +88357,7 @@ "s", "sh" ], - "description": "shard name", + "description": "The shard name.", "name": "shard", "required": false, "type": { @@ -88327,7 +88374,7 @@ "pr", "primaryOrReplica" ], - "description": "primary or replica", + "description": "The shard type: `primary` or `replica`.", "name": "prirep", "required": false, "type": { @@ -88339,7 +88386,7 @@ } }, { - "description": "ip of node where it lives", + "description": "The IP address of the node where it lives.", "name": "ip", "required": false, "type": { @@ -88351,7 +88398,7 @@ } }, { - "description": "unique id of node where it lives", + "description": "The unique identifier of the node where it lives.", "name": "id", "required": false, "type": { @@ -88366,7 +88413,7 @@ "aliases": [ "seg" ], - "description": "segment name", + "description": "The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard.", "name": "segment", "required": false, "type": { @@ -88382,7 +88429,7 @@ "g", "gen" ], - "description": "segment generation", + "description": "The segment generation number.\nElasticsearch increments this generation number for each segment written then uses this number to derive the segment name.", "name": "generation", "required": false, "type": { @@ -88398,7 +88445,7 @@ "dc", "docsCount" ], - "description": "number of docs in segment", + "description": "The number of documents in the segment.\nThis excludes deleted documents and counts any nested documents separately from their parents.\nIt also excludes documents which were indexed recently and do not yet belong to a segment.", "name": "docs.count", "required": false, "type": { @@ -88414,7 +88461,7 @@ "dd", "docsDeleted" ], - "description": "number of deleted docs in segment", + "description": "The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed.\nThis number excludes deletes that were performed recently and do not yet belong to a segment.\nDeleted documents are cleaned up by the automatic merge process if it makes sense to do so.\nAlso, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard.", "name": "docs.deleted", "required": false, "type": { @@ -88429,7 +88476,7 @@ "aliases": [ "si" ], - "description": "segment size in bytes", + "description": "The segment size in bytes.", "name": "size", "required": false, "type": { @@ -88445,7 +88492,7 @@ 
"sm", "sizeMemory" ], - "description": "segment memory in bytes", + "description": "The segment memory in bytes.\nA value of `-1` indicates Elasticsearch was unable to compute this number.", "name": "size.memory", "required": false, "type": { @@ -88461,7 +88508,7 @@ "ic", "isCommitted" ], - "description": "is segment committed", + "description": "If `true`, the segment is synced to disk.\nSegments that are synced can survive a hard reboot.\nIf `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start.", "name": "committed", "required": false, "type": { @@ -88477,7 +88524,7 @@ "is", "isSearchable" ], - "description": "is segment searched", + "description": "If `true`, the segment is searchable.\nIf `false`, the segment has most likely been written to disk but needs a refresh to be searchable.", "name": "searchable", "required": false, "type": { @@ -88492,7 +88539,7 @@ "aliases": [ "v" ], - "description": "version", + "description": "The version of Lucene used to write the segment.", "name": "version", "required": false, "type": { @@ -88508,7 +88555,7 @@ "ico", "isCompound" ], - "description": "is segment compound", + "description": "If `true`, the segment is stored in a compound file.\nThis means Lucene merged all files from the segment in a single file to save file descriptors.", "name": "compound", "required": false, "type": { @@ -88520,7 +88567,7 @@ } } ], - "specLocation": "cat/segments/types.ts#L22-L96" + "specLocation": "cat/segments/types.ts#L22-L107" }, { "attachedBehaviors": [ @@ -88530,7 +88577,7 @@ "body": { "kind": "no_body" }, - "description": "Provides a detailed view of shard allocation on nodes.", + "description": "Returns information about the shards in a cluster.\nFor data streams, the API returns information about the backing indices.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. 
They are not intended for use by applications.", "inherits": { "type": { "name": "CatRequestBase", @@ -88544,7 +88591,7 @@ }, "path": [ { - "description": "A comma-separated list of index names to limit the returned information", + "description": "A comma-separated list of data streams, indices, and aliases used to limit the request.\nSupports wildcards (`*`).\nTo target all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -88558,7 +88605,7 @@ ], "query": [ { - "description": "The unit in which to display byte values", + "description": "The unit used to display byte values.", "name": "bytes", "required": false, "type": { @@ -88570,7 +88617,7 @@ } } ], - "specLocation": "cat/shards/CatShardsRequest.ts#L23-L36" + "specLocation": "cat/shards/CatShardsRequest.ts#L23-L49" }, { "body": { @@ -88605,7 +88652,7 @@ "i", "idx" ], - "description": "index name", + "description": "The index name.", "name": "index", "required": false, "type": { @@ -88621,7 +88668,7 @@ "s", "sh" ], - "description": "shard name", + "description": "The shard name.", "name": "shard", "required": false, "type": { @@ -88638,7 +88685,7 @@ "pr", "primaryOrReplica" ], - "description": "primary or replica", + "description": "The shard type: `primary` or `replica`.", "name": "prirep", "required": false, "type": { @@ -88653,7 +88700,7 @@ "aliases": [ "st" ], - "description": "shard state", + "description": "The shard state.\nReturned values include:\n`INITIALIZING`: The shard is recovering from a peer shard or gateway.\n`RELOCATING`: The shard is relocating.\n`STARTED`: The shard has started.\n`UNASSIGNED`: The shard is not assigned to any node.", "name": "state", "required": false, "type": { @@ -88669,7 +88716,7 @@ "d", "dc" ], - "description": "number of docs in shard", + "description": "The number of documents in the shard.", "name": "docs", "required": false, "type": { @@ -88696,7 +88743,7 @@ "aliases": [ "sto" ], - "description": "store size of shard (how much disk it uses)", + "description": "The disk space used by the shard.", "name": "store", "required": false, "type": { @@ -88720,7 +88767,7 @@ } }, { - "description": "ip of node where it lives", + "description": "The IP address of the node.", "name": "ip", "required": false, "type": { @@ -88744,7 +88791,7 @@ } }, { - "description": "unique id of node where it lives", + "description": "The unique identifier for the node.", "name": "id", "required": false, "type": { @@ -88759,7 +88806,7 @@ "aliases": [ "n" ], - "description": "name of node where it lives", + "description": "The name of the node.", "name": "node", "required": false, "type": { @@ -88783,7 +88830,7 @@ } }, { - "description": "sync id", + "description": "The sync identifier.", "name": "sync_id", "required": false, "type": { @@ -88798,7 +88845,7 @@ "aliases": [ "ur" ], - "description": "reason shard is unassigned", + "description": "The reason for the last change to the state of an unassigned shard.\nIt does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information.\nReturned values include:\n`ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard.\n`CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery.\n`DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index.\n`EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index.\n`FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty 
primary using the cluster reroute API.\n`INDEX_CLOSED`: Unassigned because the index was closed.\n`INDEX_CREATED`: Unassigned as a result of an API creation of an index.\n`INDEX_REOPENED`: Unassigned as a result of opening a closed index.\n`MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API.\n`NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index.\n`NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster.\n`NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API.\n`PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed.\n`REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled.\n`REINITIALIZED`: When a shard moves from started back to initializing.\n`REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica.\n`REROUTE_CANCELLED`: Unassigned as a result of explicit cancel reroute command.", "name": "unassigned.reason", "required": false, "type": { @@ -88813,7 +88860,7 @@ "aliases": [ "ua" ], - "description": "time shard became unassigned (UTC)", + "description": "The time at which the shard became unassigned in Coordinated Universal Time (UTC).", "name": "unassigned.at", "required": false, "type": { @@ -88828,7 +88875,7 @@ "aliases": [ "uf" ], - "description": "time has been unassigned", + "description": "The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC).", "name": "unassigned.for", "required": false, "type": { @@ -88843,7 +88890,7 @@ "aliases": [ "ud" ], - "description": "additional details as to why the shard became unassigned", + "description": "Additional details as to why the shard became unassigned.\nIt does not explain why the shard is not assigned; use the cluster allocation explain API for that information.", "name": "unassigned.details", "required": false, "type": { @@ -88858,7 +88905,7 @@ "aliases": [ "rs" ], - "description": "recovery source type", + "description": "The type of recovery source.", "name": "recoverysource.type", "required": false, "type": { @@ -88874,7 +88921,7 @@ "cs", "completionSize" ], - "description": "size of completion", + "description": "The size of completion.", "name": "completion.size", "required": false, "type": { @@ -88890,7 +88937,7 @@ "fm", "fielddataMemory" ], - "description": "used fielddata cache", + "description": "The used fielddata cache memory.", "name": "fielddata.memory_size", "required": false, "type": { @@ -88906,7 +88953,7 @@ "fe", "fielddataEvictions" ], - "description": "fielddata evictions", + "description": "The fielddata cache evictions.", "name": "fielddata.evictions", "required": false, "type": { @@ -88922,7 +88969,7 @@ "qcm", "queryCacheMemory" ], - "description": "used query cache", + "description": "The used query cache memory.", "name": "query_cache.memory_size", "required": false, "type": { @@ -88938,7 +88985,7 @@ "qce", "queryCacheEvictions" ], - "description": "query cache evictions", + "description": "The query cache evictions.", "name": "query_cache.evictions", "required": false, "type": { @@ -88954,7 +89001,7 @@ "ft", "flushTotal" ], - "description": "number of flushes", + "description": "The number of flushes.", "name": "flush.total", "required": false, "type": { @@ -88970,7 +89017,7 @@ "ftt", "flushTotalTime" ], - "description": "time spent in flush", + "description": "The time spent in 
flush.", "name": "flush.total_time", "required": false, "type": { @@ -88986,7 +89033,7 @@ "gc", "getCurrent" ], - "description": "number of current get ops", + "description": "The number of current get operations.", "name": "get.current", "required": false, "type": { @@ -89002,7 +89049,7 @@ "gti", "getTime" ], - "description": "time spent in get", + "description": "The time spent in get operations.", "name": "get.time", "required": false, "type": { @@ -89018,7 +89065,7 @@ "gto", "getTotal" ], - "description": "number of get ops", + "description": "The number of get operations.", "name": "get.total", "required": false, "type": { @@ -89034,7 +89081,7 @@ "geti", "getExistsTime" ], - "description": "time spent in successful gets", + "description": "The time spent in successful get operations.", "name": "get.exists_time", "required": false, "type": { @@ -89050,7 +89097,7 @@ "geto", "getExistsTotal" ], - "description": "number of successful gets", + "description": "The number of successful get operations.", "name": "get.exists_total", "required": false, "type": { @@ -89066,7 +89113,7 @@ "gmti", "getMissingTime" ], - "description": "time spent in failed gets", + "description": "The time spent in failed get operations.", "name": "get.missing_time", "required": false, "type": { @@ -89082,7 +89129,7 @@ "gmto", "getMissingTotal" ], - "description": "number of failed gets", + "description": "The number of failed get operations.", "name": "get.missing_total", "required": false, "type": { @@ -89098,7 +89145,7 @@ "idc", "indexingDeleteCurrent" ], - "description": "number of current deletions", + "description": "The number of current deletion operations.", "name": "indexing.delete_current", "required": false, "type": { @@ -89114,7 +89161,7 @@ "idti", "indexingDeleteTime" ], - "description": "time spent in deletions", + "description": "The time spent in deletion operations.", "name": "indexing.delete_time", "required": false, "type": { @@ -89130,7 +89177,7 @@ "idto", "indexingDeleteTotal" ], - "description": "number of delete ops", + "description": "The number of delete operations.", "name": "indexing.delete_total", "required": false, "type": { @@ -89146,7 +89193,7 @@ "iic", "indexingIndexCurrent" ], - "description": "number of current indexing ops", + "description": "The number of current indexing operations.", "name": "indexing.index_current", "required": false, "type": { @@ -89162,7 +89209,7 @@ "iiti", "indexingIndexTime" ], - "description": "time spent in indexing", + "description": "The time spent in indexing operations.", "name": "indexing.index_time", "required": false, "type": { @@ -89178,7 +89225,7 @@ "iito", "indexingIndexTotal" ], - "description": "number of indexing ops", + "description": "The number of indexing operations.", "name": "indexing.index_total", "required": false, "type": { @@ -89194,7 +89241,7 @@ "iif", "indexingIndexFailed" ], - "description": "number of failed indexing ops", + "description": "The number of failed indexing operations.", "name": "indexing.index_failed", "required": false, "type": { @@ -89210,7 +89257,7 @@ "mc", "mergesCurrent" ], - "description": "number of current merges", + "description": "The number of current merge operations.", "name": "merges.current", "required": false, "type": { @@ -89226,7 +89273,7 @@ "mcd", "mergesCurrentDocs" ], - "description": "number of current merging docs", + "description": "The number of current merging documents.", "name": "merges.current_docs", "required": false, "type": { @@ -89242,7 +89289,7 @@ "mcs", "mergesCurrentSize" ], - 
"description": "size of current merges", + "description": "The size of current merge operations.", "name": "merges.current_size", "required": false, "type": { @@ -89258,7 +89305,7 @@ "mt", "mergesTotal" ], - "description": "number of completed merge ops", + "description": "The number of completed merge operations.", "name": "merges.total", "required": false, "type": { @@ -89274,7 +89321,7 @@ "mtd", "mergesTotalDocs" ], - "description": "docs merged", + "description": "The nuber of merged documents.", "name": "merges.total_docs", "required": false, "type": { @@ -89290,7 +89337,7 @@ "mts", "mergesTotalSize" ], - "description": "size merged", + "description": "The size of current merges.", "name": "merges.total_size", "required": false, "type": { @@ -89306,7 +89353,7 @@ "mtt", "mergesTotalTime" ], - "description": "time spent in merges", + "description": "The time spent merging documents.", "name": "merges.total_time", "required": false, "type": { @@ -89318,7 +89365,7 @@ } }, { - "description": "total refreshes", + "description": "The total number of refreshes.", "name": "refresh.total", "required": false, "type": { @@ -89330,7 +89377,7 @@ } }, { - "description": "time spent in refreshes", + "description": "The time spent in refreshes.", "name": "refresh.time", "required": false, "type": { @@ -89346,7 +89393,7 @@ "rto", "refreshTotal" ], - "description": "total external refreshes", + "description": "The total nunber of external refreshes.", "name": "refresh.external_total", "required": false, "type": { @@ -89362,7 +89409,7 @@ "rti", "refreshTime" ], - "description": "time spent in external refreshes", + "description": "The time spent in external refreshes.", "name": "refresh.external_time", "required": false, "type": { @@ -89378,7 +89425,7 @@ "rli", "refreshListeners" ], - "description": "number of pending refresh listeners", + "description": "The number of pending refresh listeners.", "name": "refresh.listeners", "required": false, "type": { @@ -89394,7 +89441,7 @@ "sfc", "searchFetchCurrent" ], - "description": "current fetch phase ops", + "description": "The current fetch phase operations.", "name": "search.fetch_current", "required": false, "type": { @@ -89410,7 +89457,7 @@ "sfti", "searchFetchTime" ], - "description": "time spent in fetch phase", + "description": "The time spent in fetch phase.", "name": "search.fetch_time", "required": false, "type": { @@ -89426,7 +89473,7 @@ "sfto", "searchFetchTotal" ], - "description": "total fetch ops", + "description": "The total number of fetch operations.", "name": "search.fetch_total", "required": false, "type": { @@ -89442,7 +89489,7 @@ "so", "searchOpenContexts" ], - "description": "open search contexts", + "description": "The number of open search contexts.", "name": "search.open_contexts", "required": false, "type": { @@ -89458,7 +89505,7 @@ "sqc", "searchQueryCurrent" ], - "description": "current query phase ops", + "description": "The current query phase operations.", "name": "search.query_current", "required": false, "type": { @@ -89474,7 +89521,7 @@ "sqti", "searchQueryTime" ], - "description": "time spent in query phase", + "description": "The time spent in query phase.", "name": "search.query_time", "required": false, "type": { @@ -89490,7 +89537,7 @@ "sqto", "searchQueryTotal" ], - "description": "total query phase ops", + "description": "The total number of query phase operations.", "name": "search.query_total", "required": false, "type": { @@ -89506,7 +89553,7 @@ "scc", "searchScrollCurrent" ], - "description": "open scroll 
contexts", + "description": "The open scroll contexts.", "name": "search.scroll_current", "required": false, "type": { @@ -89522,7 +89569,7 @@ "scti", "searchScrollTime" ], - "description": "time scroll contexts held open", + "description": "The time scroll contexts were held open.", "name": "search.scroll_time", "required": false, "type": { @@ -89538,7 +89585,7 @@ "scto", "searchScrollTotal" ], - "description": "completed scroll contexts", + "description": "The number of completed scroll contexts.", "name": "search.scroll_total", "required": false, "type": { @@ -89554,7 +89601,7 @@ "sc", "segmentsCount" ], - "description": "number of segments", + "description": "The number of segments.", "name": "segments.count", "required": false, "type": { @@ -89570,7 +89617,7 @@ "sm", "segmentsMemory" ], - "description": "memory used by segments", + "description": "The memory used by segments.", "name": "segments.memory", "required": false, "type": { @@ -89586,7 +89633,7 @@ "siwm", "segmentsIndexWriterMemory" ], - "description": "memory used by index writer", + "description": "The memory used by the index writer.", "name": "segments.index_writer_memory", "required": false, "type": { @@ -89602,7 +89649,7 @@ "svmm", "segmentsVersionMapMemory" ], - "description": "memory used by version map", + "description": "The memory used by the version map.", "name": "segments.version_map_memory", "required": false, "type": { @@ -89618,7 +89665,7 @@ "sfbm", "fixedBitsetMemory" ], - "description": "memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields", + "description": "The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields.", "name": "segments.fixed_bitset_memory", "required": false, "type": { @@ -89634,7 +89681,7 @@ "sqm", "maxSeqNo" ], - "description": "max sequence number", + "description": "The maximum sequence number.", "name": "seq_no.max", "required": false, "type": { @@ -89650,7 +89697,7 @@ "sql", "localCheckpoint" ], - "description": "local checkpoint", + "description": "The local checkpoint.", "name": "seq_no.local_checkpoint", "required": false, "type": { @@ -89666,7 +89713,7 @@ "sqg", "globalCheckpoint" ], - "description": "global checkpoint", + "description": "The global checkpoint.", "name": "seq_no.global_checkpoint", "required": false, "type": { @@ -89682,7 +89729,7 @@ "wc", "warmerCurrent" ], - "description": "current warmer ops", + "description": "The number of current warmer operations.", "name": "warmer.current", "required": false, "type": { @@ -89698,7 +89745,7 @@ "wto", "warmerTotal" ], - "description": "total warmer ops", + "description": "The total number of warmer operations.", "name": "warmer.total", "required": false, "type": { @@ -89714,7 +89761,7 @@ "wtt", "warmerTotalTime" ], - "description": "time spent in warmers", + "description": "The time spent in warmer operations.", "name": "warmer.total_time", "required": false, "type": { @@ -89730,7 +89777,7 @@ "pd", "dataPath" ], - "description": "shard data path", + "description": "The shard data path.", "name": "path.data", "required": false, "type": { @@ -89746,7 +89793,7 @@ "ps", "statsPath" ], - "description": "shard state path", + "description": "The shard state path.", "name": "path.state", "required": false, "type": { @@ -89762,7 +89809,7 @@ "bto", "bulkTotalOperations" ], - "description": "number of bulk shard ops", + "description": "The number of bulk shard operations.", "name": 
"bulk.total_operations", "required": false, "type": { @@ -89778,7 +89825,7 @@ "btti", "bulkTotalTime" ], - "description": "time spend in shard bulk", + "description": "The time spent in shard bulk operations.", "name": "bulk.total_time", "required": false, "type": { @@ -89794,7 +89841,7 @@ "btsi", "bulkTotalSizeInBytes" ], - "description": "total size in bytes of shard bulk", + "description": "The total size in bytes of shard bulk operations.", "name": "bulk.total_size_in_bytes", "required": false, "type": { @@ -89810,7 +89857,7 @@ "bati", "bulkAvgTime" ], - "description": "average time spend in shard bulk", + "description": "The average time spent in shard bulk operations.", "name": "bulk.avg_time", "required": false, "type": { @@ -89826,7 +89873,7 @@ "basi", "bulkAvgSizeInBytes" ], - "description": "avg size in bytes of shard bulk", + "description": "The average size in bytes of shard bulk operations.", "name": "bulk.avg_size_in_bytes", "required": false, "type": { @@ -89838,7 +89885,7 @@ } } ], - "specLocation": "cat/shards/types.ts#L20-L396" + "specLocation": "cat/shards/types.ts#L20-L421" }, { "attachedBehaviors": [ @@ -89848,7 +89895,7 @@ "body": { "kind": "no_body" }, - "description": "Returns all snapshots in a specific repository.", + "description": "Returns information about the snapshots stored in one or more repositories.\nA snapshot is a backup of an index or running Elasticsearch cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.", "inherits": { "type": { "name": "CatRequestBase", @@ -89862,7 +89909,7 @@ }, "path": [ { - "description": "Name of repository from which to fetch the snapshot information", + "description": "A comma-separated list of snapshot repositories used to limit the request.\nAccepts wildcard expressions.\n`_all` returns all repositories.\nIf any repository fails during the request, Elasticsearch returns an error.", "name": "repository", "required": false, "type": { @@ -89876,9 +89923,10 @@ ], "query": [ { - "description": "Set to true to ignore unavailable snapshots", + "description": "If `true`, the response does not include information from unavailable snapshots.", "name": "ignore_unavailable", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { @@ -89888,7 +89936,7 @@ } } ], - "specLocation": "cat/snapshots/CatSnapshotsRequest.ts#L23-L36" + "specLocation": "cat/snapshots/CatSnapshotsRequest.ts#L23-L50" }, { "body": { @@ -89922,7 +89970,7 @@ "aliases": [ "snapshot" ], - "description": "unique snapshot", + "description": "The unique identifier for the snapshot.", "name": "id", "required": false, "type": { @@ -89938,7 +89986,7 @@ "re", "repo" ], - "description": "repository name", + "description": "The repository name.", "name": "repository", "required": false, "type": { @@ -89953,7 +90001,7 @@ "aliases": [ "s" ], - "description": "snapshot name", + "description": "The state of the snapshot process.\nReturned values include:\n`FAILED`: The snapshot process failed.\n`INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version.\n`IN_PROGRESS`: The snapshot process started but has not completed.\n`PARTIAL`: The snapshot process completed with a partial success.\n`SUCCESS`: The snapshot process completed with a full success.", "name": "status", "required": false, "type": { @@ -89969,7 +90017,7 @@ "ste", "startEpoch" ], - "description": 
"start time in seconds since 1970-01-01 00:00:00", + "description": "The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started.", "name": "start_epoch", "required": false, "type": { @@ -90003,7 +90051,7 @@ "sti", "startTime" ], - "description": "start time in HH:MM:SS", + "description": "The time (HH:MM:SS) at which the snapshot process started.", "name": "start_time", "required": false, "type": { @@ -90019,7 +90067,7 @@ "ete", "endEpoch" ], - "description": "end time in seconds since 1970-01-01 00:00:00", + "description": "The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended.", "name": "end_epoch", "required": false, "type": { @@ -90053,7 +90101,7 @@ "eti", "endTime" ], - "description": "end time in HH:MM:SS", + "description": "The time (HH:MM:SS) at which the snapshot process ended.", "name": "end_time", "required": false, "type": { @@ -90068,7 +90116,7 @@ "aliases": [ "dur" ], - "description": "duration", + "description": "The time it took the snapshot process to complete, in time units.", "name": "duration", "required": false, "type": { @@ -90083,7 +90131,7 @@ "aliases": [ "i" ], - "description": "number of indices", + "description": "The number of indices in the snapshot.", "name": "indices", "required": false, "type": { @@ -90098,7 +90146,7 @@ "aliases": [ "ss" ], - "description": "number of successful shards", + "description": "The number of successful shards in the snapshot.", "name": "successful_shards", "required": false, "type": { @@ -90113,7 +90161,7 @@ "aliases": [ "fs" ], - "description": "number of failed shards", + "description": "The number of failed shards in the snapshot.", "name": "failed_shards", "required": false, "type": { @@ -90128,7 +90176,7 @@ "aliases": [ "ts" ], - "description": "number of total shards", + "description": "The total number of shards in the snapshot.", "name": "total_shards", "required": false, "type": { @@ -90143,7 +90191,7 @@ "aliases": [ "r" ], - "description": "reason for failures", + "description": "The reason for any snapshot failures.", "name": "reason", "required": false, "type": { @@ -90155,7 +90203,7 @@ } } ], - "specLocation": "cat/snapshots/types.ts#L24-L90" + "specLocation": "cat/snapshots/types.ts#L24-L96" }, { "attachedBehaviors": [ @@ -90165,7 +90213,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about the tasks currently executing on one or more nodes in the cluster.", + "description": "Returns information about tasks currently executing in the cluster.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.", "inherits": { "type": { "name": "CatRequestBase", @@ -90180,7 +90228,7 @@ "path": [], "query": [ { - "description": "A comma-separated list of actions that should be returned. 
Leave empty to return all.", + "description": "The task action names, which are used to limit the response.", "name": "actions", "required": false, "type": { @@ -90195,9 +90243,10 @@ } }, { - "description": "Return detailed task information (default: false)", + "description": "If `true`, the response includes detailed information about shard recoveries.", "name": "detailed", "required": false, + "serverDefault": false, "type": { "kind": "instance_of", "type": { @@ -90207,6 +90256,7 @@ } }, { + "description": "Unique node identifiers, which are used to limit the response.", "name": "node_id", "required": false, "type": { @@ -90221,18 +90271,19 @@ } }, { - "name": "parent_task", + "description": "The parent task identifier, which is used to limit the response.", + "name": "parent_task_id", "required": false, "type": { "kind": "instance_of", "type": { - "name": "long", - "namespace": "_types" + "name": "string", + "namespace": "_builtins" } } } ], - "specLocation": "cat/tasks/CatTasksRequest.ts#L23-L36" + "specLocation": "cat/tasks/CatTasksRequest.ts#L23-L48" }, { "body": { @@ -90263,7 +90314,7 @@ }, "properties": [ { - "description": "id of the task with the node", + "description": "The identifier of the task with the node.", "name": "id", "required": false, "type": { @@ -90278,7 +90329,7 @@ "aliases": [ "ac" ], - "description": "task action", + "description": "The task action.", "name": "action", "required": false, "type": { @@ -90293,7 +90344,7 @@ "aliases": [ "ti" ], - "description": "unique task id", + "description": "The unique task identifier.", "name": "task_id", "required": false, "type": { @@ -90308,7 +90359,7 @@ "aliases": [ "pti" ], - "description": "parent task id", + "description": "The parent task identifier.", "name": "parent_task_id", "required": false, "type": { @@ -90323,7 +90374,7 @@ "aliases": [ "ty" ], - "description": "task type", + "description": "The task type.", "name": "type", "required": false, "type": { @@ -90338,7 +90389,7 @@ "aliases": [ "start" ], - "description": "start time in ms", + "description": "The start time in milliseconds.", "name": "start_time", "required": false, "type": { @@ -90355,7 +90406,7 @@ "hms", "hhmmss" ], - "description": "start time in HH:MM:SS", + "description": "The start time in `HH:MM:SS` format.", "name": "timestamp", "required": false, "type": { @@ -90367,7 +90418,7 @@ } }, { - "description": "running time ns", + "description": "The running time in nanoseconds.", "name": "running_time_ns", "required": false, "type": { @@ -90382,7 +90433,7 @@ "aliases": [ "time" ], - "description": "running time", + "description": "The running time.", "name": "running_time", "required": false, "type": { @@ -90397,7 +90448,7 @@ "aliases": [ "ni" ], - "description": "unique node id", + "description": "The unique node identifier.", "name": "node_id", "required": false, "type": { @@ -90412,7 +90463,7 @@ "aliases": [ "i" ], - "description": "ip address", + "description": "The IP address for the node.", "name": "ip", "required": false, "type": { @@ -90427,7 +90478,7 @@ "aliases": [ "po" ], - "description": "bound transport port", + "description": "The bound transport port for the node.", "name": "port", "required": false, "type": { @@ -90442,7 +90493,7 @@ "aliases": [ "n" ], - "description": "node name", + "description": "The node name.", "name": "node", "required": false, "type": { @@ -90457,7 +90508,7 @@ "aliases": [ "v" ], - "description": "es version", + "description": "The Elasticsearch version.", "name": "version", "required": false, "type": { @@ 
-90472,7 +90523,7 @@ "aliases": [ "x" ], - "description": "X-Opaque-ID header", + "description": "The X-Opaque-ID header.", "name": "x_opaque_id", "required": false, "type": { @@ -90487,7 +90538,7 @@ "aliases": [ "desc" ], - "description": "task action", + "description": "The task action description.", "name": "description", "required": false, "type": { @@ -90509,7 +90560,7 @@ "body": { "kind": "no_body" }, - "description": "Returns information about existing templates.", + "description": "Returns information about index templates in a cluster.\nYou can use index templates to apply index settings and field mappings to new indices at creation.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.", "inherits": { "type": { "name": "CatRequestBase", @@ -90523,7 +90574,7 @@ }, "path": [ { - "description": "A pattern that returned template names must match", + "description": "The name of the template to return.\nAccepts wildcard expressions. If omitted, all templates are returned.", "name": "name", "required": false, "type": { @@ -90536,7 +90587,7 @@ } ], "query": [], - "specLocation": "cat/templates/CatTemplatesRequest.ts#L23-L33" + "specLocation": "cat/templates/CatTemplatesRequest.ts#L23-L41" }, { "body": { @@ -90570,7 +90621,7 @@ "aliases": [ "n" ], - "description": "template name", + "description": "The template name.", "name": "name", "required": false, "type": { @@ -90585,7 +90636,7 @@ "aliases": [ "t" ], - "description": "template index patterns", + "description": "The template index patterns.", "name": "index_patterns", "required": false, "type": { @@ -90601,7 +90652,7 @@ "o", "p" ], - "description": "template application order/priority number", + "description": "The template application order or priority number.", "name": "order", "required": false, "type": { @@ -90616,7 +90667,7 @@ "aliases": [ "v" ], - "description": "version", + "description": "The template version.", "name": "version", "required": false, "type": { @@ -90643,7 +90694,7 @@ "aliases": [ "c" ], - "description": "component templates comprising index template", + "description": "The component templates that comprise the index template.", "name": "composed_of", "required": false, "type": { @@ -90665,7 +90716,7 @@ "body": { "kind": "no_body" }, - "description": "Returns cluster-wide thread pool statistics per node.\nBy default the active, queue and rejected statistics are returned for all thread pools.", + "description": "Returns thread pool statistics for each node in a cluster.\nReturned information includes all built-in thread pools and custom thread pools.\nIMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.", "inherits": { "type": { "name": "CatRequestBase", @@ -90679,7 +90730,7 @@ }, "path": [ { - "description": "List of thread pool names used to limit the request. 
Accepts wildcard expressions.", + "description": "A comma-separated list of thread pool names used to limit the request.\nAccepts wildcard expressions.", "name": "thread_pool_patterns", "required": false, "type": { @@ -90693,7 +90744,7 @@ ], "query": [ { - "description": "Unit used to display time values.", + "description": "The unit used to display time values.", "name": "time", "required": false, "type": { @@ -90705,7 +90756,7 @@ } } ], - "specLocation": "cat/thread_pool/CatThreadPoolRequest.ts#L24-L43" + "specLocation": "cat/thread_pool/CatThreadPoolRequest.ts#L24-L48" }, { "body": { @@ -90739,7 +90790,7 @@ "aliases": [ "nn" ], - "description": "node name", + "description": "The node name.", "name": "node_name", "required": false, "type": { @@ -90754,7 +90805,7 @@ "aliases": [ "id" ], - "description": "persistent node id", + "description": "The persistent node identifier.", "name": "node_id", "required": false, "type": { @@ -90769,7 +90820,7 @@ "aliases": [ "eid" ], - "description": "ephemeral node id", + "description": "The ephemeral node identifier.", "name": "ephemeral_node_id", "required": false, "type": { @@ -90784,7 +90835,7 @@ "aliases": [ "p" ], - "description": "process id", + "description": "The process identifier.", "name": "pid", "required": false, "type": { @@ -90799,7 +90850,7 @@ "aliases": [ "h" ], - "description": "host name", + "description": "The host name for the current node.", "name": "host", "required": false, "type": { @@ -90814,7 +90865,7 @@ "aliases": [ "i" ], - "description": "ip address", + "description": "The IP address for the current node.", "name": "ip", "required": false, "type": { @@ -90829,7 +90880,7 @@ "aliases": [ "po" ], - "description": "bound transport port", + "description": "The bound transport port for the current node.", "name": "port", "required": false, "type": { @@ -90844,7 +90895,7 @@ "aliases": [ "n" ], - "description": "thread pool name", + "description": "The thread pool name.", "name": "name", "required": false, "type": { @@ -90859,7 +90910,7 @@ "aliases": [ "t" ], - "description": "thread pool type", + "description": "The thread pool type.\nReturned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`.", "name": "type", "required": false, "type": { @@ -90874,7 +90925,7 @@ "aliases": [ "a" ], - "description": "number of active threads", + "description": "The number of active threads in the current thread pool.", "name": "active", "required": false, "type": { @@ -90889,7 +90940,7 @@ "aliases": [ "psz" ], - "description": "number of threads", + "description": "The number of threads in the current thread pool.", "name": "pool_size", "required": false, "type": { @@ -90904,7 +90955,7 @@ "aliases": [ "q" ], - "description": "number of tasks currently in queue", + "description": "The number of tasks currently in queue.", "name": "queue", "required": false, "type": { @@ -90919,7 +90970,7 @@ "aliases": [ "qs" ], - "description": "maximum number of tasks permitted in queue", + "description": "The maximum number of tasks permitted in the queue.", "name": "queue_size", "required": false, "type": { @@ -90934,7 +90985,7 @@ "aliases": [ "r" ], - "description": "number of rejected tasks", + "description": "The number of rejected tasks.", "name": "rejected", "required": false, "type": { @@ -90949,7 +91000,7 @@ "aliases": [ "l" ], - "description": "highest number of seen active threads", + "description": "The highest number of active threads in the current thread pool.", "name": "largest", "required": false, "type": { @@ -90964,7 
+91015,7 @@ "aliases": [ "c" ], - "description": "number of completed tasks", + "description": "The number of completed tasks.", "name": "completed", "required": false, "type": { @@ -90979,7 +91030,7 @@ "aliases": [ "cr" ], - "description": "core number of threads in a scaling thread pool", + "description": "The core number of active threads allowed in a scaling thread pool.", "name": "core", "required": false, "type": { @@ -91006,7 +91057,7 @@ "aliases": [ "mx" ], - "description": "maximum number of threads in a scaling thread pool", + "description": "The maximum number of active threads allowed in a scaling thread pool.", "name": "max", "required": false, "type": { @@ -91033,7 +91084,7 @@ "aliases": [ "sz" ], - "description": "number of threads in a fixed thread pool", + "description": "The number of active threads allowed in a fixed thread pool.", "name": "size", "required": false, "type": { @@ -91060,7 +91111,7 @@ "aliases": [ "ka" ], - "description": "thread keep alive time", + "description": "The thread keep alive time.", "name": "keep_alive", "required": false, "type": { @@ -91084,7 +91135,7 @@ } } ], - "specLocation": "cat/thread_pool/types.ts#L22-L123" + "specLocation": "cat/thread_pool/types.ts#L22-L124" }, { "attachedBehaviors": [ @@ -91108,7 +91159,7 @@ }, "path": [ { - "description": "The id of the transform for which to get stats. '_all' or '*' implies all transforms", + "description": "A transform identifier or a wildcard expression.\nIf you do not specify one of these options, the API returns information for all transforms.", "name": "transform_id", "required": false, "type": { @@ -91122,9 +91173,10 @@ ], "query": [ { - "description": "Whether to ignore if a wildcard expression matches no transforms. (This includes `_all` string or when no transforms have been specified)", + "description": "Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches.\nIf `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches.\nIf `false`, the request returns a 404 status code when there are no matches or only partial matches.", "name": "allow_no_match", "required": false, + "serverDefault": true, "type": { "kind": "instance_of", "type": { @@ -91134,9 +91186,10 @@ } }, { - "description": "skips a number of transform configs, defaults to 0", + "description": "Skips the specified number of transforms.", "name": "from", "required": false, + "serverDefault": 0, "type": { "kind": "instance_of", "type": { @@ -91149,7 +91202,7 @@ "description": "Comma-separated list of column names to display.", "name": "h", "required": false, - "serverDefault": "create_time,id,state,type", + "serverDefault": "changes_last_detection_time,checkpoint,checkpoint_progress,documents_processed,id,last_search_time,state", "type": { "kind": "instance_of", "type": { @@ -91159,7 +91212,7 @@ } }, { - "description": "Comma-separated list of column names or column aliases used to sort the\nresponse.", + "description": "Comma-separated list of column names or column aliases used to sort the response.", "name": "s", "required": false, "type": { @@ -91171,7 +91224,7 @@ } }, { - "description": "Unit used to display time values.", + "description": "The unit used to display time values.", "name": "time", "required": false, "type": { @@ -91183,9 +91236,10 @@ } }, { - "description": 
"specifies a max number of transforms to get, defaults to 100", + "description": "The maximum number of transforms to obtain.", "name": "size", "required": false, + "serverDefault": 100, "type": { "kind": "instance_of", "type": { @@ -91195,7 +91249,7 @@ } } ], - "specLocation": "cat/transforms/CatTransformsRequest.ts#L25-L59" + "specLocation": "cat/transforms/CatTransformsRequest.ts#L25-L77" }, { "body": { @@ -91226,7 +91280,7 @@ }, "properties": [ { - "description": "the id", + "description": "The transform identifier.", "name": "id", "required": false, "type": { @@ -91241,7 +91295,7 @@ "aliases": [ "s" ], - "description": "transform state", + "description": "The status of the transform.\nReturned values include:\n`aborting`: The transform is aborting.\n`failed: The transform failed. For more information about the failure, check the `reason` field.\n`indexing`: The transform is actively processing data and creating new documents.\n`started`: The transform is running but not actively indexing data.\n`stopped`: The transform is stopped.\n`stopping`: The transform is stopping.", "name": "state", "required": false, "type": { @@ -91256,7 +91310,7 @@ "aliases": [ "c" ], - "description": "checkpoint", + "description": "The sequence number for the checkpoint.", "name": "checkpoint", "required": false, "type": { @@ -91272,7 +91326,7 @@ "docp", "documentsProcessed" ], - "description": "the number of documents read from source indices and processed", + "description": "The number of documents that have been processed from the source index of the transform.", "name": "documents_processed", "required": false, "type": { @@ -91288,7 +91342,7 @@ "cp", "checkpointProgress" ], - "description": "progress of the checkpoint", + "description": "The progress of the next checkpoint that is currently in progress.", "name": "checkpoint_progress", "required": false, "type": { @@ -91316,7 +91370,7 @@ "lst", "lastSearchTime" ], - "description": "last time transform searched for updates", + "description": "The timestamp of the last search in the source indices.\nThis field is shown only if the transform is running.", "name": "last_search_time", "required": false, "type": { @@ -91343,7 +91397,7 @@ "aliases": [ "cldt" ], - "description": "changes last detected time", + "description": "The timestamp when changes were last detected in the source indices.", "name": "changes_last_detection_time", "required": false, "type": { @@ -91371,7 +91425,7 @@ "ct", "createTime" ], - "description": "transform creation time", + "description": "The time the transform was created.", "name": "create_time", "required": false, "type": { @@ -91386,7 +91440,7 @@ "aliases": [ "v" ], - "description": "the version of Elasticsearch when the transform was created", + "description": "The version of Elasticsearch that existed on the node when the transform was created.", "name": "version", "required": false, "type": { @@ -91402,7 +91456,7 @@ "si", "sourceIndex" ], - "description": "source index", + "description": "The source indices for the transform.", "name": "source_index", "required": false, "type": { @@ -91418,7 +91472,7 @@ "di", "destIndex" ], - "description": "destination index", + "description": "The destination index for the transform.", "name": "dest_index", "required": false, "type": { @@ -91433,7 +91487,7 @@ "aliases": [ "p" ], - "description": "transform pipeline", + "description": "The unique identifier for the ingest pipeline.", "name": "pipeline", "required": false, "type": { @@ -91448,7 +91502,7 @@ "aliases": [ "d" ], - "description": 
"description", + "description": "The description of the transform.", "name": "description", "required": false, "type": { @@ -91463,7 +91517,7 @@ "aliases": [ "tt" ], - "description": "batch or continuous transform", + "description": "The type of transform: `batch` or `continuous`.", "name": "transform_type", "required": false, "type": { @@ -91478,7 +91532,7 @@ "aliases": [ "f" ], - "description": "frequency of transform", + "description": "The interval between checks for changes in the source indices when the transform is running continuously.", "name": "frequency", "required": false, "type": { @@ -91493,7 +91547,7 @@ "aliases": [ "mpsz" ], - "description": "max page search size", + "description": "The initial page size that is used for the composite aggregation for each checkpoint.", "name": "max_page_search_size", "required": false, "type": { @@ -91508,7 +91562,7 @@ "aliases": [ "dps" ], - "description": "docs per second", + "description": "The number of input documents per second.", "name": "docs_per_second", "required": false, "type": { @@ -91523,7 +91577,7 @@ "aliases": [ "r" ], - "description": "reason for the current state", + "description": "If a transform has a `failed` state, these details describe the reason for failure.", "name": "reason", "required": false, "type": { @@ -91538,7 +91592,7 @@ "aliases": [ "st" ], - "description": "total number of search phases", + "description": "The total number of search operations on the source index for the transform.", "name": "search_total", "required": false, "type": { @@ -91553,7 +91607,7 @@ "aliases": [ "sf" ], - "description": "total number of search failures", + "description": "The total number of search failures.", "name": "search_failure", "required": false, "type": { @@ -91568,7 +91622,7 @@ "aliases": [ "stime" ], - "description": "total search time", + "description": "The total amount of search time, in milliseconds.", "name": "search_time", "required": false, "type": { @@ -91583,7 +91637,7 @@ "aliases": [ "it" ], - "description": "total number of index phases done by the transform", + "description": "The total number of index operations done by the transform.", "name": "index_total", "required": false, "type": { @@ -91598,7 +91652,7 @@ "aliases": [ "if" ], - "description": "total number of index failures", + "description": "The total number of indexing failures.", "name": "index_failure", "required": false, "type": { @@ -91613,7 +91667,7 @@ "aliases": [ "itime" ], - "description": "total time spent indexing documents", + "description": "The total time spent indexing documents, in milliseconds.", "name": "index_time", "required": false, "type": { @@ -91628,7 +91682,7 @@ "aliases": [ "doci" ], - "description": "the number of documents written to the destination index", + "description": "The number of documents that have been indexed into the destination index for the transform.", "name": "documents_indexed", "required": false, "type": { @@ -91643,7 +91697,7 @@ "aliases": [ "dtime" ], - "description": "total time spent deleting documents", + "description": "The total time spent deleting documents, in milliseconds.", "name": "delete_time", "required": false, "type": { @@ -91658,7 +91712,7 @@ "aliases": [ "docd" ], - "description": "the number of documents deleted from the destination index", + "description": "The number of documents deleted from the destination index due to the retention policy for the transform.", "name": "documents_deleted", "required": false, "type": { @@ -91673,7 +91727,7 @@ "aliases": [ "tc" ], - "description": 
"the number of times the transform has been triggered", + "description": "The number of times the transform has been triggered by the scheduler.\nFor example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property.", "name": "trigger_count", "required": false, "type": { @@ -91688,7 +91742,7 @@ "aliases": [ "pp" ], - "description": "the number of pages processed", + "description": "The number of search or bulk index operations processed.\nDocuments are processed in batches instead of individually.", "name": "pages_processed", "required": false, "type": { @@ -91703,7 +91757,7 @@ "aliases": [ "pt" ], - "description": "the total time spent processing documents", + "description": "The total time spent processing results, in milliseconds.", "name": "processing_time", "required": false, "type": { @@ -91719,7 +91773,7 @@ "cdtea", "checkpointTimeExpAvg" ], - "description": "exponential average checkpoint processing time (milliseconds)", + "description": "The exponential moving average of the duration of the checkpoint, in milliseconds.", "name": "checkpoint_duration_time_exp_avg", "required": false, "type": { @@ -91734,7 +91788,7 @@ "aliases": [ "idea" ], - "description": "exponential average number of documents indexed", + "description": "The exponential moving average of the number of new documents that have been indexed.", "name": "indexed_documents_exp_avg", "required": false, "type": { @@ -91749,7 +91803,7 @@ "aliases": [ "pdea" ], - "description": "exponential average number of documents processed", + "description": "The exponential moving average of the number of documents that have been processed.", "name": "processed_documents_exp_avg", "required": false, "type": { @@ -91761,7 +91815,7 @@ } } ], - "specLocation": "cat/transforms/types.ts#L22-L187" + "specLocation": "cat/transforms/types.ts#L22-L197" }, { "kind": "interface", @@ -131007,7 +131061,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L129-L149" + "specLocation": "ml/_types/Job.ts#L139-L159" }, { "kind": "interface", @@ -131066,7 +131120,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L151-L167" + "specLocation": "ml/_types/Job.ts#L161-L177" }, { "kind": "interface", @@ -135952,7 +136006,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L51-L75" + "specLocation": "ml/_types/Job.ts#L61-L85" }, { "kind": "interface", @@ -135984,7 +136038,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L169-L172" + "specLocation": "ml/_types/Job.ts#L179-L182" }, { "kind": "enum", @@ -136003,7 +136057,7 @@ "name": "JobBlockedReason", "namespace": "ml._types" }, - "specLocation": "ml/_types/Job.ts#L174-L178" + "specLocation": "ml/_types/Job.ts#L184-L188" }, { "kind": "interface", @@ -136203,7 +136257,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L77-L95" + "specLocation": "ml/_types/Job.ts#L87-L105" }, { "kind": "interface", @@ -136290,24 +136344,29 @@ } } ], - "specLocation": "ml/_types/Job.ts#L120-L127" + "specLocation": "ml/_types/Job.ts#L130-L137" }, { "kind": "enum", "members": [ { + "description": "The job close action is in progress and has not yet completed. A closing job cannot accept further data.", "name": "closing" }, { + "description": "The job finished successfully with its model state persisted. 
The job must be opened before it can accept further data.", "name": "closed" }, { + "description": "The job is available to receive and process data.", "name": "opened" }, { + "description": "The job did not finish successfully due to an error.\nThis situation can occur due to invalid input data, a fatal error occurring during the analysis, or an external interaction such as the process being killed by the Linux out of memory (OOM) killer.\nIf the job had irrevocably failed, it must be force closed and then deleted.\nIf the datafeed can be corrected, the job can be closed and then re-opened.", "name": "failed" }, { + "description": "The job open action is in progress and has not yet completed.", "name": "opening" } ], @@ -136315,7 +136374,7 @@ "name": "JobState", "namespace": "ml._types" }, - "specLocation": "ml/_types/Job.ts#L36-L42" + "specLocation": "ml/_types/Job.ts#L36-L52" }, { "kind": "interface", @@ -136369,7 +136428,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L44-L49" + "specLocation": "ml/_types/Job.ts#L54-L59" }, { "kind": "interface", @@ -136489,7 +136548,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L96-L107" + "specLocation": "ml/_types/Job.ts#L106-L117" }, { "kind": "interface", @@ -136641,7 +136700,7 @@ } } ], - "specLocation": "ml/_types/Job.ts#L109-L118" + "specLocation": "ml/_types/Job.ts#L119-L128" }, { "kind": "enum", diff --git a/output/schema/validation-errors.json b/output/schema/validation-errors.json index 0704b1234d..7bcc6164e1 100644 --- a/output/schema/validation-errors.json +++ b/output/schema/validation-errors.json @@ -358,10 +358,8 @@ "cat.tasks": { "request": [ "Request: query parameter 'node_id' does not exist in the json spec", - "Request: query parameter 'parent_task' does not exist in the json spec", "Request: missing json spec query parameter 'format'", "Request: missing json spec query parameter 'nodes'", - "Request: missing json spec query parameter 'parent_task_id'", "Request: missing json spec query parameter 'h'", "Request: missing json spec query parameter 'help'", "Request: missing json spec query parameter 's'", diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 8420415c8e..47c76e7756 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -7817,7 +7817,7 @@ export interface CatTasksRequest extends CatCatRequestBase { actions?: string[] detailed?: boolean node_id?: string[] - parent_task?: long + parent_task_id?: string } export type CatTasksResponse = CatTasksTasksRecord[] diff --git a/specification/cat/ml_data_frame_analytics/types.ts b/specification/cat/ml_data_frame_analytics/types.ts index eedbaeb5c2..0e53a6ed15 100644 --- a/specification/cat/ml_data_frame_analytics/types.ts +++ b/specification/cat/ml_data_frame_analytics/types.ts @@ -21,81 +21,81 @@ import { Id, IndexName, Name, VersionString } from '@_types/common' export class DataFrameAnalyticsRecord { /** - * the id + * The identifier for the job. */ 'id'?: Id /** - * analysis type + * The type of analysis that the job performs. * @aliases t */ 'type'?: string /** - * job creation time + * The time when the job was created. * @aliases ct, createTime */ 'create_time'?: string /** - * the version of Elasticsearch when the analytics was created + * The version of Elasticsearch when the job was created. * @aliases v */ 'version'?: VersionString /** - * source index + * The name of the source index. * @aliases si, sourceIndex */ 'source_index'?: IndexName /** - * destination index + * The name of the destination index. 
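For context, enum members such as the JobState values documented earlier in this diff are emitted into output/typescript/types.ts as plain string unions; a sketch of the assumed generated shape (the exact alias name may differ by output version):

export type MlJobState = 'closing' | 'closed' | 'opened' | 'failed' | 'opening'
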
* @aliases di, destIndex */ 'dest_index'?: IndexName /** - * description + * A description of the job. * @aliases d */ 'description'?: string /** - * model memory limit + * The approximate maximum amount of memory resources that are permitted for the job. * @aliases mml, modelMemoryLimit */ 'model_memory_limit'?: string /** - * job state + * The current status of the job. * @aliases s */ 'state'?: string /** - * failure reason + * Messages about the reason why the job failed. * @aliases fr, failureReason */ 'failure_reason'?: string /** - * progress + * The progress report for the job by phase. * @aliases p */ 'progress'?: string /** - * why the job is or is not assigned to a node + * Messages related to the selection of a node. * @aliases ae, assignmentExplanation */ 'assignment_explanation'?: string /** - * id of the assigned node + * The unique identifier of the assigned node. * @aliases ni, nodeId */ 'node.id'?: Id /** - * name of the assigned node + * The name of the assigned node. * @aliases nn, nodeName */ 'node.name'?: Name /** - * ephemeral id of the assigned node + * The ephemeral identifier of the assigned node. * @aliases ne, nodeEphemeralId */ 'node.ephemeral_id'?: Id /** - * network address of the assigned node + * The network address of the assigned node. * @aliases na, nodeAddress */ 'node.address'?: string diff --git a/specification/cat/ml_datafeeds/types.ts b/specification/cat/ml_datafeeds/types.ts index bdd15f216d..638baedf2b 100644 --- a/specification/cat/ml_datafeeds/types.ts +++ b/specification/cat/ml_datafeeds/types.ts @@ -21,62 +21,66 @@ import { DatafeedState } from '@ml/_types/Datafeed' export class DatafeedsRecord { /** - * the datafeed_id + * The datafeed identifier. */ 'id'?: string /** - * the datafeed state + * The status of the datafeed. * @aliases s */ 'state'?: DatafeedState /** - * why the datafeed is or is not assigned to a node + * For started datafeeds only, contains messages relating to the selection of a node. * @aliases ae */ 'assignment_explanation'?: string /** - * bucket count + * The number of buckets processed. * @aliases bc, bucketsCount */ 'buckets.count'?: string /** - * number of searches ran by the datafeed + * The number of searches run by the datafeed. * @aliases sc, searchCount */ 'search.count'?: string /** - * the total search time + * The total time the datafeed spent searching, in milliseconds. * @aliases st, searchTime */ 'search.time'?: string /** - * the average search time per bucket (millisecond) + * The average search time per bucket, in milliseconds. * @aliases sba, searchBucketAvg */ 'search.bucket_avg'?: string /** - * the exponential average search time per hour (millisecond) + * The exponential average search time per hour, in milliseconds. * @aliases seah, searchExpAvgHour */ //Node info 'search.exp_avg_hour'?: string /** - * id of the assigned node + * The unique identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @aliases ni, nodeId */ 'node.id'?: string /** - * name of the assigned node + * The name of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @aliases nn, nodeName */ 'node.name'?: string /** - * ephemeral id of the assigned node + * The ephemeral identifier of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. 
* @aliases ne, nodeEphemeralId */ 'node.ephemeral_id'?: string /** - * network address of the assigned node + * The network address of the assigned node. + * For started datafeeds only, this information pertains to the node upon which the datafeed is started. * @aliases na, nodeAddress */ 'node.address'?: string diff --git a/specification/cat/ml_jobs/types.ts b/specification/cat/ml_jobs/types.ts index 80b62277f2..7cf8427bc4 100644 --- a/specification/cat/ml_jobs/types.ts +++ b/specification/cat/ml_jobs/types.ts @@ -23,302 +23,324 @@ import { ByteSize, Id, NodeId } from '@_types/common' export class JobsRecord { /** - * the job_id + * The anomaly detection job identifier. */ id?: Id - /** - * the job state + * The status of the anomaly detection job. * @aliases s */ 'state'?: JobState /** - * the amount of time the job has been opened + * For open jobs only, the amount of time the job has been opened. * @aliases ot */ 'opened_time'?: string /** - * why the job is or is not assigned to a node + * For open anomaly detection jobs only, contains messages relating to the selection of a node to run the job. * @aliases ae */ 'assignment_explanation'?: string /** - * number of processed records + * The number of input documents that have been processed by the anomaly detection job. + * This value includes documents with missing fields, since they are nonetheless analyzed. + * If you use datafeeds and have aggregations in your search query, the `processed_record_count` is the number of aggregation results processed, not the number of Elasticsearch documents. * @aliases dpr, dataProcessedRecords */ 'data.processed_records'?: string /** - * number of processed fields + * The total number of fields in all the documents that have been processed by the anomaly detection job. + * Only fields that are specified in the detector configuration object contribute to this count. + * The timestamp is not included in this count. * @aliases dpf, dataProcessedFields */ 'data.processed_fields'?: string /** - * total input bytes + * The number of bytes of input data posted to the anomaly detection job. * @aliases dib, dataInputBytes */ 'data.input_bytes'?: ByteSize /** - * total record count + * The number of input documents posted to the anomaly detection job. * @aliases dir, dataInputRecords */ 'data.input_records'?: string /** - * total field count + * The total number of fields in input documents posted to the anomaly detection job. + * This count includes fields that are not used in the analysis. + * However, be aware that if you are using a datafeed, it extracts only the required fields from the documents it retrieves before posting them to the job. * @aliases dif, dataInputFields */ 'data.input_fields'?: string /** - * number of records with invalid dates + * The number of input documents with either a missing date field or a date that could not be parsed. * @aliases did, dataInvalidDates */ 'data.invalid_dates'?: string /** - * number of records with missing fields + * The number of input documents that are missing a field that the anomaly detection job is configured to analyze. + * Input documents with missing fields are still processed because it is possible that not all fields are missing. + * If you are using datafeeds or posting data to the job in JSON format, a high `missing_field_count` is often not an indication of data issues. + * It is not necessarily a cause for concern. 
* @aliases dmf, dataMissingFields */ 'data.missing_fields'?: string /** - * number of records handled out of order + * The number of input documents that have a timestamp chronologically preceding the start of the current anomaly detection bucket offset by the latency window. + * This information is applicable only when you provide data to the anomaly detection job by using the post data API. + * These out of order documents are discarded, since jobs require time series data to be in ascending chronological order. * @aliases doot, dataOutOfOrderTimestamps */ 'data.out_of_order_timestamps'?: string /** - * number of empty buckets + * The number of buckets which did not contain any data. + * If your data contains many empty buckets, consider increasing your `bucket_span` or using functions that are tolerant to gaps in data such as mean, `non_null_sum` or `non_zero_count`. * @aliases deb, dataEmptyBuckets */ 'data.empty_buckets'?: string /** - * number of sparse buckets + * The number of buckets that contained few data points compared to the expected number of data points. + * If your data contains many sparse buckets, consider using a longer `bucket_span`. * @aliases dsb, dataSparseBuckets */ 'data.sparse_buckets'?: string /** - * total bucket count + * The total number of buckets processed. * @aliases db, dataBuckets */ 'data.buckets'?: string /** - * earliest record time + * The timestamp of the earliest chronologically input document. * @aliases der, dataEarliestRecord */ 'data.earliest_record'?: string /** - * latest record time + * The timestamp of the latest chronologically input document. * @aliases dlr, dataLatestRecord */ 'data.latest_record'?: string /** - * last time data was seen + * The timestamp at which data was last analyzed, according to server time. * @aliases dl, dataLast */ 'data.last'?: string /** - * last time an empty bucket occurred + * The timestamp of the last bucket that did not contain any data. * @aliases dleb, dataLastEmptyBucket */ 'data.last_empty_bucket'?: string /** - * last time a sparse bucket occurred + * The timestamp of the last bucket that was considered sparse. * @aliases dlsb, dataLastSparseBucket */ 'data.last_sparse_bucket'?: string /** - * model size + * The number of bytes of memory used by the models. + * This is the maximum value since the last time the model was persisted. + * If the job is closed, this value indicates the latest size. * @aliases mb, modelBytes */ 'model.bytes'?: ByteSize /** - * current memory status + * The status of the mathematical models. * @aliases mms, modelMemoryStatus */ 'model.memory_status'?: MemoryStatus /** - * how much the model has exceeded the limit + * The number of bytes over the high limit for memory usage at the last allocation failure. * @aliases mbe, modelBytesExceeded */ 'model.bytes_exceeded'?: ByteSize /** - * model memory limit + * The upper limit for model memory usage, checked on increasing values. * @aliases mml, modelMemoryLimit */ 'model.memory_limit'?: string /** - * count of 'by' fields + * The number of `by` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. * @aliases mbf, modelByFields */ 'model.by_fields'?: string /** - * count of 'over' fields + * The number of `over` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. 
* @aliases mof, modelOverFields */ 'model.over_fields'?: string /** - * count of 'partition' fields + * The number of `partition` field values that were analyzed by the models. + * This value is cumulative for all detectors in the job. * @aliases mpf, modelPartitionFields */ 'model.partition_fields'?: string /** - * number of bucket allocation failures + * The number of buckets for which new entities in incoming data were not processed due to insufficient model memory. + * This situation is also signified by a `hard_limit: memory_status` property value. * @aliases mbaf, modelBucketAllocationFailures */ 'model.bucket_allocation_failures'?: string /** - * current categorization status + * The status of categorization for the job. * @aliases mcs, modelCategorizationStatus */ 'model.categorization_status'?: CategorizationStatus /** - * count of categorized documents + * The number of documents that have had a field categorized. * @aliases mcdc, modelCategorizedDocCount */ 'model.categorized_doc_count'?: string /** - * count of categories + * The number of categories created by categorization. * @aliases mtcc, modelTotalCategoryCount */ 'model.total_category_count'?: string /** - * count of frequent categories + * The number of categories that match more than 1% of categorized documents. * @aliases modelFrequentCategoryCount */ 'model.frequent_category_count'?: string /** - * count of rare categories + * The number of categories that match just one categorized document. * @aliases mrcc, modelRareCategoryCount */ 'model.rare_category_count'?: string /** - * count of dead categories + * The number of categories created by categorization that will never be assigned again because another category’s definition makes it a superset of the dead category. + * Dead categories are a side effect of the way categorization has no prior training. * @aliases mdcc, modelDeadCategoryCount */ 'model.dead_category_count'?: string /** - * count of failed categories + * The number of times that categorization wanted to create a new category but couldn’t because the job had hit its `model_memory_limit`. + * This count does not track which specific categories failed to be created. + * Therefore you cannot use this value to determine the number of unique categories that were missed. * @aliases mfcc, modelFailedCategoryCount */ 'model.failed_category_count'?: string /** - * when the model stats were gathered + * The timestamp when the model stats were gathered, according to server time. * @aliases mlt, modelLogTime */ 'model.log_time'?: string /** - * the time of the last record when the model stats were gathered + * The timestamp of the last record when the model stats were gathered. * @aliases mt, modelTimestamp */ 'model.timestamp'?: string /** - * total number of forecasts + * The number of individual forecasts currently available for the job. + * A value of one or more indicates that forecasts exist. * @aliases ft, forecastsTotal */ 'forecasts.total'?: string /** - * minimum memory used by forecasts + * The minimum memory usage in bytes for forecasts related to the anomaly detection job. * @aliases fmmin, forecastsMemoryMin */ 'forecasts.memory.min'?: string /** - * maximum memory used by forecasts + * The maximum memory usage in bytes for forecasts related to the anomaly detection job. * @aliases fmmax, forecastsMemoryMax */ 'forecasts.memory.max'?: string /** - * average memory used by forecasts + * The average memory usage in bytes for forecasts related to the anomaly detection job. 
* @aliases fmavg, forecastsMemoryAvg */ 'forecasts.memory.avg'?: string /** - * total memory used by all forecasts + * The total memory usage in bytes for forecasts related to the anomaly detection job. * @aliases fmt, forecastsMemoryTotal */ 'forecasts.memory.total'?: string /** - * minimum record count for forecasts + * The minimum number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @aliases frmin, forecastsRecordsMin */ 'forecasts.records.min'?: string /** - * maximum record count for forecasts + * The maximum number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @aliases frmax, forecastsRecordsMax */ 'forecasts.records.max'?: string /** - * average record count for forecasts + * The average number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @aliases fravg, forecastsRecordsAvg */ 'forecasts.records.avg'?: string /** - * total record count for all forecasts + * The total number of `model_forecast` documents written for forecasts related to the anomaly detection job. * @aliases frt, forecastsRecordsTotal */ 'forecasts.records.total'?: string /** - * minimum runtime for forecasts + * The minimum runtime in milliseconds for forecasts related to the anomaly detection job. * @aliases ftmin, forecastsTimeMin */ 'forecasts.time.min'?: string /** - * maximum run time for forecasts + * The maximum runtime in milliseconds for forecasts related to the anomaly detection job. * @aliases ftmax, forecastsTimeMax */ 'forecasts.time.max'?: string /** - * average runtime for all forecasts (milliseconds) + * The average runtime in milliseconds for forecasts related to the anomaly detection job. * @aliases ftavg, forecastsTimeAvg */ 'forecasts.time.avg'?: string /** - * total runtime for all forecasts + * The total runtime in milliseconds for forecasts related to the anomaly detection job. * @aliases ftt, forecastsTimeTotal */ 'forecasts.time.total'?: string /** - * id of the assigned node + * The unique identifier of the assigned node. * @aliases ni, nodeId */ 'node.id'?: NodeId /** - * name of the assigned node + * The name of the assigned node. * @aliases nn, nodeName */ 'node.name'?: string /** - * ephemeral id of the assigned node + * The ephemeral identifier of the assigned node. * @aliases ne, nodeEphemeralId */ 'node.ephemeral_id'?: NodeId /** - * network address of the assigned node + * The network address of the assigned node. * @aliases na, nodeAddress */ 'node.address'?: string /** - * bucket count + * The number of bucket results produced by the job. * @aliases bc, bucketsCount */ 'buckets.count'?: string /** - * total bucket processing time + * The sum of all bucket processing times, in milliseconds. * @aliases btt, bucketsTimeTotal */ 'buckets.time.total'?: string /** - * minimum bucket processing time + * The minimum of all bucket processing times, in milliseconds. * @aliases btmin, bucketsTimeMin */ 'buckets.time.min'?: string /** - * maximum bucket processing time + * The maximum of all bucket processing times, in milliseconds. * @aliases btmax, bucketsTimeMax */ 'buckets.time.max'?: string /** - * exponential average bucket processing time (milliseconds) + * The exponential moving average of all bucket processing times, in milliseconds. 
* @aliases btea, bucketsTimeExpAvg */ 'buckets.time.exp_avg'?: string /** - * exponential average bucket processing time by hour (milliseconds) + * The exponential moving average of bucket processing times calculated in a one hour time window, in milliseconds. * @aliases bteah, bucketsTimeExpAvgHour */ 'buckets.time.exp_avg_hour'?: string diff --git a/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts b/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts index e2490b29dd..1f7d3eff48 100644 --- a/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts +++ b/specification/cat/ml_trained_models/CatTrainedModelsRequest.ts @@ -32,17 +32,32 @@ import { integer } from '@_types/Numeric' * @availability stack since=7.7.0 stability=stable * @availability serverless stability=stable visibility=public * @doc_id cat-trained-model + * @cluster_privileges monitor_ml */ export interface Request extends CatRequestBase { path_parts: { + /** + * A unique identifier for the trained model. + */ model_id?: Id } query_parameters: { + /** + * Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. + * If `false`, the API returns a 404 status code when there are no matches or only partial matches. + * @server_default true + */ allow_no_match?: boolean + /** The unit used to display byte values. */ bytes?: Bytes + /** A comma-separated list of column names to display. */ h?: CatTrainedModelsColumns + /** A comma-separated list of column names or aliases used to sort the response. */ s?: CatTrainedModelsColumns + /** Skips the specified number of trained models. */ from?: integer + /** The maximum number of trained models to display. */ size?: integer } } diff --git a/specification/cat/ml_trained_models/types.ts b/specification/cat/ml_trained_models/types.ts index 79289f64c5..88ece53f01 100644 --- a/specification/cat/ml_trained_models/types.ts +++ b/specification/cat/ml_trained_models/types.ts @@ -22,87 +22,88 @@ import { DateTime } from '@_types/Time' export class TrainedModelsRecord { /** - * the trained model id + * The model identifier. */ 'id'?: Id /** - * who created the model + * Information about the creator of the model. * @aliases c, createdBy */ 'created_by'?: string /** - * the estimated heap size to keep the model in memory + * The estimated heap size to keep the model in memory. * @aliases hs,modelHeapSize */ 'heap_size'?: ByteSize /** - * the estimated number of operations to use the model + * The estimated number of operations to use the model. + * This number helps to measure the computational complexity of the model. * @aliases o, modelOperations */ 'operations'?: string /** - * The license level of the model + * The license level of the model. * @aliases l */ 'license'?: string /** - * The time the model was created + * The time the model was created. * @aliases ct */ 'create_time'?: DateTime /** - * The version of Elasticsearch when the model was created + * The version of Elasticsearch when the model was created. * @aliases v */ 'version'?: VersionString /** - * The model description + * A description of the model. 
* @aliases d */ 'description'?: string /** - * The number of pipelines referencing the model + * The number of pipelines that are referencing the model. * @aliases ip, ingestPipelines */ 'ingest.pipelines'?: string /** - * The total number of docs processed by the model + * The total number of documents that are processed by the model. * @aliases ic, ingestCount */ 'ingest.count'?: string /** - * The total time spent processing docs with this model + * The total time spent processing documents with this model. * @aliases it, ingestTime */ 'ingest.time'?: string /** - * The total documents currently being handled by the model + * The total number of documents that are currently being handled by the model. * @aliases icurr, ingestCurrent */ 'ingest.current'?: string /** - * The total count of failed ingest attempts with this model + * The total number of failed ingest attempts with the model. * @aliases if, ingestFailed */ 'ingest.failed'?: string - /** - * The data frame analytics config id that created the model (if still available) + * The identifier for the data frame analytics job that created the model. + * Only displayed if the job is still available. * @aliases dfid, dataFrameAnalytics */ 'data_frame.id'?: string /** - * The time the data frame analytics config was created + * The time the data frame analytics job was created. * @aliases dft, dataFrameAnalyticsTime */ 'data_frame.create_time'?: string /** - * The source index used to train in the data frame analysis + * The source index used to train in the data frame analysis. * @aliases dfsi, dataFrameAnalyticsSrcIndex */ 'data_frame.source_index'?: string /** - * The analysis used by the data frame to build the model + * The analysis used by the data frame to build the model. * @aliases dfa, dataFrameAnalyticsAnalysis */ 'data_frame.analysis'?: string diff --git a/specification/cat/nodes/types.ts b/specification/cat/nodes/types.ts index ace158ad89..2d4dca6bf8 100644 --- a/specification/cat/nodes/types.ts +++ b/specification/cat/nodes/types.ts @@ -498,7 +498,6 @@ export class NodesRecord { * @aliases sfbm,fixedBitsetMemory */ 'segments.fixed_bitset_memory'?: string - /** * The number of current suggest operations. * @aliases suc,suggestCurrent diff --git a/specification/cat/segments/CatSegmentsRequest.ts b/specification/cat/segments/CatSegmentsRequest.ts index 6c53faff5c..695870bea0 100644 --- a/specification/cat/segments/CatSegmentsRequest.ts +++ b/specification/cat/segments/CatSegmentsRequest.ts @@ -21,16 +21,29 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { Bytes, Indices } from '@_types/common' /** + * Returns low-level information about the Lucene segments in index shards. + * For data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API. * @rest_spec_name cat.segments * @availability stack since=0.0.0 stability=stable * @availability serverless stability=stable visibility=private * @doc_id cat-segments + * @cluster_privileges monitor + * @index_privileges monitor */ export interface Request extends CatRequestBase { path_parts: { + /** + * A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. 
+ */ index?: Indices } query_parameters: { + /** + * The unit used to display byte values. + */ bytes?: Bytes } } diff --git a/specification/cat/segments/types.ts b/specification/cat/segments/types.ts index 9c68421553..7a6668d4cd 100644 --- a/specification/cat/segments/types.ts +++ b/specification/cat/segments/types.ts @@ -21,75 +21,86 @@ import { ByteSize, IndexName, NodeId, VersionString } from '@_types/common' export class SegmentsRecord { /** - * index name + * The index name. * @aliases i, idx */ 'index'?: IndexName /** - * shard name + * The shard name. * @aliases s, sh */ 'shard'?: string /** - * primary or replica + * The shard type: `primary` or `replica`. * @aliases p,pr,primaryOrReplica */ 'prirep'?: string /** - * ip of node where it lives + * The IP address of the node where it lives. */ 'ip'?: string /** - * unique id of node where it lives + * The unique identifier of the node where it lives. */ 'id'?: NodeId /** - * segment name + * The segment name, which is derived from the segment generation and used internally to create file names in the directory of the shard. * @aliases seg */ 'segment'?: string /** - * segment generation + * The segment generation number. + * Elasticsearch increments this generation number for each segment written and then uses this number to derive the segment name. * @aliases g,gen */ 'generation'?: string /** - * number of docs in segment + * The number of documents in the segment. + * This excludes deleted documents and counts any nested documents separately from their parents. + * It also excludes documents which were indexed recently and do not yet belong to a segment. * @aliases dc,docsCount */ 'docs.count'?: string /** - * number of deleted docs in segment + * The number of deleted documents in the segment, which might be higher or lower than the number of delete operations you have performed. + * This number excludes deletes that were performed recently and do not yet belong to a segment. + * Deleted documents are cleaned up by the automatic merge process if it makes sense to do so. + * Also, Elasticsearch creates extra deleted documents to internally track the recent history of operations on a shard. * @aliases dd,docsDeleted */ 'docs.deleted'?: string /** - * segment size in bytes + * The segment size in bytes. * @aliases si */ 'size'?: ByteSize /** - * segment memory in bytes + * The segment memory in bytes. + * A value of `-1` indicates Elasticsearch was unable to compute this number. * @aliases sm,sizeMemory */ 'size.memory'?: ByteSize /** - * is segment committed + * If `true`, the segment is synced to disk. + * Segments that are synced can survive a hard reboot. + * If `false`, the data from uncommitted segments is also stored in the transaction log so that Elasticsearch is able to replay changes on the next start. * @aliases ic,isCommitted */ 'committed'?: string /** - * is segment searched + * If `true`, the segment is searchable. + * If `false`, the segment has most likely been written to disk but needs a refresh to be searchable. * @aliases is,isSearchable */ 'searchable'?: string /** - * version + * The version of Lucene used to write the segment. * @aliases v */ 'version'?: VersionString /** - * is segment compound + * If `true`, the segment is stored in a compound file. + * This means Lucene merged all files from the segment in a single file to save file descriptors.
* @aliases ico,isCompound */ 'compound'?: string diff --git a/specification/cat/shards/CatShardsRequest.ts b/specification/cat/shards/CatShardsRequest.ts index 299d0d33e0..092f5600c2 100644 --- a/specification/cat/shards/CatShardsRequest.ts +++ b/specification/cat/shards/CatShardsRequest.ts @@ -21,16 +21,29 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { Bytes, Indices } from '@_types/common' /** + * Returns information about the shards in a cluster. + * For data streams, the API returns information about the backing indices. + * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. * @rest_spec_name cat.shards * @availability stack since=0.0.0 stability=stable * @availability serverless stability=stable visibility=private * @doc_id cat-shards + * @cluster_privileges monitor + * @index_privileges monitor */ export interface Request extends CatRequestBase { path_parts: { + /** + * A comma-separated list of data streams, indices, and aliases used to limit the request. + * Supports wildcards (`*`). + * To target all data streams and indices, omit this parameter or use `*` or `_all`. + */ index?: Indices } query_parameters: { + /** + * The unit used to display byte values. + */ bytes?: Bytes } } diff --git a/specification/cat/shards/types.ts b/specification/cat/shards/types.ts index 1c1e0a5968..666e271559 100644 --- a/specification/cat/shards/types.ts +++ b/specification/cat/shards/types.ts @@ -19,377 +19,402 @@ export class ShardsRecord { /** - * index name + * The index name. * @aliases i,idx */ 'index'?: string /** - * shard name + * The shard name. * @aliases s,sh */ 'shard'?: string /** - * primary or replica + * The shard type: `primary` or `replica`. * @aliases p,pr,primaryOrReplica */ 'prirep'?: string /** - * shard state + * The shard state. + * Returned values include: + * `INITIALIZING`: The shard is recovering from a peer shard or gateway. + * `RELOCATING`: The shard is relocating. + * `STARTED`: The shard has started. + * `UNASSIGNED`: The shard is not assigned to any node. * @aliases st */ 'state'?: string /** - * number of docs in shard + * The number of documents in the shard. * @aliases d,dc */ 'docs'?: string | null /** - * store size of shard (how much disk it uses) + * The disk space used by the shard. * @aliases sto */ 'store'?: string | null /** - * ip of node where it lives + * The IP address of the node. */ 'ip'?: string | null /** - * unique id of node where it lives + * The unique identifier for the node. */ 'id'?: string /** - * name of node where it lives + * The name of the node. * @aliases n */ 'node'?: string | null /** - * sync id + * The sync identifier. */ 'sync_id'?: string /** - * reason shard is unassigned + * The reason for the last change to the state of an unassigned shard. + * It does not explain why the shard is currently unassigned; use the cluster allocation explain API for that information. + * Returned values include: + * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard. + * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery. + * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index. + * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index. + * `FORCED_EMPTY_PRIMARY`: The shard’s allocation was last modified by forcing an empty primary using the cluster reroute API. + * `INDEX_CLOSED`: Unassigned because the index was closed.
+ * `INDEX_CREATED`: Unassigned as a result of an API creation of an index. + * `INDEX_REOPENED`: Unassigned as a result of opening a closed index. + * `MANUAL_ALLOCATION`: The shard’s allocation was last modified by the cluster reroute API. + * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index. + * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster. + * `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the node shutdown API. + * `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed. + * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled. + * `REINITIALIZED`: When a shard moves from started back to initializing. + * `REPLICA_ADDED`: Unassigned as a result of the explicit addition of a replica. + * `REROUTE_CANCELLED`: Unassigned as a result of an explicit cancel reroute command. * @aliases ur */ 'unassigned.reason'?: string /** - * time shard became unassigned (UTC) + * The time at which the shard became unassigned in Coordinated Universal Time (UTC). * @aliases ua */ 'unassigned.at'?: string /** - * time has been unassigned + * The time at which the shard was requested to be unassigned in Coordinated Universal Time (UTC). * @aliases uf */ 'unassigned.for'?: string /** - * additional details as to why the shard became unassigned + * Additional details as to why the shard became unassigned. + * It does not explain why the shard is not assigned; use the cluster allocation explain API for that information. * @aliases ud */ 'unassigned.details'?: string /** - * recovery source type + * The type of recovery source. * @aliases rs */ 'recoverysource.type'?: string /** - * size of completion + * The size of completion. * @aliases cs,completionSize */ 'completion.size'?: string /** - * used fielddata cache + * The used fielddata cache memory. * @aliases fm,fielddataMemory */ 'fielddata.memory_size'?: string /** - * fielddata evictions + * The fielddata cache evictions. * @aliases fe,fielddataEvictions */ 'fielddata.evictions'?: string /** - * used query cache + * The used query cache memory. * @aliases qcm,queryCacheMemory */ 'query_cache.memory_size'?: string /** - * query cache evictions + * The query cache evictions. * @aliases qce,queryCacheEvictions */ 'query_cache.evictions'?: string /** - * number of flushes + * The number of flushes. * @aliases ft,flushTotal */ 'flush.total'?: string /** - * time spent in flush + * The time spent in flush. * @aliases ftt,flushTotalTime */ 'flush.total_time'?: string /** - * number of current get ops + * The number of current get operations. * @aliases gc,getCurrent */ 'get.current'?: string /** - * time spent in get + * The time spent in get operations. * @aliases gti,getTime */ 'get.time'?: string /** - * number of get ops + * The number of get operations. * @aliases gto,getTotal */ 'get.total'?: string /** - * time spent in successful gets + * The time spent in successful get operations. * @aliases geti,getExistsTime */ 'get.exists_time'?: string /** - * number of successful gets + * The number of successful get operations. * @aliases geto,getExistsTotal */ 'get.exists_total'?: string /** - * time spent in failed gets + * The time spent in failed get operations. * @aliases gmti,getMissingTime */ 'get.missing_time'?: string /** - * number of failed gets + * The number of failed get operations.
* @aliases gmto,getMissingTotal */ 'get.missing_total'?: string /** - * number of current deletions + * The number of current deletion operations. * @aliases idc,indexingDeleteCurrent */ 'indexing.delete_current'?: string /** - * time spent in deletions + * The time spent in deletion operations. * @aliases idti,indexingDeleteTime */ 'indexing.delete_time'?: string /** - * number of delete ops + * The number of delete operations. * @aliases idto,indexingDeleteTotal */ 'indexing.delete_total'?: string /** - * number of current indexing ops + * The number of current indexing operations. * @aliases iic,indexingIndexCurrent */ 'indexing.index_current'?: string /** - * time spent in indexing + * The time spent in indexing operations. * @aliases iiti,indexingIndexTime */ 'indexing.index_time'?: string /** - * number of indexing ops + * The number of indexing operations. * @aliases iito,indexingIndexTotal */ 'indexing.index_total'?: string /** - * number of failed indexing ops + * The number of failed indexing operations. * @aliases iif,indexingIndexFailed */ 'indexing.index_failed'?: string /** - * number of current merges + * The number of current merge operations. * @aliases mc,mergesCurrent */ 'merges.current'?: string /** - * number of current merging docs + * The number of current merging documents. * @aliases mcd,mergesCurrentDocs */ 'merges.current_docs'?: string /** - * size of current merges + * The size of current merge operations. * @aliases mcs,mergesCurrentSize */ 'merges.current_size'?: string /** - * number of completed merge ops + * The number of completed merge operations. * @aliases mt,mergesTotal */ 'merges.total'?: string /** - * docs merged + * The number of merged documents. * @aliases mtd,mergesTotalDocs */ 'merges.total_docs'?: string /** - * size merged + * The total size of merge operations. * @aliases mts,mergesTotalSize */ 'merges.total_size'?: string /** - * time spent in merges + * The time spent merging documents. * @aliases mtt,mergesTotalTime */ 'merges.total_time'?: string /** - * total refreshes + * The total number of refreshes. */ 'refresh.total'?: string /** - * time spent in refreshes + * The time spent in refreshes. */ 'refresh.time'?: string /** - * total external refreshes + * The total number of external refreshes. * @aliases rto,refreshTotal */ 'refresh.external_total'?: string /** - * time spent in external refreshes + * The time spent in external refreshes. * @aliases rti,refreshTime */ 'refresh.external_time'?: string /** - * number of pending refresh listeners + * The number of pending refresh listeners. * @aliases rli,refreshListeners */ 'refresh.listeners'?: string /** - * current fetch phase ops + * The number of current fetch phase operations. * @aliases sfc,searchFetchCurrent */ 'search.fetch_current'?: string /** - * time spent in fetch phase + * The time spent in the fetch phase. * @aliases sfti,searchFetchTime */ 'search.fetch_time'?: string /** - * total fetch ops + * The total number of fetch operations. * @aliases sfto,searchFetchTotal */ 'search.fetch_total'?: string /** - * open search contexts + * The number of open search contexts. * @aliases so,searchOpenContexts */ 'search.open_contexts'?: string /** - * current query phase ops + * The number of current query phase operations. * @aliases sqc,searchQueryCurrent */ 'search.query_current'?: string /** - * time spent in query phase + * The time spent in the query phase. * @aliases sqti,searchQueryTime */ 'search.query_time'?: string /** - * total query phase ops + * The total number of query phase operations.
* @aliases sqto,searchQueryTotal */ 'search.query_total'?: string /** - * open scroll contexts + * The number of open scroll contexts. * @aliases scc,searchScrollCurrent */ 'search.scroll_current'?: string /** - * time scroll contexts held open + * The time scroll contexts were held open. * @aliases scti,searchScrollTime */ 'search.scroll_time'?: string /** - * completed scroll contexts + * The number of completed scroll contexts. * @aliases scto,searchScrollTotal */ 'search.scroll_total'?: string /** - * number of segments + * The number of segments. * @aliases sc,segmentsCount */ 'segments.count'?: string /** - * memory used by segments + * The memory used by segments. * @aliases sm,segmentsMemory */ 'segments.memory'?: string /** - * memory used by index writer + * The memory used by the index writer. * @aliases siwm,segmentsIndexWriterMemory */ 'segments.index_writer_memory'?: string /** - * memory used by version map + * The memory used by the version map. * @aliases svmm,segmentsVersionMapMemory */ 'segments.version_map_memory'?: string /** - * memory used by fixed bit sets for nested object field types and export type filters for types referred in _parent fields + * The memory used by fixed bit sets for nested object field types and export type filters for types referred in `_parent` fields. * @aliases sfbm,fixedBitsetMemory */ 'segments.fixed_bitset_memory'?: string /** - * max sequence number + * The maximum sequence number. * @aliases sqm,maxSeqNo */ 'seq_no.max'?: string /** - * local checkpoint + * The local checkpoint. * @aliases sql,localCheckpoint */ 'seq_no.local_checkpoint'?: string /** - * global checkpoint + * The global checkpoint. * @aliases sqg,globalCheckpoint */ 'seq_no.global_checkpoint'?: string /** - * current warmer ops + * The number of current warmer operations. * @aliases wc,warmerCurrent */ 'warmer.current'?: string /** - * total warmer ops + * The total number of warmer operations. * @aliases wto,warmerTotal */ 'warmer.total'?: string /** - * time spent in warmers + * The time spent in warmer operations. * @aliases wtt,warmerTotalTime */ 'warmer.total_time'?: string /** - * shard data path + * The shard data path. * @aliases pd,dataPath */ 'path.data'?: string /** - * shard state path + * The shard state path. * @aliases ps,statsPath */ 'path.state'?: string /** - * number of bulk shard ops + * The number of bulk shard operations. * @aliases bto,bulkTotalOperations */ 'bulk.total_operations'?: string /** - * time spend in shard bulk + * The time spent in shard bulk operations. * @aliases btti,bulkTotalTime */ 'bulk.total_time'?: string /** - * total size in bytes of shard bulk + * The total size in bytes of shard bulk operations. * @aliases btsi,bulkTotalSizeInBytes */ 'bulk.total_size_in_bytes'?: string /** - * average time spend in shard bulk + * The average time spent in shard bulk operations. * @aliases bati,bulkAvgTime */ 'bulk.avg_time'?: string /** - * avg size in bytes of shard bulk + * The average size in bytes of shard bulk operations.
* @aliases basi,bulkAvgSizeInBytes */ 'bulk.avg_size_in_bytes'?: string diff --git a/specification/cat/snapshots/CatSnapshotsRequest.ts b/specification/cat/snapshots/CatSnapshotsRequest.ts index 95f2b1bf3a..fa54d7e435 100644 --- a/specification/cat/snapshots/CatSnapshotsRequest.ts +++ b/specification/cat/snapshots/CatSnapshotsRequest.ts @@ -21,16 +21,30 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { Names } from '@_types/common' /** + * Returns information about the snapshots stored in one or more repositories. + * A snapshot is a backup of an index or running Elasticsearch cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API. * @rest_spec_name cat.snapshots * @availability stack since=2.1.0 stability=stable * @availability serverless stability=stable visibility=private * @doc_id cat-snapshots + * @cluster_privileges monitor_snapshot */ export interface Request extends CatRequestBase { path_parts: { + /** + * A comma-separated list of snapshot repositories used to limit the request. + * Accepts wildcard expressions. + * `_all` returns all repositories. + * If any repository fails during the request, Elasticsearch returns an error. + */ repository?: Names } query_parameters: { + /** + * If `true`, the response does not include information from unavailable snapshots. + * @server_default false + */ ignore_unavailable?: boolean } } diff --git a/specification/cat/snapshots/types.ts b/specification/cat/snapshots/types.ts index 92270e86bd..9b2a6c3064 100644 --- a/specification/cat/snapshots/types.ts +++ b/specification/cat/snapshots/types.ts @@ -23,67 +23,73 @@ import { Stringified } from '@spec_utils/Stringified' export class SnapshotsRecord { /** - * unique snapshot + * The unique identifier for the snapshot. * @aliases snapshot */ 'id'?: string /** - * repository name + * The repository name. * @aliases re,repo */ 'repository'?: string /** - * snapshot name + * The state of the snapshot process. + * Returned values include: + * `FAILED`: The snapshot process failed. + * `INCOMPATIBLE`: The snapshot process is incompatible with the current cluster version. + * `IN_PROGRESS`: The snapshot process started but has not completed. + * `PARTIAL`: The snapshot process completed with a partial success. + * `SUCCESS`: The snapshot process completed with a full success. * @aliases s */ 'status'?: string /** - * start time in seconds since 1970-01-01 00:00:00 + * The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process started. * @aliases ste,startEpoch */ 'start_epoch'?: Stringified<EpochTime<UnitSeconds>> /** - * start time in HH:MM:SS + * The time (HH:MM:SS) at which the snapshot process started. * @aliases sti,startTime */ 'start_time'?: ScheduleTimeOfDay /** - * end time in seconds since 1970-01-01 00:00:00 + * The Unix epoch time (seconds since 1970-01-01 00:00:00) at which the snapshot process ended. * @aliases ete,endEpoch */ 'end_epoch'?: Stringified<EpochTime<UnitSeconds>> /** - * end time in HH:MM:SS + * The time (HH:MM:SS) at which the snapshot process ended. * @aliases eti,endTime */ 'end_time'?: TimeOfDay /** - * duration + * The time it took the snapshot process to complete, in time units. * @aliases dur */ 'duration'?: Duration /** - * number of indices + * The number of indices in the snapshot. * @aliases i */ 'indices'?: string /** - * number of successful shards + * The number of successful shards in the snapshot.
* @aliases ss */ 'successful_shards'?: string /** - * number of failed shards + * The number of failed shards in the snapshot. * @aliases fs */ 'failed_shards'?: string /** - * number of total shards + * The total number of shards in the snapshot. * @aliases ts */ 'total_shards'?: string /** - * reason for failures + * The reason for any snapshot failures. * @aliases r */ 'reason'?: string diff --git a/specification/cat/tasks/CatTasksRequest.ts b/specification/cat/tasks/CatTasksRequest.ts index e53d65f714..22a61bfdcf 100644 --- a/specification/cat/tasks/CatTasksRequest.ts +++ b/specification/cat/tasks/CatTasksRequest.ts @@ -21,16 +21,28 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { long } from '@_types/Numeric' /** + * Returns information about tasks currently executing in the cluster. + * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API. * @rest_spec_name cat.tasks * @availability stack since=5.0.0 stability=experimental * @availability serverless stability=experimental visibility=private * @doc_id tasks + * @cluster_privileges monitor */ export interface Request extends CatRequestBase { query_parameters: { + /** + * The task action names, which are used to limit the response. + */ actions?: string[] + /** + * If `true`, the response includes detailed information about the running tasks, which is useful to distinguish tasks from each other but is more costly to run. + * @server_default false + */ detailed?: boolean + /** Unique node identifiers, which are used to limit the response. */ node_id?: string[] - parent_task?: long + /** The parent task identifier, which is used to limit the response. */ + parent_task_id?: string } } diff --git a/specification/cat/tasks/types.ts b/specification/cat/tasks/types.ts index 0207f34f62..5037a0f34c 100644 --- a/specification/cat/tasks/types.ts +++ b/specification/cat/tasks/types.ts @@ -21,80 +21,80 @@ import { Id, NodeId, VersionString } from '@_types/common' export class TasksRecord { /** - * id of the task with the node + * The identifier of the task with the node. */ 'id'?: Id /** - * task action + * The task action. * @aliases ac */ 'action'?: string /** - * unique task id + * The unique task identifier. * @aliases ti */ 'task_id'?: Id /** - * parent task id + * The parent task identifier. * @aliases pti */ 'parent_task_id'?: string /** - * task type + * The task type. * @aliases ty */ 'type'?: string /** - * start time in ms + * The start time in milliseconds. * @aliases start */ 'start_time'?: string /** - * start time in HH:MM:SS + * The start time in `HH:MM:SS` format. * @aliases ts,hms,hhmmss */ 'timestamp'?: string /** - * running time ns + * The running time in nanoseconds. */ 'running_time_ns'?: string /** - * running time + * The running time. * @aliases time */ 'running_time'?: string /** - * unique node id + * The unique node identifier. * @aliases ni */ 'node_id'?: NodeId /** - * ip address + * The IP address for the node. * @aliases i */ 'ip'?: string /** - * bound transport port + * The bound transport port for the node. * @aliases po */ 'port'?: string /** - * node name + * The node name. * @aliases n */ 'node'?: string /** - * es version + * The Elasticsearch version. * @aliases v */ 'version'?: VersionString /** - * X-Opaque-ID header + * The X-Opaque-ID header. * @aliases x */ 'x_opaque_id'?: string /** - * task action + * The task action description.
* @aliases desc */ 'description'?: string diff --git a/specification/cat/templates/CatTemplatesRequest.ts b/specification/cat/templates/CatTemplatesRequest.ts index 2b0e2b0fe1..14e3a4b365 100644 --- a/specification/cat/templates/CatTemplatesRequest.ts +++ b/specification/cat/templates/CatTemplatesRequest.ts @@ -21,13 +21,21 @@ import { CatRequestBase } from '@cat/_types/CatBase' import { Name } from '@_types/common' /** + * Returns information about index templates in a cluster. + * You can use index templates to apply index settings and field mappings to new indices at creation. + * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API. * @rest_spec_name cat.templates * @availability stack since=5.2.0 stability=stable * @availability serverless stability=stable visibility=private * @doc_id cat-templates + * @cluster_privileges monitor */ export interface Request extends CatRequestBase { path_parts: { + /** + * The name of the template to return. + * Accepts wildcard expressions. If omitted, all templates are returned. + */ name?: Name } } diff --git a/specification/cat/templates/types.ts b/specification/cat/templates/types.ts index 42073ad2e3..33798f38f2 100644 --- a/specification/cat/templates/types.ts +++ b/specification/cat/templates/types.ts @@ -21,27 +21,27 @@ import { Name, VersionString } from '@_types/common' export class TemplatesRecord { /** - * template name + * The template name. * @aliases n */ 'name'?: Name /** - * template index patterns + * The template index patterns. * @aliases t */ 'index_patterns'?: string /** - * template application order/priority number + * The template application order or priority number. * @aliases o,p */ 'order'?: string /** - * version + * The template version. * @aliases v */ 'version'?: VersionString | null /** - * component templates comprising index template + * The component templates that comprise the index template. * @aliases c */ 'composed_of'?: string diff --git a/specification/cat/thread_pool/CatThreadPoolRequest.ts b/specification/cat/thread_pool/CatThreadPoolRequest.ts index 9dd5f87186..6081a1bb50 100644 --- a/specification/cat/thread_pool/CatThreadPoolRequest.ts +++ b/specification/cat/thread_pool/CatThreadPoolRequest.ts @@ -22,21 +22,26 @@ import { Names } from '@_types/common' import { TimeUnit } from '@_types/Time' /** + * Returns thread pool statistics for each node in a cluster. + * Returned information includes all built-in thread pools and custom thread pools. + * IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. * @rest_spec_name cat.thread_pool * @availability stack since=0.0.0 stability=stable * @availability serverless stability=stable visibility=private * @doc_id cat-thread-pool + * @cluster_privileges monitor */ export interface Request extends CatRequestBase { path_parts: { /** - * List of thread pool names used to limit the request. Accepts wildcard expressions. + * A comma-separated list of thread pool names used to limit the request. + * Accepts wildcard expressions. */ thread_pool_patterns?: Names } query_parameters: { /** - * Unit used to display time values. + * The unit used to display time values. 
*/ time?: TimeUnit } diff --git a/specification/cat/thread_pool/types.ts b/specification/cat/thread_pool/types.ts index 060323b781..53d613d2d4 100644 --- a/specification/cat/thread_pool/types.ts +++ b/specification/cat/thread_pool/types.ts @@ -21,102 +21,103 @@ import { NodeId } from '@_types/common' export class ThreadPoolRecord { /** - * node name + * The node name. * @aliases nn */ 'node_name'?: string /** - * persistent node id + * The persistent node identifier. * @aliases id */ 'node_id'?: NodeId /** - * ephemeral node id + * The ephemeral node identifier. * @aliases eid */ 'ephemeral_node_id'?: string /** - * process id + * The process identifier. * @aliases p */ 'pid'?: string /** - * host name + * The host name for the current node. * @aliases h */ 'host'?: string /** - * ip address + * The IP address for the current node. * @aliases i */ 'ip'?: string /** - * bound transport port + * The bound transport port for the current node. * @aliases po */ 'port'?: string /** - * thread pool name + * The thread pool name. * @aliases n */ 'name'?: string /** - * thread pool type + * The thread pool type. + * Returned values include `fixed`, `fixed_auto_queue_size`, `direct`, and `scaling`. * @aliases t */ 'type'?: string /** - * number of active threads + * The number of active threads in the current thread pool. * @aliases a */ 'active'?: string /** - * number of threads + * The number of threads in the current thread pool. * @aliases psz */ 'pool_size'?: string /** - * number of tasks currently in queue + * The number of tasks currently in queue. * @aliases q */ 'queue'?: string /** - * maximum number of tasks permitted in queue + * The maximum number of tasks permitted in the queue. * @aliases qs */ 'queue_size'?: string /** - * number of rejected tasks + * The number of rejected tasks. * @aliases r */ 'rejected'?: string /** - * highest number of seen active threads + * The highest number of active threads in the current thread pool. * @aliases l */ 'largest'?: string /** - * number of completed tasks + * The number of completed tasks. * @aliases c */ 'completed'?: string /** - * core number of threads in a scaling thread pool + * The core number of active threads allowed in a scaling thread pool. * @aliases cr */ 'core'?: string | null /** - * maximum number of threads in a scaling thread pool + * The maximum number of active threads allowed in a scaling thread pool. * @aliases mx */ 'max'?: string | null /** - * number of threads in a fixed thread pool + * The number of active threads allowed in a fixed thread pool. * @aliases sz */ 'size'?: string | null /** - * thread keep alive time + * The thread keep alive time. * @aliases ka */ 'keep_alive'?: string | null diff --git a/specification/cat/transforms/CatTransformsRequest.ts b/specification/cat/transforms/CatTransformsRequest.ts index 10441cfbba..5b5f4c9bc7 100644 --- a/specification/cat/transforms/CatTransformsRequest.ts +++ b/specification/cat/transforms/CatTransformsRequest.ts @@ -33,27 +33,45 @@ import { Duration, TimeUnit } from '@_types/Time' * @availability stack since=7.7.0 stability=stable * @availability serverless stability=stable visibility=public * @doc_id cat-transforms + * @cluster_privileges monitor_transform */ export interface Request extends CatRequestBase { path_parts: { + /** + * A transform identifier or a wildcard expression. + * If you do not specify one of these options, the API returns information for all transforms. 
+ */ transform_id?: Id } query_parameters: { + /** + * Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. + * If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. + * If `false`, the request returns a 404 status code when there are no matches or only partial matches. + * @server_default true + */ allow_no_match?: boolean + /** + * Skips the specified number of transforms. + * @server_default 0 + */ from?: integer /** * Comma-separated list of column names to display. - * @server_default create_time,id,state,type + * @server_default changes_last_detection_time,checkpoint,checkpoint_progress,documents_processed,id,last_search_time,state */ h?: CatTransformColumns - /** Comma-separated list of column names or column aliases used to sort the - * response. + /** Comma-separated list of column names or column aliases used to sort the response. */ s?: CatTransformColumns /** - * Unit used to display time values. + * The unit used to display time values. */ time?: TimeUnit + /** + * The maximum number of transforms to obtain. + * @server_default 100 + */ size?: integer } } diff --git a/specification/cat/transforms/types.ts b/specification/cat/transforms/types.ts index 83d50a717b..57d93e3dc1 100644 --- a/specification/cat/transforms/types.ts +++ b/specification/cat/transforms/types.ts @@ -21,166 +21,176 @@ import { Id, VersionString } from '@_types/common' export class TransformsRecord { /** - * the id + * The transform identifier. */ 'id'?: Id /** - * transform state + * The status of the transform. + * Returned values include: + * `aborting`: The transform is aborting. + * `failed`: The transform failed. For more information about the failure, check the `reason` field. + * `indexing`: The transform is actively processing data and creating new documents. + * `started`: The transform is running but not actively indexing data. + * `stopped`: The transform is stopped. + * `stopping`: The transform is stopping. * @aliases s */ 'state'?: string /** - * checkpoint + * The sequence number for the checkpoint. * @aliases c */ 'checkpoint'?: string /** - * the number of documents read from source indices and processed + * The number of documents that have been processed from the source index of the transform. * @aliases docp, documentsProcessed */ 'documents_processed'?: string /** - * progress of the checkpoint + * The progress of the next checkpoint that is currently in progress. * @aliases cp, checkpointProgress */ 'checkpoint_progress'?: string | null /** - * last time transform searched for updates + * The timestamp of the last search in the source indices. + * This field is shown only if the transform is running. * @aliases lst, lastSearchTime */ 'last_search_time'?: string | null /** - * changes last detected time + * The timestamp when changes were last detected in the source indices. * @aliases cldt */ 'changes_last_detection_time'?: string | null /** - * transform creation time + * The time the transform was created. * @aliases ct, createTime */ 'create_time'?: string /** - * the version of Elasticsearch when the transform was created + * The version of Elasticsearch that existed on the node when the transform was created. * @aliases v */ 'version'?: VersionString /** - * source index + * The source indices for the transform.
* @aliases si, sourceIndex */ 'source_index'?: string /** - * destination index + * The destination index for the transform. * @aliases di, destIndex */ 'dest_index'?: string /** - * transform pipeline + * The unique identifier for the ingest pipeline. * @aliases p */ 'pipeline'?: string /** - * description + * The description of the transform. * @aliases d */ 'description'?: string /** - * batch or continuous transform + * The type of transform: `batch` or `continuous`. * @aliases tt */ 'transform_type'?: string /** - * frequency of transform + * The interval between checks for changes in the source indices when the transform is running continuously. * @aliases f */ 'frequency'?: string /** - * max page search size + * The initial page size that is used for the composite aggregation for each checkpoint. * @aliases mpsz */ 'max_page_search_size'?: string /** - * docs per second + * The number of input documents per second. * @aliases dps */ 'docs_per_second'?: string /** - * reason for the current state + * If a transform has a `failed` state, these details describe the reason for failure. * @aliases r */ 'reason'?: string /** - * total number of search phases + * The total number of search operations on the source index for the transform. * @aliases st */ 'search_total'?: string /** - * total number of search failures + * The total number of search failures. * @aliases sf */ 'search_failure'?: string /** - * total search time + * The total amount of search time, in milliseconds. * @aliases stime */ 'search_time'?: string /** - * total number of index phases done by the transform + * The total number of index operations done by the transform. * @aliases it */ 'index_total'?: string /** - * total number of index failures + * The total number of indexing failures. * @aliases if */ 'index_failure'?: string /** - * total time spent indexing documents + * The total time spent indexing documents, in milliseconds. * @aliases itime */ 'index_time'?: string /** - * the number of documents written to the destination index + * The number of documents that have been indexed into the destination index for the transform. * @aliases doci */ 'documents_indexed'?: string /** - * total time spent deleting documents + * The total time spent deleting documents, in milliseconds. * @aliases dtime */ 'delete_time'?: string /** - * the number of documents deleted from the destination index + * The number of documents deleted from the destination index due to the retention policy for the transform. * @aliases docd */ 'documents_deleted'?: string /** - * the number of times the transform has been triggered + * The number of times the transform has been triggered by the scheduler. + * For example, the scheduler triggers the transform indexer to check for updates or ingest new data at an interval specified in the `frequency` property. * @aliases tc */ 'trigger_count'?: string /** - * the number of pages processed + * The number of search or bulk index operations processed. + * Documents are processed in batches instead of individually. * @aliases pp */ 'pages_processed'?: string /** - * the total time spent processing documents + * The total time spent processing results, in milliseconds. * @aliases pt */ 'processing_time'?: string /** - * exponential average checkpoint processing time (milliseconds) + * The exponential moving average of the duration of the checkpoint, in milliseconds. 
* @aliases cdtea, checkpointTimeExpAvg */ 'checkpoint_duration_time_exp_avg'?: string /** - * exponential average number of documents indexed + * The exponential moving average of the number of new documents that have been indexed. * @aliases idea */ 'indexed_documents_exp_avg'?: string /** - * exponential average number of documents processed + * The exponential moving average of the number of documents that have been processed. * @aliases pdea */ 'processed_documents_exp_avg'?: string diff --git a/specification/ml/_types/Job.ts b/specification/ml/_types/Job.ts index ecd3c7243e..548218f93f 100644 --- a/specification/ml/_types/Job.ts +++ b/specification/ml/_types/Job.ts @@ -34,10 +34,20 @@ import { ModelSizeStats } from './Model' import { Datafeed, DatafeedConfig } from '@ml/_types/Datafeed' export enum JobState { + /** The job close action is in progress and has not yet completed. A closing job cannot accept further data. */ closing = 0, + /** The job finished successfully with its model state persisted. The job must be opened before it can accept further data. */ closed = 1, + /** The job is available to receive and process data. */ opened = 2, + /** + * The job did not finish successfully due to an error. + * This situation can occur due to invalid input data, a fatal error occurring during the analysis, or an external interaction such as the process being killed by the Linux out of memory (OOM) killer. + * If the job has irrevocably failed, it must be force closed and then deleted. + * If the datafeed can be corrected, the job can be closed and then re-opened. + */ failed = 3, + /** The job open action is in progress and has not yet completed. */ opening = 4 }
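
Note (not part of the diff): as a quick end-to-end sanity check of the privilege and parameter annotations above, the sketch below shows how they surface in the official @elastic/elasticsearch JavaScript client, which is generated from this specification. It is illustrative only; the endpoint, API key, and index name are placeholders, and the key is assumed to carry the privileges documented above (`monitor` on cluster and index for cat.segments, `monitor_transform` for cat.transforms).

import { Client } from '@elastic/elasticsearch'

// Placeholder connection details; swap in a real endpoint and API key.
const client = new Client({
  node: 'https://localhost:9200',
  auth: { apiKey: '<base64-api-key>' }
})

async function inspect(): Promise<void> {
  // cat.segments requires the `monitor` cluster and index privileges.
  // `bytes` selects the unit used to display byte values.
  const segments = await client.cat.segments({
    index: 'my-index', // hypothetical index; omit to target all indices
    bytes: 'mb',
    format: 'json' // cat APIs return text by default; JSON is easier to inspect
  })

  // cat.transforms requires the `monitor_transform` cluster privilege.
  // With `allow_no_match: true` (the server default), no matches yields an
  // empty array rather than a 404 status code.
  const transforms = await client.cat.transforms({
    allow_no_match: true,
    from: 0, // server default
    size: 100, // server default
    format: 'json'
  })

  console.log(segments, transforms)
}

inspect().catch(console.error)

If the API key lacks one of the documented privileges, Elasticsearch should reject the call with a security exception (HTTP 403), which is a cheap way to verify the `@cluster_privileges` and `@index_privileges` annotations against a live cluster.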