diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index 8e07c2a96d..d414976e5e 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -20573,7 +20573,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. You can find the available task types next to the integration name:\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI 
(`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. You can find the available task types next to the integration name:\n* AI21 (`chat_completion`, `completion`)\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration 
(`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", "operationId": "inference-put", "parameters": [ { @@ -20694,7 +20694,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. You can find the available task types next to the integration name:\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference 
integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. You can find the available task types next to the integration name:\n* AI21 (`chat_completion`, `completion`)\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## 
Required authorization\n\n* Cluster privileges: `manage_inference`\n", "operationId": "inference-put-1", "parameters": [ { @@ -20812,6 +20812,100 @@ ] } }, + "/_inference/{task_type}/{ai21_inference_id}": { + "put": { + "tags": [ + "inference" + ], + "summary": "Create an AI21 inference endpoint", + "description": "Create an inference endpoint to perform an inference task with the `ai21` service.\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", + "operationId": "inference-put-ai21", + "parameters": [ + { + "in": "path", + "name": "task_type", + "description": "The type of the inference task that the model will perform.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/inference._types.Ai21TaskType" + }, + "style": "simple" + }, + { + "in": "path", + "name": "ai21_inference_id", + "description": "The unique identifier of the inference endpoint.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Id" + }, + "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "service": { + "$ref": "#/components/schemas/inference._types.Ai21ServiceType" + }, + "service_settings": { + "$ref": "#/components/schemas/inference._types.Ai21ServiceSettings" + } + }, + "required": [ + "service", + "service_settings" + ] + }, + "examples": { + "PutAi21RequestExample1": { + "description": "Run `PUT _inference/completion/ai21-completion` to create an AI21 inference endpoint that performs a `completion` task.", + "value": "{\n \"service\": \"ai21\",\n \"service_settings\": {\n \"api_key\": \"ai21-api-key\",\n \"model_id\": \"jamba-large\" \n }\n}" + 
}, + "PutAi21RequestExample2": { + "description": "Run `PUT _inference/chat-completion/ai21-chat-completion` to create an AI21 inference endpoint that performs a `chat_completion` task.", + "value": "{\n \"service\": \"ai21\",\n \"service_settings\": {\n \"api_key\": \"ai21-api-key\",\n \"model_id\": \"jamba-mini\" \n }\n}" + } + } + } + } + }, + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/inference._types.InferenceEndpointInfoAi21" + } + } + } + } + }, + "x-state": "Generally available; Added in 9.2.0", + "x-metaTags": [ + { + "content": "Elasticsearch, Machine Learning", + "name": "product_name" + } + ] + } + }, "/_inference/{task_type}/{alibabacloud_inference_id}": { "put": { "tags": [ @@ -88743,6 +88837,81 @@ "relevance_score" ] }, + "inference._types.Ai21TaskType": { + "type": "string", + "enum": [ + "completion", + "chat_completion" + ] + }, + "inference._types.Ai21ServiceType": { + "type": "string", + "enum": [ + "ai21" + ] + }, + "inference._types.Ai21ServiceSettings": { + "type": "object", + "properties": { + "model_id": { + "externalDocs": { + "url": "https://docs.ai21.com/docs/jamba-foundation-models" + }, + "description": "The name of the model to use for the inference task.\nRefer to the AI21 models documentation for the list of supported models and versions.\nService has been tested and confirmed to be working for `completion` and `chat_completion` tasks with the following models:\n* `jamba-mini`\n* `jamba-large`", + "type": "string" + }, + "api_key": { + "description": "A valid API key for accessing AI21 API.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API 
key.", + "type": "string" + }, + "rate_limit": { + "$ref": "#/components/schemas/inference._types.RateLimitSetting" + } + }, + "required": [ + "model_id" + ] + }, + "inference._types.RateLimitSetting": { + "description": "This setting helps to minimize the number of rate limit errors returned from the service.", + "type": "object", + "properties": { + "requests_per_minute": { + "description": "The number of requests allowed per minute.\nBy default, the number of requests allowed per minute is set by each service as follows:\n\n* `alibabacloud-ai-search` service: `1000`\n* `anthropic` service: `50`\n* `azureaistudio` service: `240`\n* `azureopenai` service and task type `text_embedding`: `1440`\n* `azureopenai` service and task type `completion`: `120`\n* `cohere` service: `10000`\n* `elastic` service and task type `chat_completion`: `240`\n* `googleaistudio` service: `360`\n* `googlevertexai` service: `30000`\n* `hugging_face` service: `3000`\n* `jinaai` service: `2000`\n* `mistral` service: `240`\n* `openai` service and task type `text_embedding`: `3000`\n* `openai` service and task type `completion`: `500`\n* `voyageai` service: `2000`\n* `watsonxai` service: `120`", + "type": "number" + } + } + }, + "inference._types.InferenceEndpointInfoAi21": { + "allOf": [ + { + "$ref": "#/components/schemas/inference._types.InferenceEndpoint" + }, + { + "type": "object", + "properties": { + "inference_id": { + "description": "The inference Id", + "type": "string" + }, + "task_type": { + "$ref": "#/components/schemas/inference._types.TaskTypeAi21" + } + }, + "required": [ + "inference_id", + "task_type" + ] + } + ] + }, + "inference._types.TaskTypeAi21": { + "type": "string", + "enum": [ + "completion", + "chat_completion" + ] + }, "inference._types.AlibabaCloudTaskType": { "type": "string", "enum": [ @@ -88791,16 +88960,6 @@ "workspace" ] }, - "inference._types.RateLimitSetting": { - "description": "This setting helps to minimize the number of rate limit errors returned from 
the service.", - "type": "object", - "properties": { - "requests_per_minute": { - "description": "The number of requests allowed per minute.\nBy default, the number of requests allowed per minute is set by each service as follows:\n\n* `alibabacloud-ai-search` service: `1000`\n* `anthropic` service: `50`\n* `azureaistudio` service: `240`\n* `azureopenai` service and task type `text_embedding`: `1440`\n* `azureopenai` service and task type `completion`: `120`\n* `cohere` service: `10000`\n* `elastic` service and task type `chat_completion`: `240`\n* `googleaistudio` service: `360`\n* `googlevertexai` service: `30000`\n* `hugging_face` service: `3000`\n* `jinaai` service: `2000`\n* `mistral` service: `240`\n* `openai` service and task type `text_embedding`: `3000`\n* `openai` service and task type `completion`: `500`\n* `voyageai` service: `2000`\n* `watsonxai` service: `120`", - "type": "number" - } - } - }, "inference._types.AlibabaCloudTaskSettings": { "type": "object", "properties": { diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index a10abb801d..fd422ee6b7 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -11368,7 +11368,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the 
inference API. You can find the available task types next to the integration name:\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. 
You can find the available task types next to the integration name:\n* AI21 (`chat_completion`, `completion`)\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", "operationId": "inference-put", "parameters": [ { @@ -11489,7 +11489,7 @@ "inference" ], "summary": "Create an inference endpoint", - "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following 
integrations are available through the inference API. You can find the available task types next to the integration name:\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", + "description": "IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. 
You can find the available task types next to the integration name:\n* AI21 (`chat_completion`, `completion`)\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", "operationId": "inference-put-1", "parameters": [ { @@ -11607,6 +11607,100 @@ ] } }, + "/_inference/{task_type}/{ai21_inference_id}": { + "put": { + "tags": [ + "inference" + ], + "summary": "Create an AI21 inference endpoint", + "description": "Create an inference endpoint to perform an inference task with the `ai21` service.\n\n## Required authorization\n\n* Cluster privileges: `manage_inference`\n", + "operationId": "inference-put-ai21", + "parameters": [ + { + "in": "path", + "name": "task_type", + "description": "The type of the inference task that the model will perform.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/inference._types.Ai21TaskType" + }, + "style": 
"simple" + }, + { + "in": "path", + "name": "ai21_inference_id", + "description": "The unique identifier of the inference endpoint.", + "required": true, + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Id" + }, + "style": "simple" + }, + { + "in": "query", + "name": "timeout", + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "deprecated": false, + "schema": { + "$ref": "#/components/schemas/_types.Duration" + }, + "style": "form" + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "service": { + "$ref": "#/components/schemas/inference._types.Ai21ServiceType" + }, + "service_settings": { + "$ref": "#/components/schemas/inference._types.Ai21ServiceSettings" + } + }, + "required": [ + "service", + "service_settings" + ] + }, + "examples": { + "PutAi21RequestExample1": { + "description": "Run `PUT _inference/completion/ai21-completion` to create an AI21 inference endpoint that performs a `completion` task.", + "value": "{\n \"service\": \"ai21\",\n \"service_settings\": {\n \"api_key\": \"ai21-api-key\",\n \"model_id\": \"jamba-large\" \n }\n}" + }, + "PutAi21RequestExample2": { + "description": "Run `PUT _inference/chat-completion/ai21-chat-completion` to create an AI21 inference endpoint that performs a `chat_completion` task.", + "value": "{\n \"service\": \"ai21\",\n \"service_settings\": {\n \"api_key\": \"ai21-api-key\",\n \"model_id\": \"jamba-mini\" \n }\n}" + } + } + } + } + }, + "responses": { + "200": { + "description": "", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/inference._types.InferenceEndpointInfoAi21" + } + } + } + } + }, + "x-state": "Generally available", + "x-metaTags": [ + { + "content": "Elasticsearch, Machine Learning", + "name": "product_name" + } + ] + } + }, "/_inference/{task_type}/{alibabacloud_inference_id}": { "put": { "tags": [ @@ -56063,6 +56157,81 
@@ "relevance_score" ] }, + "inference._types.Ai21TaskType": { + "type": "string", + "enum": [ + "completion", + "chat_completion" + ] + }, + "inference._types.Ai21ServiceType": { + "type": "string", + "enum": [ + "ai21" + ] + }, + "inference._types.Ai21ServiceSettings": { + "type": "object", + "properties": { + "model_id": { + "externalDocs": { + "url": "https://docs.ai21.com/docs/jamba-foundation-models" + }, + "description": "The name of the model to use for the inference task.\nRefer to the AI21 models documentation for the list of supported models and versions.\nService has been tested and confirmed to be working for `completion` and `chat_completion` tasks with the following models:\n* `jamba-mini`\n* `jamba-large`", + "type": "string" + }, + "api_key": { + "description": "A valid API key for accessing AI21 API.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", + "type": "string" + }, + "rate_limit": { + "$ref": "#/components/schemas/inference._types.RateLimitSetting" + } + }, + "required": [ + "model_id" + ] + }, + "inference._types.RateLimitSetting": { + "description": "This setting helps to minimize the number of rate limit errors returned from the service.", + "type": "object", + "properties": { + "requests_per_minute": { + "description": "The number of requests allowed per minute.\nBy default, the number of requests allowed per minute is set by each service as follows:\n\n* `alibabacloud-ai-search` service: `1000`\n* `anthropic` service: `50`\n* `azureaistudio` service: `240`\n* `azureopenai` service and task type `text_embedding`: `1440`\n* `azureopenai` service and task type `completion`: `120`\n* `cohere` service: `10000`\n* `elastic` 
service and task type `chat_completion`: `240`\n* `googleaistudio` service: `360`\n* `googlevertexai` service: `30000`\n* `hugging_face` service: `3000`\n* `jinaai` service: `2000`\n* `mistral` service: `240`\n* `openai` service and task type `text_embedding`: `3000`\n* `openai` service and task type `completion`: `500`\n* `voyageai` service: `2000`\n* `watsonxai` service: `120`", + "type": "number" + } + } + }, + "inference._types.InferenceEndpointInfoAi21": { + "allOf": [ + { + "$ref": "#/components/schemas/inference._types.InferenceEndpoint" + }, + { + "type": "object", + "properties": { + "inference_id": { + "description": "The inference Id", + "type": "string" + }, + "task_type": { + "$ref": "#/components/schemas/inference._types.TaskTypeAi21" + } + }, + "required": [ + "inference_id", + "task_type" + ] + } + ] + }, + "inference._types.TaskTypeAi21": { + "type": "string", + "enum": [ + "completion", + "chat_completion" + ] + }, "inference._types.AlibabaCloudTaskType": { "type": "string", "enum": [ @@ -56111,16 +56280,6 @@ "workspace" ] }, - "inference._types.RateLimitSetting": { - "description": "This setting helps to minimize the number of rate limit errors returned from the service.", - "type": "object", - "properties": { - "requests_per_minute": { - "description": "The number of requests allowed per minute.\nBy default, the number of requests allowed per minute is set by each service as follows:\n\n* `alibabacloud-ai-search` service: `1000`\n* `anthropic` service: `50`\n* `azureaistudio` service: `240`\n* `azureopenai` service and task type `text_embedding`: `1440`\n* `azureopenai` service and task type `completion`: `120`\n* `cohere` service: `10000`\n* `elastic` service and task type `chat_completion`: `240`\n* `googleaistudio` service: `360`\n* `googlevertexai` service: `30000`\n* `hugging_face` service: `3000`\n* `jinaai` service: `2000`\n* `mistral` service: `240`\n* `openai` service and task type `text_embedding`: `3000`\n* `openai` service and task 
type `completion`: `500`\n* `voyageai` service: `2000`\n* `watsonxai` service: `120`", - "type": "number" - } - } - }, "inference._types.AlibabaCloudTaskSettings": { "type": "object", "properties": { diff --git a/output/schema/schema.json b/output/schema/schema.json index e4e60de3c9..4af88fb511 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -9920,7 +9920,7 @@ "visibility": "public" } }, - "description": "Create an inference endpoint.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. 
You can find the available task types next to the integration name:\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)", + "description": "Create an inference endpoint.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. 
You can find the available task types next to the integration name:\n* AI21 (`chat_completion`, `completion`)\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)", "docId": "inference-api-put", "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put", "extPreviousVersionDocUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-inference-api.html", @@ -9960,6 +9960,51 @@ } ] }, + { + "availability": { + "serverless": { + "stability": "stable", + "visibility": "public" + }, + "stack": { + "since": "9.2.0", + "stability": "stable", + "visibility": "public" + } + }, + "description": "Create an AI21 inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `ai21` service.", + "docId": "inference-api-put-ai21", + "docUrl": "https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21", + "name": "inference.put_ai21", + "privileges": { + 
"cluster": [ + "manage_inference" + ] + }, + "request": { + "name": "Request", + "namespace": "inference.put_ai21" + }, + "requestBodyRequired": false, + "requestMediaType": [ + "application/json" + ], + "response": { + "name": "Response", + "namespace": "inference.put_ai21" + }, + "responseMediaType": [ + "application/json" + ], + "urls": [ + { + "methods": [ + "PUT" + ], + "path": "/_inference/{task_type}/{ai21_inference_id}" + } + ] + }, { "availability": { "serverless": { @@ -167605,6 +167650,85 @@ ], "specLocation": "inference/_types/CommonTypes.ts#L99-L116" }, + { + "kind": "interface", + "name": { + "name": "Ai21ServiceSettings", + "namespace": "inference._types" + }, + "properties": [ + { + "description": "The name of the model to use for the inference task.\nRefer to the AI21 models documentation for the list of supported models and versions.\nService has been tested and confirmed to be working for `completion` and `chat_completion` tasks with the following models:\n* `jamba-mini`\n* `jamba-large`", + "extDocId": "ai21-api-models", + "extDocUrl": "https://docs.ai21.com/docs/jamba-foundation-models", + "name": "model_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "A valid API key for accessing AI21 API.\n\nIMPORTANT: You need to provide the API key only once, during the inference model creation.\nThe get inference endpoint API does not retrieve your API key.\nAfter creating the inference model, you cannot change the associated API key.\nIf you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key.", + "name": "api_key", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "This setting helps to minimize the number of rate limit errors returned from the AI21 API.\nBy default, the `ai21` 
service sets the number of requests allowed per minute to 200. Please refer to AI21 documentation for more details.", + "extDocId": "ai21-rate-limit", + "extDocUrl": "https://docs.ai21.com/reference/api-rate-limits", + "name": "rate_limit", + "required": false, + "type": { + "kind": "instance_of", + "type": { + "name": "RateLimitSetting", + "namespace": "inference._types" + } + } + } + ], + "specLocation": "inference/_types/CommonTypes.ts#L292-L317" + }, + { + "kind": "enum", + "members": [ + { + "name": "ai21" + } + ], + "name": { + "name": "Ai21ServiceType", + "namespace": "inference._types" + }, + "specLocation": "inference/_types/CommonTypes.ts#L324-L326" + }, + { + "kind": "enum", + "members": [ + { + "name": "completion" + }, + { + "name": "chat_completion" + } + ], + "name": { + "name": "Ai21TaskType", + "namespace": "inference._types" + }, + "specLocation": "inference/_types/CommonTypes.ts#L319-L322" + }, { "kind": "interface", "name": { @@ -167675,7 +167799,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L292-L337" + "specLocation": "inference/_types/CommonTypes.ts#L328-L373" }, { "kind": "enum", @@ -167688,7 +167812,7 @@ "name": "AlibabaCloudServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L362-L364" + "specLocation": "inference/_types/CommonTypes.ts#L398-L400" }, { "kind": "interface", @@ -167722,7 +167846,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L339-L353" + "specLocation": "inference/_types/CommonTypes.ts#L375-L389" }, { "kind": "enum", @@ -167744,7 +167868,7 @@ "name": "AlibabaCloudTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L355-L360" + "specLocation": "inference/_types/CommonTypes.ts#L391-L396" }, { "kind": "interface", @@ -167832,7 +167956,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L366-L408" + "specLocation": "inference/_types/CommonTypes.ts#L402-L444" }, { "kind": "enum", @@ -167845,7 
+167969,7 @@ "name": "AmazonBedrockServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L441-L443" + "specLocation": "inference/_types/CommonTypes.ts#L477-L479" }, { "kind": "interface", @@ -167904,7 +168028,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L410-L434" + "specLocation": "inference/_types/CommonTypes.ts#L446-L470" }, { "kind": "enum", @@ -167920,7 +168044,7 @@ "name": "AmazonBedrockTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L436-L439" + "specLocation": "inference/_types/CommonTypes.ts#L472-L475" }, { "kind": "enum", @@ -167936,7 +168060,7 @@ "name": "AmazonSageMakerApi", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L501-L504" + "specLocation": "inference/_types/CommonTypes.ts#L537-L540" }, { "kind": "interface", @@ -168079,7 +168203,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L445-L499" + "specLocation": "inference/_types/CommonTypes.ts#L481-L535" }, { "kind": "enum", @@ -168092,7 +168216,7 @@ "name": "AmazonSageMakerServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L584-L586" + "specLocation": "inference/_types/CommonTypes.ts#L620-L622" }, { "kind": "interface", @@ -168172,7 +168296,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L535-L564" + "specLocation": "inference/_types/CommonTypes.ts#L571-L600" }, { "kind": "interface", @@ -168219,7 +168343,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L588-L604" + "specLocation": "inference/_types/CommonTypes.ts#L624-L640" }, { "kind": "enum", @@ -168232,7 +168356,7 @@ "name": "AnthropicServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L637-L639" + "specLocation": "inference/_types/CommonTypes.ts#L673-L675" }, { "kind": "interface", @@ -168292,7 +168416,7 @@ } } ], - "specLocation": 
"inference/_types/CommonTypes.ts#L606-L631" + "specLocation": "inference/_types/CommonTypes.ts#L642-L667" }, { "kind": "enum", @@ -168305,7 +168429,7 @@ "name": "AnthropicTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L633-L635" + "specLocation": "inference/_types/CommonTypes.ts#L669-L671" }, { "kind": "interface", @@ -168379,7 +168503,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L641-L683" + "specLocation": "inference/_types/CommonTypes.ts#L677-L719" }, { "kind": "enum", @@ -168392,7 +168516,7 @@ "name": "AzureAiStudioServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L730-L732" + "specLocation": "inference/_types/CommonTypes.ts#L766-L768" }, { "kind": "interface", @@ -168487,7 +168611,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L685-L722" + "specLocation": "inference/_types/CommonTypes.ts#L721-L758" }, { "kind": "enum", @@ -168506,7 +168630,7 @@ "name": "AzureAiStudioTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L724-L728" + "specLocation": "inference/_types/CommonTypes.ts#L760-L764" }, { "kind": "interface", @@ -168598,7 +168722,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L734-L779" + "specLocation": "inference/_types/CommonTypes.ts#L770-L815" }, { "kind": "enum", @@ -168611,7 +168735,7 @@ "name": "AzureOpenAIServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L794-L796" + "specLocation": "inference/_types/CommonTypes.ts#L830-L832" }, { "kind": "interface", @@ -168633,7 +168757,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L781-L787" + "specLocation": "inference/_types/CommonTypes.ts#L817-L823" }, { "kind": "enum", @@ -168649,7 +168773,7 @@ "name": "AzureOpenAITaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L789-L792" + "specLocation": 
"inference/_types/CommonTypes.ts#L825-L828" }, { "kind": "enum", @@ -168674,7 +168798,7 @@ "name": "CohereEmbeddingType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L851-L857" + "specLocation": "inference/_types/CommonTypes.ts#L887-L893" }, { "kind": "enum", @@ -168696,7 +168820,7 @@ "name": "CohereInputType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L859-L864" + "specLocation": "inference/_types/CommonTypes.ts#L895-L900" }, { "kind": "interface", @@ -168769,7 +168893,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L798-L839" + "specLocation": "inference/_types/CommonTypes.ts#L834-L875" }, { "kind": "enum", @@ -168782,7 +168906,7 @@ "name": "CohereServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L847-L849" + "specLocation": "inference/_types/CommonTypes.ts#L883-L885" }, { "kind": "enum", @@ -168801,7 +168925,7 @@ "name": "CohereSimilarityType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L866-L870" + "specLocation": "inference/_types/CommonTypes.ts#L902-L906" }, { "kind": "interface", @@ -168859,7 +168983,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L878-L910" + "specLocation": "inference/_types/CommonTypes.ts#L914-L946" }, { "kind": "enum", @@ -168878,7 +169002,7 @@ "name": "CohereTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L841-L845" + "specLocation": "inference/_types/CommonTypes.ts#L877-L881" }, { "kind": "enum", @@ -168897,7 +169021,7 @@ "name": "CohereTruncateType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L872-L876" + "specLocation": "inference/_types/CommonTypes.ts#L908-L912" }, { "kind": "interface", @@ -169180,7 +169304,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L982-L993" + "specLocation": 
"inference/_types/CommonTypes.ts#L1018-L1029" }, { "kind": "interface", @@ -169198,7 +169322,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L995-L1133" + "specLocation": "inference/_types/CommonTypes.ts#L1031-L1169" }, { "kind": "interface", @@ -169276,7 +169400,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L912-L980" + "specLocation": "inference/_types/CommonTypes.ts#L948-L1016" }, { "kind": "enum", @@ -169289,7 +169413,7 @@ "name": "CustomServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1142-L1144" + "specLocation": "inference/_types/CommonTypes.ts#L1178-L1180" }, { "kind": "interface", @@ -169307,7 +169431,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1146-L1160" + "specLocation": "inference/_types/CommonTypes.ts#L1182-L1196" }, { "kind": "enum", @@ -169329,7 +169453,7 @@ "name": "CustomTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1135-L1140" + "specLocation": "inference/_types/CommonTypes.ts#L1171-L1176" }, { "kind": "interface", @@ -169377,7 +169501,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1174-L1196" + "specLocation": "inference/_types/CommonTypes.ts#L1210-L1232" }, { "kind": "enum", @@ -169390,7 +169514,7 @@ "name": "DeepSeekServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1198-L1200" + "specLocation": "inference/_types/CommonTypes.ts#L1234-L1236" }, { "kind": "interface", @@ -169531,7 +169655,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1223-L1257" + "specLocation": "inference/_types/CommonTypes.ts#L1259-L1293" }, { "kind": "enum", @@ -169544,7 +169668,7 @@ "name": "ElasticsearchServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1273-L1275" + "specLocation": "inference/_types/CommonTypes.ts#L1309-L1311" }, { "kind": "interface", @@ -169567,7 +169691,7 @@ } } ], - 
"specLocation": "inference/_types/CommonTypes.ts#L1259-L1265" + "specLocation": "inference/_types/CommonTypes.ts#L1295-L1301" }, { "kind": "enum", @@ -169586,7 +169710,7 @@ "name": "ElasticsearchTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1267-L1271" + "specLocation": "inference/_types/CommonTypes.ts#L1303-L1307" }, { "kind": "interface", @@ -169632,7 +169756,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1277-L1303" + "specLocation": "inference/_types/CommonTypes.ts#L1313-L1339" }, { "kind": "enum", @@ -169645,7 +169769,7 @@ "name": "ElserServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1309-L1311" + "specLocation": "inference/_types/CommonTypes.ts#L1345-L1347" }, { "kind": "enum", @@ -169658,7 +169782,7 @@ "name": "ElserTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1305-L1307" + "specLocation": "inference/_types/CommonTypes.ts#L1341-L1343" }, { "kind": "enum", @@ -169671,7 +169795,7 @@ "name": "GoogleAiServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1336-L1338" + "specLocation": "inference/_types/CommonTypes.ts#L1372-L1374" }, { "kind": "interface", @@ -169719,7 +169843,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1313-L1329" + "specLocation": "inference/_types/CommonTypes.ts#L1349-L1365" }, { "kind": "enum", @@ -169735,7 +169859,7 @@ "name": "GoogleAiStudioTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1331-L1334" + "specLocation": "inference/_types/CommonTypes.ts#L1367-L1370" }, { "kind": "interface", @@ -169809,7 +169933,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1340-L1366" + "specLocation": "inference/_types/CommonTypes.ts#L1376-L1402" }, { "kind": "enum", @@ -169822,7 +169946,7 @@ "name": "GoogleVertexAIServiceType", "namespace": 
"inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1386-L1388" + "specLocation": "inference/_types/CommonTypes.ts#L1422-L1424" }, { "kind": "interface", @@ -169856,7 +169980,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1368-L1377" + "specLocation": "inference/_types/CommonTypes.ts#L1404-L1413" }, { "kind": "enum", @@ -169878,7 +170002,7 @@ "name": "GoogleVertexAITaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1379-L1384" + "specLocation": "inference/_types/CommonTypes.ts#L1415-L1420" }, { "kind": "interface", @@ -169940,7 +170064,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1390-L1422" + "specLocation": "inference/_types/CommonTypes.ts#L1426-L1458" }, { "kind": "enum", @@ -169953,7 +170077,7 @@ "name": "HuggingFaceServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1443-L1445" + "specLocation": "inference/_types/CommonTypes.ts#L1479-L1481" }, { "kind": "interface", @@ -169987,7 +170111,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1424-L1434" + "specLocation": "inference/_types/CommonTypes.ts#L1460-L1470" }, { "kind": "enum", @@ -170009,7 +170133,7 @@ "name": "HuggingFaceTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1436-L1441" + "specLocation": "inference/_types/CommonTypes.ts#L1472-L1477" }, { "kind": "interface", @@ -170101,7 +170225,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L288-L344" + "specLocation": "inference/_types/Services.ts#L301-L357" }, { "kind": "interface", @@ -170160,7 +170284,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L45-L65" + "specLocation": "inference/_types/Services.ts#L46-L66" }, { "kind": "interface", @@ -170201,7 +170325,47 @@ } } ], - "specLocation": "inference/_types/Services.ts#L67-L79" + "specLocation": "inference/_types/Services.ts#L68-L80" + }, + { + "kind": "interface", + 
"inherits": { + "type": { + "name": "InferenceEndpoint", + "namespace": "inference._types" + } + }, + "name": { + "name": "InferenceEndpointInfoAi21", + "namespace": "inference._types" + }, + "properties": [ + { + "description": "The inference Id", + "name": "inference_id", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "string", + "namespace": "_builtins" + } + } + }, + { + "description": "The task type", + "name": "task_type", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "TaskTypeAi21", + "namespace": "inference._types" + } + } + } + ], + "specLocation": "inference/_types/Services.ts#L82-L91" }, { "kind": "interface", @@ -170241,7 +170405,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L80-L89" + "specLocation": "inference/_types/Services.ts#L93-L102" }, { "kind": "interface", @@ -170281,7 +170445,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L91-L100" + "specLocation": "inference/_types/Services.ts#L104-L113" }, { "kind": "interface", @@ -170321,7 +170485,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L102-L111" + "specLocation": "inference/_types/Services.ts#L115-L124" }, { "kind": "interface", @@ -170361,7 +170525,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L113-L122" + "specLocation": "inference/_types/Services.ts#L126-L135" }, { "kind": "interface", @@ -170401,7 +170565,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L124-L133" + "specLocation": "inference/_types/Services.ts#L137-L146" }, { "kind": "interface", @@ -170441,7 +170605,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L135-L144" + "specLocation": "inference/_types/Services.ts#L148-L157" }, { "kind": "interface", @@ -170481,7 +170645,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L146-L155" + "specLocation": "inference/_types/Services.ts#L159-L168" }, { "kind": "interface", @@ -170521,7 +170685,7 @@ } } ], - "specLocation": 
"inference/_types/Services.ts#L157-L166" + "specLocation": "inference/_types/Services.ts#L170-L179" }, { "kind": "interface", @@ -170561,7 +170725,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L167-L176" + "specLocation": "inference/_types/Services.ts#L180-L189" }, { "kind": "interface", @@ -170601,7 +170765,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L189-L198" + "specLocation": "inference/_types/Services.ts#L202-L211" }, { "kind": "interface", @@ -170641,7 +170805,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L178-L187" + "specLocation": "inference/_types/Services.ts#L191-L200" }, { "kind": "interface", @@ -170681,7 +170845,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L200-L209" + "specLocation": "inference/_types/Services.ts#L213-L222" }, { "kind": "interface", @@ -170721,7 +170885,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L211-L220" + "specLocation": "inference/_types/Services.ts#L224-L233" }, { "kind": "interface", @@ -170761,7 +170925,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L222-L231" + "specLocation": "inference/_types/Services.ts#L235-L244" }, { "kind": "interface", @@ -170801,7 +170965,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L233-L242" + "specLocation": "inference/_types/Services.ts#L246-L255" }, { "kind": "interface", @@ -170841,7 +171005,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L244-L253" + "specLocation": "inference/_types/Services.ts#L257-L266" }, { "kind": "interface", @@ -170881,7 +171045,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L255-L264" + "specLocation": "inference/_types/Services.ts#L268-L277" }, { "kind": "interface", @@ -170921,7 +171085,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L266-L275" + "specLocation": "inference/_types/Services.ts#L279-L288" }, { "kind": "interface", @@ -170961,7 +171125,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L277-L286" + 
"specLocation": "inference/_types/Services.ts#L290-L299" }, { "kind": "interface", @@ -171121,7 +171285,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1447-L1476" + "specLocation": "inference/_types/CommonTypes.ts#L1483-L1512" }, { "kind": "enum", @@ -171134,7 +171298,7 @@ "name": "JinaAIServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1506-L1508" + "specLocation": "inference/_types/CommonTypes.ts#L1542-L1544" }, { "kind": "enum", @@ -171153,7 +171317,7 @@ "name": "JinaAISimilarityType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1510-L1514" + "specLocation": "inference/_types/CommonTypes.ts#L1546-L1550" }, { "kind": "interface", @@ -171199,7 +171363,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1478-L1499" + "specLocation": "inference/_types/CommonTypes.ts#L1514-L1535" }, { "kind": "enum", @@ -171215,7 +171379,7 @@ "name": "JinaAITaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1501-L1504" + "specLocation": "inference/_types/CommonTypes.ts#L1537-L1540" }, { "kind": "enum", @@ -171237,7 +171401,7 @@ "name": "JinaAITextEmbeddingTask", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1516-L1521" + "specLocation": "inference/_types/CommonTypes.ts#L1552-L1557" }, { "kind": "interface", @@ -171395,7 +171559,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1523-L1550" + "specLocation": "inference/_types/CommonTypes.ts#L1559-L1586" }, { "kind": "enum", @@ -171408,7 +171572,7 @@ "name": "MistralServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1558-L1560" + "specLocation": "inference/_types/CommonTypes.ts#L1594-L1596" }, { "kind": "enum", @@ -171427,7 +171591,7 @@ "name": "MistralTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1552-L1556" + 
"specLocation": "inference/_types/CommonTypes.ts#L1588-L1592" }, { "kind": "interface", @@ -171514,7 +171678,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1562-L1604" + "specLocation": "inference/_types/CommonTypes.ts#L1598-L1640" }, { "kind": "enum", @@ -171527,7 +171691,7 @@ "name": "OpenAIServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1620-L1622" + "specLocation": "inference/_types/CommonTypes.ts#L1656-L1658" }, { "kind": "interface", @@ -171549,7 +171713,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1606-L1612" + "specLocation": "inference/_types/CommonTypes.ts#L1642-L1648" }, { "kind": "enum", @@ -171568,7 +171732,7 @@ "name": "OpenAITaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1614-L1618" + "specLocation": "inference/_types/CommonTypes.ts#L1650-L1654" }, { "kind": "interface", @@ -171635,7 +171799,7 @@ } } ], - "specLocation": "inference/_types/Services.ts#L350-L376" + "specLocation": "inference/_types/Services.ts#L363-L389" }, { "kind": "interface", @@ -171783,7 +171947,7 @@ "name": "ServiceSettings", "namespace": "inference._types" }, - "specLocation": "inference/_types/Services.ts#L346-L346", + "specLocation": "inference/_types/Services.ts#L359-L359", "type": { "kind": "user_defined_value" } @@ -171867,7 +172031,7 @@ "name": "TaskSettings", "namespace": "inference._types" }, - "specLocation": "inference/_types/Services.ts#L348-L348", + "specLocation": "inference/_types/Services.ts#L361-L361", "type": { "kind": "user_defined_value" } @@ -171897,6 +172061,22 @@ }, "specLocation": "inference/_types/TaskType.ts#L20-L29" }, + { + "kind": "enum", + "members": [ + { + "name": "completion" + }, + { + "name": "chat_completion" + } + ], + "name": { + "name": "TaskTypeAi21", + "namespace": "inference._types" + }, + "specLocation": "inference/_types/TaskType.ts#L36-L39" + }, { "kind": "enum", "members": [ @@ -171917,7 +172097,7 
@@ "name": "TaskTypeAlibabaCloudAI", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L36-L41" + "specLocation": "inference/_types/TaskType.ts#L41-L46" }, { "kind": "enum", @@ -171933,7 +172113,7 @@ "name": "TaskTypeAmazonBedrock", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L43-L46" + "specLocation": "inference/_types/TaskType.ts#L48-L51" }, { "kind": "enum", @@ -171958,7 +172138,7 @@ "name": "TaskTypeAmazonSageMaker", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L48-L54" + "specLocation": "inference/_types/TaskType.ts#L53-L59" }, { "kind": "enum", @@ -171971,7 +172151,7 @@ "name": "TaskTypeAnthropic", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L56-L58" + "specLocation": "inference/_types/TaskType.ts#L61-L63" }, { "kind": "enum", @@ -171990,7 +172170,7 @@ "name": "TaskTypeAzureAIStudio", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L60-L64" + "specLocation": "inference/_types/TaskType.ts#L65-L69" }, { "kind": "enum", @@ -172006,7 +172186,7 @@ "name": "TaskTypeAzureOpenAI", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L66-L69" + "specLocation": "inference/_types/TaskType.ts#L71-L74" }, { "kind": "enum", @@ -172025,7 +172205,7 @@ "name": "TaskTypeCohere", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L71-L75" + "specLocation": "inference/_types/TaskType.ts#L76-L80" }, { "kind": "enum", @@ -172047,7 +172227,7 @@ "name": "TaskTypeCustom", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L77-L82" + "specLocation": "inference/_types/TaskType.ts#L82-L87" }, { "kind": "enum", @@ -172063,7 +172243,7 @@ "name": "TaskTypeDeepSeek", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L84-L87" + "specLocation": "inference/_types/TaskType.ts#L89-L92" }, { 
"kind": "enum", @@ -172076,7 +172256,7 @@ "name": "TaskTypeELSER", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L95-L97" + "specLocation": "inference/_types/TaskType.ts#L100-L102" }, { "kind": "enum", @@ -172095,7 +172275,7 @@ "name": "TaskTypeElasticsearch", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L89-L93" + "specLocation": "inference/_types/TaskType.ts#L94-L98" }, { "kind": "enum", @@ -172111,7 +172291,7 @@ "name": "TaskTypeGoogleAIStudio", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L99-L102" + "specLocation": "inference/_types/TaskType.ts#L104-L107" }, { "kind": "enum", @@ -172127,7 +172307,7 @@ "name": "TaskTypeGoogleVertexAI", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L104-L107" + "specLocation": "inference/_types/TaskType.ts#L109-L112" }, { "kind": "enum", @@ -172149,7 +172329,7 @@ "name": "TaskTypeHuggingFace", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L109-L114" + "specLocation": "inference/_types/TaskType.ts#L114-L119" }, { "kind": "enum", @@ -172184,7 +172364,7 @@ "name": "TaskTypeMistral", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L116-L120" + "specLocation": "inference/_types/TaskType.ts#L121-L125" }, { "kind": "enum", @@ -172203,7 +172383,7 @@ "name": "TaskTypeOpenAI", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L122-L126" + "specLocation": "inference/_types/TaskType.ts#L127-L131" }, { "kind": "enum", @@ -172219,7 +172399,7 @@ "name": "TaskTypeVoyageAI", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L128-L131" + "specLocation": "inference/_types/TaskType.ts#L133-L136" }, { "kind": "enum", @@ -172238,7 +172418,7 @@ "name": "TaskTypeWatsonx", "namespace": "inference._types" }, - "specLocation": "inference/_types/TaskType.ts#L133-L137" + 
"specLocation": "inference/_types/TaskType.ts#L138-L142" }, { "kind": "interface", @@ -172484,7 +172664,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1624-L1655" + "specLocation": "inference/_types/CommonTypes.ts#L1660-L1691" }, { "kind": "enum", @@ -172497,7 +172677,7 @@ "name": "VoyageAIServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1688-L1690" + "specLocation": "inference/_types/CommonTypes.ts#L1724-L1726" }, { "kind": "interface", @@ -172557,7 +172737,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1657-L1681" + "specLocation": "inference/_types/CommonTypes.ts#L1693-L1717" }, { "kind": "enum", @@ -172573,7 +172753,7 @@ "name": "VoyageAITaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1683-L1686" + "specLocation": "inference/_types/CommonTypes.ts#L1719-L1722" }, { "kind": "interface", @@ -172661,7 +172841,7 @@ } } ], - "specLocation": "inference/_types/CommonTypes.ts#L1692-L1730" + "specLocation": "inference/_types/CommonTypes.ts#L1728-L1766" }, { "kind": "enum", @@ -172674,7 +172854,7 @@ "name": "WatsonxServiceType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1738-L1740" + "specLocation": "inference/_types/CommonTypes.ts#L1774-L1776" }, { "kind": "enum", @@ -172693,7 +172873,7 @@ "name": "WatsonxTaskType", "namespace": "inference._types" }, - "specLocation": "inference/_types/CommonTypes.ts#L1732-L1736" + "specLocation": "inference/_types/CommonTypes.ts#L1768-L1772" }, { "kind": "request", @@ -173420,7 +173600,7 @@ } } }, - "description": "Create an inference endpoint.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through 
Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. You can find the available task types next to the integration name:\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)", + "description": "Create an inference endpoint.\n\nIMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.\nFor built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.\nHowever, if you do not plan to use the inference APIs to use these models or 
if you want to use non-NLP models, use the machine learning trained model APIs.\n\nThe following integrations are available through the inference API. You can find the available task types next to the integration name:\n* AI21 (`chat_completion`, `completion`)\n* AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Amazon Bedrock (`completion`, `text_embedding`)\n* Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`)\n* Anthropic (`completion`)\n* Azure AI Studio (`completion`, 'rerank', `text_embedding`)\n* Azure OpenAI (`completion`, `text_embedding`)\n* Cohere (`completion`, `rerank`, `text_embedding`)\n* DeepSeek (`completion`, `chat_completion`)\n* Elasticsearch (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)\n* ELSER (`sparse_embedding`)\n* Google AI Studio (`completion`, `text_embedding`)\n* Google Vertex AI (`rerank`, `text_embedding`)\n* Hugging Face (`chat_completion`, `completion`, `rerank`, `text_embedding`)\n* Mistral (`chat_completion`, `completion`, `text_embedding`)\n* OpenAI (`chat_completion`, `completion`, `text_embedding`)\n* VoyageAI (`text_embedding`, `rerank`)\n* Watsonx inference integration (`text_embedding`)\n* JinaAI (`text_embedding`, `rerank`)", "examples": { "InferencePutExample1": { "alternatives": [ @@ -173505,7 +173685,7 @@ } } ], - "specLocation": "inference/put/PutRequest.ts#L26-L88" + "specLocation": "inference/put/PutRequest.ts#L26-L89" }, { "kind": "response", @@ -173526,6 +173706,125 @@ }, "specLocation": "inference/put/PutResponse.ts#L22-L25" }, + { + "kind": "request", + "attachedBehaviors": [ + "CommonQueryParameters" + ], + "body": { + "kind": "properties", + "properties": [ + { + "description": "The type of service supported for the specified task type. 
In this case, `ai21`.", + "name": "service", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Ai21ServiceType", + "namespace": "inference._types" + } + } + }, + { + "description": "Settings used to install the inference model. These settings are specific to the `ai21` service.", + "name": "service_settings", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Ai21ServiceSettings", + "namespace": "inference._types" + } + } + } + ] + }, + "description": "Create a AI21 inference endpoint.\n\nCreate an inference endpoint to perform an inference task with the `ai21` service.", + "examples": { + "PutAi21RequestExample1": { + "description": "Run `PUT _inference/completion/ai21-completion` to create an AI21 inference endpoint that performs a `completion` task.", + "method_request": "PUT _inference/completion/ai21-completion", + "value": "{\n \"service\": \"ai21\",\n \"service_settings\": {\n \"api_key\": \"ai21-api-key\",\n \"model_id\": \"jamba-large\" \n }\n}" + }, + "PutAi21RequestExample2": { + "description": "Run `PUT _inference/chat-completion/ai21-chat-completion` to create a AI21 inference endpoint that performs a `chat_completion` task.", + "method_request": "PUT _inference/chat-completion/ai21-chat-completion", + "value": "{\n \"service\": \"ai21\",\n \"service_settings\": {\n \"api_key\": \"ai21-api-key\",\n \"model_id\": \"jamba-mini\" \n }\n}" + } + }, + "inherits": { + "type": { + "name": "RequestBase", + "namespace": "_types" + } + }, + "name": { + "name": "Request", + "namespace": "inference.put_ai21" + }, + "path": [ + { + "description": "The type of the inference task that the model will perform.", + "name": "task_type", + "required": true, + "type": { + "kind": "instance_of", + "type": { + "name": "Ai21TaskType", + "namespace": "inference._types" + } + } + }, + { + "description": "The unique identifier of the inference endpoint.", + "name": "ai21_inference_id", + "required": true, + "type": { + 
"kind": "instance_of", + "type": { + "name": "Id", + "namespace": "_types" + } + } + } + ], + "query": [ + { + "description": "Specifies the amount of time to wait for the inference endpoint to be created.", + "name": "timeout", + "required": false, + "serverDefault": "30s", + "type": { + "kind": "instance_of", + "type": { + "name": "Duration", + "namespace": "_types" + } + } + } + ], + "specLocation": "inference/put_ai21/PutAi21Request.ts#L29-L73" + }, + { + "kind": "response", + "body": { + "kind": "value", + "codegenName": "endpoint_info", + "value": { + "kind": "instance_of", + "type": { + "name": "InferenceEndpointInfoAi21", + "namespace": "inference._types" + } + } + }, + "name": { + "name": "Response", + "namespace": "inference.put_ai21" + }, + "specLocation": "inference/put_ai21/PutAi21Response.ts#L22-L25" + }, { "kind": "request", "attachedBehaviors": [ diff --git a/output/typescript/types.ts b/output/typescript/types.ts index 3d874c764b..ecae7392da 100644 --- a/output/typescript/types.ts +++ b/output/typescript/types.ts @@ -13731,6 +13731,16 @@ export interface InferenceAdaptiveAllocations { min_number_of_allocations?: integer } +export interface InferenceAi21ServiceSettings { + model_id: string + api_key?: string + rate_limit?: InferenceRateLimitSetting +} + +export type InferenceAi21ServiceType = 'ai21' + +export type InferenceAi21TaskType = 'completion' | 'chat_completion' + export interface InferenceAlibabaCloudServiceSettings { api_key: string host: string @@ -14044,6 +14054,11 @@ export interface InferenceInferenceEndpointInfo extends InferenceInferenceEndpoi task_type: InferenceTaskType } +export interface InferenceInferenceEndpointInfoAi21 extends InferenceInferenceEndpoint { + inference_id: string + task_type: InferenceTaskTypeAi21 +} + export interface InferenceInferenceEndpointInfoAlibabaCloudAI extends InferenceInferenceEndpoint { inference_id: string task_type: InferenceTaskTypeAlibabaCloudAI @@ -14247,6 +14262,8 @@ export type 
InferenceTaskSettings = any export type InferenceTaskType = 'sparse_embedding' | 'text_embedding' | 'rerank' | 'completion' | 'chat_completion' +export type InferenceTaskTypeAi21 = 'completion' | 'chat_completion' + export type InferenceTaskTypeAlibabaCloudAI = 'text_embedding' | 'rerank' | 'completion' | 'sparse_embedding' export type InferenceTaskTypeAmazonBedrock = 'text_embedding' | 'completion' @@ -14401,6 +14418,18 @@ export interface InferencePutRequest extends RequestBase { export type InferencePutResponse = InferenceInferenceEndpointInfo +export interface InferencePutAi21Request extends RequestBase { + task_type: InferenceAi21TaskType + ai21_inference_id: Id + timeout?: Duration + body?: { + service: InferenceAi21ServiceType + service_settings: InferenceAi21ServiceSettings + } +} + +export type InferencePutAi21Response = InferenceInferenceEndpointInfoAi21 + export interface InferencePutAlibabacloudRequest extends RequestBase { task_type: InferenceAlibabaCloudTaskType alibabacloud_inference_id: Id diff --git a/package-lock.json b/package-lock.json index d72f33ac58..cd855fd21a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,9 +1,11 @@ { "name": "elasticsearch-specification", + "version": "overlay", "lockfileVersion": 3, "requires": true, "packages": { "": { + "version": "overlay", "dependencies": { "@redocly/cli": "^1.34.5", "@stoplight/spectral-cli": "^6.14.2" diff --git a/specification/_doc_ids/table.csv b/specification/_doc_ids/table.csv index 2d0863ed02..d41dea4903 100644 --- a/specification/_doc_ids/table.csv +++ b/specification/_doc_ids/table.csv @@ -2,6 +2,8 @@ doc_id,doc_url,previous_version_doc_url,description ack-watch, https://www.elastic.co/docs/explore-analyze/alerts-cases/watcher/actions#example,, apis,https://www.elastic.co/docs/api/doc/elasticsearch,, add-nodes,https://www.elastic.co/docs/deploy-manage/maintenance/add-and-remove-elasticsearch-nodes,, +ai21-api-models,https://docs.ai21.com/docs/jamba-foundation-models,, 
+ai21-rate-limit,https://docs.ai21.com/reference/api-rate-limits,, alias-update,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-alias,https://www.elastic.co/guide/en/elasticsearch/reference/8.18/indices-add-alias.html, aliases-update,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-update-aliases,https://www.elastic.co/guide/en/elasticsearch/reference/8.18/indices-aliases.html, alibabacloud-api-keys,https://opensearch.console.aliyun.com/cn-shanghai/rag/api-key,, @@ -356,6 +358,7 @@ inference-api-post,https://www.elastic.co/docs/api/doc/elasticsearch/operation/o inference-api-post-eis-chat-completion,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-post-eis-chat-completion,, inference-api-put,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put,https://www.elastic.co/guide/en/elasticsearch/reference/8.18/put-inference-api.html, inference-api-put-alibabacloud,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-alibabacloud,https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-alibabacloud-ai-search.html, +inference-api-put-ai21,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-ai21,, inference-api-put-amazonbedrock,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonbedrock,https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-amazon-bedrock.html, inference-api-put-amazonsagemaker,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-amazonsagemaker,https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-amazon-sagemaker.html, inference-api-put-anthropic,https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-inference-put-anthropic,https://www.elastic.co/guide/en/elasticsearch/reference/8.18/infer-service-anthropic.html, diff --git 
a/specification/_json_spec/inference.put_ai21.json b/specification/_json_spec/inference.put_ai21.json new file mode 100644 index 0000000000..6d59087402 --- /dev/null +++ b/specification/_json_spec/inference.put_ai21.json @@ -0,0 +1,35 @@ +{ + "inference.put_ai21": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-service-ai21.html", + "description": "Configure a AI21 inference endpoint" + }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": ["application/json"], + "content_type": ["application/json"] + }, + "url": { + "paths": [ + { + "path": "/_inference/{task_type}/{ai21_inference_id}", + "methods": ["PUT"], + "parts": { + "task_type": { + "type": "string", + "description": "The task type" + }, + "ai21_inference_id": { + "type": "string", + "description": "The inference ID" + } + } + } + ] + }, + "body": { + "description": "The inference endpoint's task and service settings" + } + } +} diff --git a/specification/inference/_types/CommonTypes.ts b/specification/inference/_types/CommonTypes.ts index e211260f63..2bbe2ac3b8 100644 --- a/specification/inference/_types/CommonTypes.ts +++ b/specification/inference/_types/CommonTypes.ts @@ -289,6 +289,42 @@ export interface CompletionTool { function: CompletionToolFunction } +export class Ai21ServiceSettings { + /** + * The name of the model to use for the inference task. + * Refer to the AI21 models documentation for the list of supported models and versions. + * Service has been tested and confirmed to be working for `completion` and `chat_completion` tasks with the following models: + * * `jamba-mini` + * * `jamba-large` + * @ext_doc_id ai21-api-models + */ + model_id: string + /** + * A valid API key for accessing AI21 API. + * + * IMPORTANT: You need to provide the API key only once, during the inference model creation. + * The get inference endpoint API does not retrieve your API key. 
+ * After creating the inference model, you cannot change the associated API key. + * If you want to use a different API key, delete the inference model and recreate it with the same name and the updated API key. + */ + api_key?: string + /** + * This setting helps to minimize the number of rate limit errors returned from the AI21 API. + * By default, the `ai21` service sets the number of requests allowed per minute to 200. Please refer to AI21 documentation for more details. + * @ext_doc_id ai21-rate-limit + */ + rate_limit?: RateLimitSetting +} + +export enum Ai21TaskType { + completion, + chat_completion +} + +export enum Ai21ServiceType { + ai21 +} + export class AlibabaCloudServiceSettings { /** * A valid API key for the AlibabaCloud AI Search API. diff --git a/specification/inference/_types/Services.ts b/specification/inference/_types/Services.ts index 69b084d63d..5dd1d2ea4a 100644 --- a/specification/inference/_types/Services.ts +++ b/specification/inference/_types/Services.ts @@ -21,6 +21,7 @@ import { integer } from '@_types/Numeric' import { UserDefinedValue } from '@spec_utils/UserDefinedValue' import { TaskType, + TaskTypeAi21, TaskTypeAlibabaCloudAI, TaskTypeAmazonBedrock, TaskTypeAmazonSageMaker, @@ -77,6 +78,18 @@ export class InferenceEndpointInfo extends InferenceEndpoint { */ task_type: TaskType } + +export class InferenceEndpointInfoAi21 extends InferenceEndpoint { + /** + * The inference Id + */ + inference_id: string + /** + * The task type + */ + task_type: TaskTypeAi21 +} + export class InferenceEndpointInfoAlibabaCloudAI extends InferenceEndpoint { /** * The inference Id diff --git a/specification/inference/_types/TaskType.ts b/specification/inference/_types/TaskType.ts index 2e97db63ba..a46bff5638 100644 --- a/specification/inference/_types/TaskType.ts +++ b/specification/inference/_types/TaskType.ts @@ -33,6 +33,11 @@ export enum TaskTypeJinaAi { rerank } +export enum TaskTypeAi21 { + completion, + chat_completion +} + export enum 
TaskTypeAlibabaCloudAI { text_embedding, rerank, diff --git a/specification/inference/put/PutRequest.ts b/specification/inference/put/PutRequest.ts index 4159dad70d..3ed581fd6c 100644 --- a/specification/inference/put/PutRequest.ts +++ b/specification/inference/put/PutRequest.ts @@ -31,6 +31,7 @@ import { TaskType } from '@inference/_types/TaskType' * However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. * * The following integrations are available through the inference API. You can find the available task types next to the integration name: + * * AI21 (`chat_completion`, `completion`) * * AlibabaCloud AI Search (`completion`, `rerank`, `sparse_embedding`, `text_embedding`) * * Amazon Bedrock (`completion`, `text_embedding`) * * Amazon SageMaker (`chat_completion`, `completion`, `rerank`, `sparse_embedding`, `text_embedding`) diff --git a/specification/inference/put_ai21/PutAi21Request.ts b/specification/inference/put_ai21/PutAi21Request.ts new file mode 100644 index 0000000000..0c2806cd24 --- /dev/null +++ b/specification/inference/put_ai21/PutAi21Request.ts @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import { RequestBase } from '@_types/Base' +import { Id } from '@_types/common' +import { Duration } from '@_types/Time' +import { + Ai21ServiceSettings, + Ai21ServiceType, + Ai21TaskType +} from '@inference/_types/CommonTypes' + +/** + * Create a AI21 inference endpoint. + * + * Create an inference endpoint to perform an inference task with the `ai21` service. + * @rest_spec_name inference.put_ai21 + * @availability stack since=9.2.0 stability=stable visibility=public + * @availability serverless stability=stable visibility=public + * @cluster_privileges manage_inference + * @doc_id inference-api-put-ai21 + */ +export interface Request extends RequestBase { + urls: [ + { + path: '/_inference/{task_type}/{ai21_inference_id}' + methods: ['PUT'] + } + ] + path_parts: { + /** + * The type of the inference task that the model will perform. + */ + task_type: Ai21TaskType + /** + * The unique identifier of the inference endpoint. + */ + ai21_inference_id: Id + } + query_parameters: { + /** + * Specifies the amount of time to wait for the inference endpoint to be created. + * @server_default 30s + */ + timeout?: Duration + } + body: { + /** + * The type of service supported for the specified task type. In this case, `ai21`. + */ + service: Ai21ServiceType + /** + * Settings used to install the inference model. These settings are specific to the `ai21` service. + */ + service_settings: Ai21ServiceSettings + } +} diff --git a/specification/inference/put_ai21/PutAi21Response.ts b/specification/inference/put_ai21/PutAi21Response.ts new file mode 100644 index 0000000000..03b9729988 --- /dev/null +++ b/specification/inference/put_ai21/PutAi21Response.ts @@ -0,0 +1,25 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import { InferenceEndpointInfoAi21 } from '@inference/_types/Services' + +export class Response { + /** @codegen_name endpoint_info */ + body: InferenceEndpointInfoAi21 +} diff --git a/specification/inference/put_ai21/examples/request/PutAi21RequestExample1.yaml b/specification/inference/put_ai21/examples/request/PutAi21RequestExample1.yaml new file mode 100644 index 0000000000..2b1e46a349 --- /dev/null +++ b/specification/inference/put_ai21/examples/request/PutAi21RequestExample1.yaml @@ -0,0 +1,12 @@ +# summary: +description: Run `PUT _inference/completion/ai21-completion` to create an AI21 inference endpoint that performs a `completion` task. 
+method_request: 'PUT _inference/completion/ai21-completion' +# type: "request" +value: |- + { + "service": "ai21", + "service_settings": { + "api_key": "ai21-api-key", + "model_id": "jamba-large" + } + } diff --git a/specification/inference/put_ai21/examples/request/PutAi21RequestExample2.yaml b/specification/inference/put_ai21/examples/request/PutAi21RequestExample2.yaml new file mode 100644 index 0000000000..63041d1f03 --- /dev/null +++ b/specification/inference/put_ai21/examples/request/PutAi21RequestExample2.yaml @@ -0,0 +1,12 @@ +# summary: +description: Run `PUT _inference/chat-completion/ai21-chat-completion` to create a AI21 inference endpoint that performs a `chat_completion` task. +method_request: 'PUT _inference/chat-completion/ai21-chat-completion' +# type: "request" +value: |- + { + "service": "ai21", + "service_settings": { + "api_key": "ai21-api-key", + "model_id": "jamba-mini" + } + }