diff --git a/openapi.yaml b/openapi.yaml
index 0995edca..6169142d 100644
--- a/openapi.yaml
+++ b/openapi.yaml
@@ -2,125 +2,13 @@ openapi: 3.0.0
 info:
   title: OpenAI API
   description: APIs for sampling from and fine-tuning language models
-  version: '1.3.1'
+  version: '2.0.0'
 servers:
   - url: https://api.openai.com/v1
 tags:
 - name: OpenAI
   description: The OpenAI REST API
 paths:
-  /engines:
-    get:
-      operationId: listEngines
-      deprecated: true
-      tags:
-        - OpenAI
-      summary: Lists the currently available (non-finetuned) models, and provides basic information about each one such as the owner and availability.
-      responses:
-        "200":
-          description: OK
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/ListEnginesResponse'
-      x-oaiMeta:
-        name: List engines
-        group: engines
-        path: list
-        examples:
-          curl: |
-            curl https://api.openai.com/v1/engines \
-              -H "Authorization: Bearer $OPENAI_API_KEY"
-          python: |
-            import os
-            import openai
-            openai.api_key = os.getenv("OPENAI_API_KEY")
-            openai.Engine.list()
-          node.js: |
-            const { Configuration, OpenAIApi } = require("openai");
-            const configuration = new Configuration({
-              apiKey: process.env.OPENAI_API_KEY,
-            });
-            const openai = new OpenAIApi(configuration);
-            const response = await openai.listEngines();
-        response: |
-          {
-            "data": [
-              {
-                "id": "engine-id-0",
-                "object": "engine",
-                "owner": "organization-owner",
-                "ready": true
-              },
-              {
-                "id": "engine-id-2",
-                "object": "engine",
-                "owner": "organization-owner",
-                "ready": true
-              },
-              {
-                "id": "engine-id-3",
-                "object": "engine",
-                "owner": "openai",
-                "ready": false
-              },
-            ],
-            "object": "list"
-          }
-
-  /engines/{engine_id}:
-    get:
-      operationId: retrieveEngine
-      deprecated: true
-      tags:
-        - OpenAI
-      summary: Retrieves a model instance, providing basic information about it such as the owner and availability.
-      parameters:
-        - in: path
-          name: engine_id
-          required: true
-          schema:
-            type: string
-          # ideally this will be an actual ID, so this will always work from browser
-          example:
-            davinci
-          description: &engine_id_description >
-            The ID of the engine to use for this request
-      responses:
-        "200":
-          description: OK
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/Engine'
-      x-oaiMeta:
-        name: Retrieve engine
-        group: engines
-        path: retrieve
-        examples:
-          curl: |
-            curl https://api.openai.com/v1/engines/VAR_model_id \
-              -H "Authorization: Bearer $OPENAI_API_KEY"
-          python: |
-            import os
-            import openai
-            openai.api_key = os.getenv("OPENAI_API_KEY")
-            openai.Engine.retrieve("VAR_model_id")
-          node.js: |
-            const { Configuration, OpenAIApi } = require("openai");
-            const configuration = new Configuration({
-              apiKey: process.env.OPENAI_API_KEY,
-            });
-            const openai = new OpenAIApi(configuration);
-            const response = await openai.retrieveEngine("VAR_model_id");
-        response: |
-          {
-            "id": "VAR_model_id",
-            "object": "engine",
-            "owner": "openai",
-            "ready": true
-          }
-
   /chat/completions:
     post:
       operationId: createChatCompletion
@@ -768,101 +656,6 @@ paths:
           "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?"
         }

-  /engines/{engine_id}/search:
-    post:
-      operationId: createSearch
-      deprecated: true
-      tags:
-        - OpenAI
-      summary: |
-        The search endpoint computes similarity scores between provided query and documents. Documents can be passed directly to the API if there are no more than 200 of them.
-
-        To go beyond the 200 document limit, documents can be processed offline and then used for efficient retrieval at query time.
When `file` is set, the search endpoint searches over all the documents in the given file and returns up to the `max_rerank` number of documents. These documents will be returned along with their search scores. - - The similarity score is a positive score that usually ranges from 0 to 300 (but can sometimes go higher), where a score above 200 usually means the document is semantically similar to the query. - parameters: - - in: path - name: engine_id - required: true - schema: - type: string - example: davinci - description: The ID of the engine to use for this request. You can select one of `ada`, `babbage`, `curie`, or `davinci`. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateSearchRequest' - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateSearchResponse' - x-oaiMeta: - name: Create search - group: searches - path: create - examples: - curl: | - curl https://api.openai.com/v1/engines/davinci/search \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -d '{ - "documents": ["White House", "hospital", "school"], - "query": "the president" - }' - python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Engine("davinci").search( - documents=["White House", "hospital", "school"], - query="the president" - ) - node.js: | - const { Configuration, OpenAIApi } = require("openai"); - const configuration = new Configuration({ - apiKey: process.env.OPENAI_API_KEY, - }); - const openai = new OpenAIApi(configuration); - const response = await openai.createSearch("davinci", { - documents: ["White House", "hospital", "school"], - query: "the president", - }); - parameters: | - { - "documents": [ - "White House", - "hospital", - "school" - ], - "query": "the president" - } - response: | - { - "data": [ - { - "document": 0, - "object": "search_result", - "score": 215.412 - }, - { - "document": 1, - "object": "search_result", - "score": 40.316 - }, - { - "document": 2, - "object": "search_result", - "score": 55.226 - } - ], - "object": "list" - } - /files: get: operationId: listFiles @@ -1114,229 +907,6 @@ paths: const openai = new OpenAIApi(configuration); const response = await openai.downloadFile("file-XjGxS3KTG0uNmNOK362iJua3"); - /answers: - post: - operationId: createAnswer - deprecated: true - tags: - - OpenAI - summary: | - Answers the specified question using the provided documents and examples. - - The endpoint first [searches](/docs/api-reference/searches) over provided documents or files to find relevant context. The relevant context is combined with the provided examples and question to create the prompt for [completion](/docs/api-reference/completions). - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAnswerRequest' - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAnswerResponse' - x-oaiMeta: - name: Create answer - group: answers - path: create - examples: - curl: | - curl https://api.openai.com/v1/answers \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "documents": ["Puppy A is happy.", "Puppy B is sad."], - "question": "which puppy is happy?", - "search_model": "ada", - "model": "curie", - "examples_context": "In 2017, U.S. 
life expectancy was 78.6 years.", - "examples": [["What is human life expectancy in the United States?","78 years."]], - "max_tokens": 5, - "stop": ["\n", "<|endoftext|>"] - }' - python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Answer.create( - search_model="ada", - model="curie", - question="which puppy is happy?", - documents=["Puppy A is happy.", "Puppy B is sad."], - examples_context="In 2017, U.S. life expectancy was 78.6 years.", - examples=[["What is human life expectancy in the United States?","78 years."]], - max_tokens=5, - stop=["\n", "<|endoftext|>"], - ) - node.js: | - const { Configuration, OpenAIApi } = require("openai"); - const configuration = new Configuration({ - apiKey: process.env.OPENAI_API_KEY, - }); - const openai = new OpenAIApi(configuration); - const response = await openai.createAnswer({ - search_model: "ada", - model: "curie", - question: "which puppy is happy?", - documents: ["Puppy A is happy.", "Puppy B is sad."], - examples_context: "In 2017, U.S. life expectancy was 78.6 years.", - examples: [["What is human life expectancy in the United States?","78 years."]], - max_tokens: 5, - stop: ["\n", "<|endoftext|>"], - }); - parameters: | - { - "documents": ["Puppy A is happy.", "Puppy B is sad."], - "question": "which puppy is happy?", - "search_model": "ada", - "model": "curie", - "examples_context": "In 2017, U.S. life expectancy was 78.6 years.", - "examples": [["What is human life expectancy in the United States?","78 years."]], - "max_tokens": 5, - "stop": ["\n", "<|endoftext|>"] - } - response: | - { - "answers": [ - "puppy A." - ], - "completion": "cmpl-2euVa1kmKUuLpSX600M41125Mo9NI", - "model": "curie:2020-05-03", - "object": "answer", - "search_model": "ada", - "selected_documents": [ - { - "document": 0, - "text": "Puppy A is happy. " - }, - { - "document": 1, - "text": "Puppy B is sad. " - } - ] - } - - /classifications: - post: - operationId: createClassification - deprecated: true - tags: - - OpenAI - summary: | - Classifies the specified `query` using provided examples. - - The endpoint first [searches](/docs/api-reference/searches) over the labeled examples - to select the ones most relevant for the particular query. Then, the relevant examples - are combined with the query to construct a prompt to produce the final label via the - [completions](/docs/api-reference/completions) endpoint. - - Labeled examples can be provided via an uploaded `file`, or explicitly listed in the - request using the `examples` parameter for quick tests and small scale use cases. 
- requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/CreateClassificationRequest' - responses: - "200": - description: OK - content: - application/json: - schema: - $ref: '#/components/schemas/CreateClassificationResponse' - x-oaiMeta: - name: Create classification - group: classifications - path: create - examples: - curl: | - curl https://api.openai.com/v1/classifications \ - -H "Authorization: Bearer $OPENAI_API_KEY" \ - -H "Content-Type: application/json" \ - -d '{ - "examples": [ - ["A happy moment", "Positive"], - ["I am sad.", "Negative"], - ["I am feeling awesome", "Positive"] - ], - "query": "It is a raining day :(", - "search_model": "ada", - "model": "curie", - "labels":["Positive", "Negative", "Neutral"] - }' - python: | - import os - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.Classification.create( - search_model="ada", - model="curie", - examples=[ - ["A happy moment", "Positive"], - ["I am sad.", "Negative"], - ["I am feeling awesome", "Positive"] - ], - query="It is a raining day :(", - labels=["Positive", "Negative", "Neutral"], - ) - node.js: | - const { Configuration, OpenAIApi } = require("openai"); - const configuration = new Configuration({ - apiKey: process.env.OPENAI_API_KEY, - }); - const openai = new OpenAIApi(configuration); - const response = await openai.createClassification({ - search_model: "ada", - model: "curie", - examples: [ - ["A happy moment", "Positive"], - ["I am sad.", "Negative"], - ["I am feeling awesome", "Positive"] - ], - query:"It is a raining day :(", - labels: ["Positive", "Negative", "Neutral"], - }); - parameters: | - { - "examples": [ - ["A happy moment", "Positive"], - ["I am sad.", "Negative"], - ["I am feeling awesome", "Positive"] - ], - "labels": ["Positive", "Negative", "Neutral"], - "query": "It is a raining day :(", - "search_model": "ada", - "model": "curie" - } - response: | - { - "completion": "cmpl-2euN7lUVZ0d4RKbQqRV79IiiE6M1f", - "label": "Negative", - "model": "curie:2020-05-03", - "object": "classification", - "search_model": "ada", - "selected_examples": [ - { - "document": 1, - "label": "Negative", - "text": "I am sad." - }, - { - "document": 0, - "label": "Positive", - "text": "A happy moment" - }, - { - "document": 2, - "label": "Positive", - "text": "I am feeling awesome" - } - ] - } - /fine-tunes: post: operationId: createFineTune @@ -2030,18 +1600,6 @@ components: $ref: '#/components/schemas/Error' required: - error - ListEnginesResponse: - type: object - properties: - object: - type: string - data: - type: array - items: - $ref: '#/components/schemas/Engine' - required: - - object - - data ListModelsResponse: type: object @@ -2894,75 +2452,6 @@ components: - model - results - CreateSearchRequest: - type: object - properties: - query: - description: Query to search against the documents. - type: string - example: "the president" - minLength: 1 - documents: - description: | - Up to 200 documents to search over, provided as a list of strings. - - The maximum document length (in tokens) is 2034 minus the number of tokens in the query. - - You should specify either `documents` or a `file`, but not both. - type: array - minItems: 1 - maxItems: 200 - items: - type: string - nullable: true - example: "['White House', 'hospital', 'school']" - file: - description: | - The ID of an uploaded file that contains documents to search over. - - You should specify either `documents` or a `file`, but not both. 
- type: string - nullable: true - max_rerank: - description: | - The maximum number of documents to be re-ranked and returned by search. - - This flag only takes effect when `file` is set. - type: integer - minimum: 1 - default: 200 - nullable: true - return_metadata: &return_metadata_configuration - description: | - A special boolean flag for showing metadata. If set to `true`, each document entry in the returned JSON will contain a "metadata" field. - - This flag only takes effect when `file` is set. - type: boolean - default: false - nullable: true - user: *end_user_param_configuration - required: - - query - - CreateSearchResponse: - type: object - properties: - object: - type: string - model: - type: string - data: - type: array - items: - type: object - properties: - object: - type: string - document: - type: integer - score: - type: number - ListFilesResponse: type: object properties: @@ -3012,251 +2501,6 @@ components: - object - deleted - CreateAnswerRequest: - type: object - additionalProperties: false - properties: - model: - description: ID of the model to use for completion. You can select one of `ada`, `babbage`, `curie`, or `davinci`. - type: string - question: - description: Question to get answered. - type: string - minLength: 1 - example: "What is the capital of Japan?" - examples: - description: List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. - type: array - minItems: 1 - maxItems: 200 - items: - type: array - minItems: 2 - maxItems: 2 - items: - type: string - minLength: 1 - example: "[['What is the capital of Canada?', 'Ottawa'], ['Which province is Ottawa in?', 'Ontario']]" - examples_context: - description: A text snippet containing the contextual information used to generate the answers for the `examples` you provide. - type: string - example: "Ottawa, Canada's capital, is located in the east of southern Ontario, near the city of Montréal and the U.S. border." - documents: - description: | - List of documents from which the answer for the input `question` should be derived. If this is an empty list, the question will be answered based on the question-answer examples. - - You should specify either `documents` or a `file`, but not both. - type: array - maxItems: 200 - items: - type: string - example: "['Japan is an island country in East Asia, located in the northwest Pacific Ocean.', 'Tokyo is the capital and most populous prefecture of Japan.']" - nullable: true - file: - description: | - The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. - - You should specify either `documents` or a `file`, but not both. - type: string - nullable: true - search_model: &search_model_configuration - description: ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`. - type: string - default: ada - nullable: true - max_rerank: - description: The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. - type: integer - default: 200 - nullable: true - temperature: - description: What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - type: number - default: 0 - nullable: true - logprobs: &context_completions_logprobs_configuration - type: integer - minimum: 0 - maximum: 5 - default: null - nullable: true - description: | - Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. - - The maximum value for `logprobs` is 5. - - When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. - max_tokens: - description: The maximum number of tokens allowed for the generated answer - type: integer - default: 16 - nullable: true - stop: - description: *completions_stop_description - default: null - oneOf: - - type: string - default: <|endoftext|> - example: "\n" - - type: array - minItems: 1 - maxItems: 4 - items: - type: string - example: '["\n"]' - nullable: true - n: - description: How many answers to generate for each question. - type: integer - minimum: 1 - maximum: 10 - default: 1 - nullable: true - logit_bias: *completions_logit_bias - return_metadata: *return_metadata_configuration - return_prompt: &return_prompt_configuration - description: If set to `true`, the returned JSON will include a "prompt" field containing the final prompt that was used to request a completion. This is mainly useful for debugging purposes. - type: boolean - default: false - nullable: true - expand: &expand_configuration - description: If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. - type: array - items: {} - nullable: true - default: [] - user: *end_user_param_configuration - required: - - model - - question - - examples - - examples_context - - CreateAnswerResponse: - type: object - properties: - object: - type: string - model: - type: string - search_model: - type: string - completion: - type: string - answers: - type: array - items: - type: string - selected_documents: - type: array - items: - type: object - properties: - document: - type: integer - text: - type: string - - CreateClassificationRequest: - type: object - additionalProperties: false - properties: - model: - description: ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them. - type: string - query: - description: Query to be classified. - type: string - minLength: 1 - example: "The plot is not very attractive." - examples: - description: | - A list of examples with labels, in the following format: - - `[["The movie is so interesting.", "Positive"], ["It is quite boring.", "Negative"], ...]` - - All the label strings will be normalized to be capitalized. - - You should specify either `examples` or `file`, but not both. - type: array - minItems: 2 - maxItems: 200 - items: - type: array - minItems: 2 - maxItems: 2 - items: - type: string - minLength: 1 - example: "[['Do not see this film.', 'Negative'], ['Smart, provocative and blisteringly funny.', 'Positive']]" - nullable: true - file: - description: | - The ID of the uploaded file that contains training examples. 
See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. - - You should specify either `examples` or `file`, but not both. - type: string - nullable: true - labels: - description: The set of categories being classified. If not specified, candidate labels will be automatically collected from the examples you provide. All the label strings will be normalized to be capitalized. - type: array - minItems: 2 - maxItems: 200 - default: null - items: - type: string - example: ["Positive", "Negative"] - nullable: true - search_model: *search_model_configuration - temperature: - description: - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - type: number - minimum: 0 - maximum: 2 - default: 0 - nullable: true - example: 0 - logprobs: *context_completions_logprobs_configuration - max_examples: - description: The maximum number of examples to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost. - type: integer - default: 200 - nullable: true - logit_bias: *completions_logit_bias - return_prompt: *return_prompt_configuration - return_metadata: *return_metadata_configuration - expand: *expand_configuration - user: *end_user_param_configuration - required: - - model - - query - - CreateClassificationResponse: - type: object - properties: - object: - type: string - model: - type: string - search_model: - type: string - completion: - type: string - label: - type: string - selected_examples: - type: array - items: - type: object - properties: - document: - type: integer - text: - type: string - label: - type: string - CreateFineTuneRequest: type: object properties: @@ -3610,24 +2854,6 @@ components: required: - text - Engine: - title: Engine - properties: - id: - type: string - object: - type: string - created: - type: integer - nullable: true - ready: - type: boolean - required: - - id - - object - - created - - ready - Model: title: Model properties: @@ -3794,36 +3020,3 @@ x-oaiMeta: Given a input text, outputs if the model classifies it as violating OpenAI's content policy. Related guide: [Moderations](/docs/guides/moderation) - - id: searches - title: Searches - warning: - title: This endpoint is deprecated and will be removed on December 3rd, 2022 - message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272952-search-transition-guide). - description: | - Given a query and a set of documents or labels, the model ranks each document based on its semantic similarity to the provided query. - - Related guide: [Search](/docs/guides/search) - - id: classifications - title: Classifications - warning: - title: This endpoint is deprecated and will be removed on December 3rd, 2022 - message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6272941-classifications-transition-guide). - description: | - Given a query and a set of labeled examples, the model will predict the most likely label for the query. Useful as a drop-in replacement for any ML classification or text-to-label task. 
-
-      Related guide: [Classification](/docs/guides/classifications)
-  - id: answers
-    title: Answers
-    warning:
-      title: This endpoint is deprecated and will be removed on December 3rd, 2022
-      message: We’ve developed new methods with better performance. [Learn more](https://help.openai.com/en/articles/6233728-answers-transition-guide).
-    description: |
-      Given a question, a set of documents, and some examples, the API generates an answer to the question based on the information in the set of documents. This is useful for question-answering applications on sources of truth, like company documentation or a knowledge base.
-
-      Related guide: [Question answering](/docs/guides/answers)
-  - id: engines
-    title: Engines
-    description: These endpoints describe and provide access to the various engines available in the API.
-    warning:
-      title: The Engines endpoints are deprecated.
-      message: Please use their replacement, [Models](/docs/api-reference/models), instead. [Learn more](https://help.openai.com/TODO).
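The deprecation notices removed above point callers at the Models endpoints as the replacement for Engines. As a minimal sketch (not part of the diff itself), the Engine calls shown in the removed examples map onto Models roughly as follows, assuming the same 0.x-style openai Python bindings that the spec's own examples use; VAR_model_id is a placeholder exactly as in the spec:

    import os
    import openai

    openai.api_key = os.getenv("OPENAI_API_KEY")

    # GET /models replaces the removed GET /engines (listEngines).
    models = openai.Model.list()

    # GET /models/{model} replaces the removed GET /engines/{engine_id} (retrieveEngine).
    model = openai.Model.retrieve("VAR_model_id")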